inst-jobs 2.0.0 → 3.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
  3. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
  4. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
  5. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
  6. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
  7. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
  8. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
  9. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
  10. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
  11. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
  12. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
  13. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
  14. data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
  15. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
  16. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
  17. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
  18. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
  19. data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
  20. data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
  21. data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
  22. data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
  23. data/db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb +56 -0
  24. data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb +27 -0
  25. data/db/migrate/20211101190934_update_after_delete_trigger_for_singleton_index.rb +137 -0
  26. data/db/migrate/20211207094200_update_after_delete_trigger_for_singleton_transition_cases.rb +171 -0
  27. data/db/migrate/20211220112800_fix_singleton_race_condition_insert.rb +59 -0
  28. data/db/migrate/20211220113000_fix_singleton_race_condition_delete.rb +207 -0
  29. data/db/migrate/20220127091200_fix_singleton_unique_constraint.rb +31 -0
  30. data/db/migrate/20220128084800_update_insert_trigger_for_singleton_unique_constraint_change.rb +60 -0
  31. data/db/migrate/20220128084900_update_delete_trigger_for_singleton_unique_constraint_change.rb +209 -0
  32. data/db/migrate/20220203063200_remove_old_singleton_index.rb +31 -0
  33. data/db/migrate/20220328152900_add_failed_jobs_indicies.rb +12 -0
  34. data/exe/inst_jobs +3 -2
  35. data/lib/delayed/backend/active_record.rb +226 -168
  36. data/lib/delayed/backend/base.rb +119 -72
  37. data/lib/delayed/batch.rb +11 -9
  38. data/lib/delayed/cli.rb +98 -84
  39. data/lib/delayed/core_ext/kernel.rb +4 -2
  40. data/lib/delayed/daemon.rb +70 -74
  41. data/lib/delayed/job_tracking.rb +26 -25
  42. data/lib/delayed/lifecycle.rb +28 -23
  43. data/lib/delayed/log_tailer.rb +17 -17
  44. data/lib/delayed/logging.rb +13 -16
  45. data/lib/delayed/message_sending.rb +43 -52
  46. data/lib/delayed/performable_method.rb +6 -8
  47. data/lib/delayed/periodic.rb +72 -68
  48. data/lib/delayed/plugin.rb +2 -4
  49. data/lib/delayed/pool.rb +205 -168
  50. data/lib/delayed/rails_reloader_plugin.rb +30 -0
  51. data/lib/delayed/server/helpers.rb +6 -6
  52. data/lib/delayed/server.rb +51 -54
  53. data/lib/delayed/settings.rb +96 -81
  54. data/lib/delayed/testing.rb +21 -22
  55. data/lib/delayed/version.rb +1 -1
  56. data/lib/delayed/work_queue/in_process.rb +21 -17
  57. data/lib/delayed/work_queue/parent_process/client.rb +55 -53
  58. data/lib/delayed/work_queue/parent_process/server.rb +245 -207
  59. data/lib/delayed/work_queue/parent_process.rb +52 -53
  60. data/lib/delayed/worker/consul_health_check.rb +32 -33
  61. data/lib/delayed/worker/health_check.rb +35 -27
  62. data/lib/delayed/worker/null_health_check.rb +3 -1
  63. data/lib/delayed/worker/process_helper.rb +11 -12
  64. data/lib/delayed/worker.rb +257 -244
  65. data/lib/delayed/yaml_extensions.rb +12 -10
  66. data/lib/delayed_job.rb +37 -37
  67. data/lib/inst-jobs.rb +1 -1
  68. data/spec/active_record_job_spec.rb +152 -139
  69. data/spec/delayed/cli_spec.rb +7 -7
  70. data/spec/delayed/daemon_spec.rb +10 -9
  71. data/spec/delayed/message_sending_spec.rb +16 -9
  72. data/spec/delayed/periodic_spec.rb +14 -21
  73. data/spec/delayed/server_spec.rb +38 -38
  74. data/spec/delayed/settings_spec.rb +26 -25
  75. data/spec/delayed/work_queue/in_process_spec.rb +8 -9
  76. data/spec/delayed/work_queue/parent_process/client_spec.rb +17 -12
  77. data/spec/delayed/work_queue/parent_process/server_spec.rb +118 -42
  78. data/spec/delayed/work_queue/parent_process_spec.rb +21 -23
  79. data/spec/delayed/worker/consul_health_check_spec.rb +37 -50
  80. data/spec/delayed/worker/health_check_spec.rb +60 -52
  81. data/spec/delayed/worker_spec.rb +53 -24
  82. data/spec/sample_jobs.rb +45 -15
  83. data/spec/shared/delayed_batch.rb +74 -67
  84. data/spec/shared/delayed_method.rb +143 -102
  85. data/spec/shared/performable_method.rb +39 -38
  86. data/spec/shared/shared_backend.rb +801 -440
  87. data/spec/shared/testing.rb +14 -14
  88. data/spec/shared/worker.rb +157 -149
  89. data/spec/shared_jobs_specs.rb +13 -13
  90. data/spec/spec_helper.rb +57 -56
  91. metadata +183 -103
  92. data/lib/delayed/backend/redis/bulk_update.lua +0 -50
  93. data/lib/delayed/backend/redis/destroy_job.lua +0 -2
  94. data/lib/delayed/backend/redis/enqueue.lua +0 -29
  95. data/lib/delayed/backend/redis/fail_job.lua +0 -5
  96. data/lib/delayed/backend/redis/find_available.lua +0 -3
  97. data/lib/delayed/backend/redis/functions.rb +0 -59
  98. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
  99. data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
  100. data/lib/delayed/backend/redis/job.rb +0 -535
  101. data/lib/delayed/backend/redis/set_running.lua +0 -5
  102. data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
  103. data/spec/gemfiles/42.gemfile +0 -7
  104. data/spec/gemfiles/50.gemfile +0 -7
  105. data/spec/gemfiles/51.gemfile +0 -7
  106. data/spec/gemfiles/52.gemfile +0 -7
  107. data/spec/gemfiles/60.gemfile +0 -7
  108. data/spec/redis_job_spec.rb +0 -148
data/lib/delayed/worker.rb
@@ -1,236 +1,249 @@
 # frozen_string_literal: true

- module Delayed
-
- class TimeoutError < RuntimeError; end
-
- require 'tmpdir'
- require 'set'
-
- class Worker
- include Delayed::Logging
- SIGNALS = %i{INT TERM QUIT}
-
- attr_reader :config, :queue_name, :min_priority, :max_priority, :work_queue
-
- # Callback to fire when a delayed job fails max_attempts times. If this
- # callback is defined, then the value of destroy_failed_jobs is ignored, and
- # the job is destroyed if this block returns true.
- #
- # This allows for destroying "uninteresting" failures, while keeping around
- # interesting failures to be investigated later.
- #
- # The block is called with args(job, last_exception)
- def self.on_max_failures=(block)
- @@on_max_failures = block
- end
- cattr_reader :on_max_failures
-
- cattr_accessor :plugins
- self.plugins = Set.new
+ require "delayed/rails_reloader_plugin"

- def self.lifecycle
- @lifecycle ||= Delayed::Lifecycle.new
+ module Delayed
+ class TimeoutError < RuntimeError; end
+
+ class RetriableError < RuntimeError
+ # this error is a special case. You _should_ raise
+ # it from inside the rescue block for another error,
+ # because it indicates: "something made this job fail
+ # but we're pretty sure it's transient and it's safe to try again".
+ # the workflow is still the same (retry will happen unless
+ # retries are exhausted), but it won't call the :error
+ # callback unless it can't retry anymore. It WILL call the
+ # separate ":retry" callback, which is ONLY activated
+ # for this kind of error.
 end

- def self.current_job
- Thread.current[:running_delayed_job]
- end
+ require "tmpdir"
+ require "set"
+
+ class Worker
+ include Delayed::Logging
+ SIGNALS = %i[INT TERM QUIT].freeze
+
+ attr_reader :config, :queue_name, :min_priority, :max_priority, :work_queue
+
+ class << self
+ # Callback to fire when a delayed job fails max_attempts times. If this
+ # callback is defined, then the value of destroy_failed_jobs is ignored, and
+ # the job is destroyed if this block returns true.
+ #
+ # This allows for destroying "uninteresting" failures, while keeping around
+ # interesting failures to be investigated later.
+ #
+ # The block is called with args(job, last_exception)
+ attr_accessor :on_max_failures
+ end

- def self.running_job(job)
- Thread.current[:running_delayed_job] = job
- yield
- ensure
- Thread.current[:running_delayed_job] = nil
- end
+ cattr_accessor :plugins
+ self.plugins = Set.new

- def initialize(options = {})
- @exit = false
- @parent_pid = options[:parent_pid]
- @queue_name = options[:queue] ||= Settings.queue
- @min_priority = options[:min_priority]
- @max_priority = options[:max_priority]
- @max_job_count = options[:worker_max_job_count].to_i
- @max_memory_usage = options[:worker_max_memory_usage].to_i
- @work_queue = options.delete(:work_queue) || WorkQueue::InProcess.new
- @health_check_type = Settings.worker_health_check_type
- @health_check_config = Settings.worker_health_check_config
- @config = options
- @job_count = 0
-
- @self_pipe = IO.pipe
- @signal_queue = []
-
- app = Rails.application
- if app && !app.config.cache_classes
- Delayed::Worker.lifecycle.around(:perform) do |worker, job, &block|
- reload = app.config.reload_classes_only_on_change != true || app.reloaders.map(&:updated?).any?
-
- if reload
- if defined?(ActiveSupport::Reloader)
- Rails.application.reloader.reload!
- else
- ActionDispatch::Reloader.prepare!
- end
- end
+ def self.lifecycle
+ @lifecycle ||= Delayed::Lifecycle.new
+ end

- begin
- block.call(worker, job)
- ensure
- ActionDispatch::Reloader.cleanup! if reload && !defined?(ActiveSupport::Reloader)
- end
- end
+ def self.current_job
+ Thread.current[:running_delayed_job]
 end

- plugins.each { |plugin| plugin.inject! }
- end
+ def self.running_job(job)
+ Thread.current[:running_delayed_job] = job
+ yield
+ ensure
+ Thread.current[:running_delayed_job] = nil
+ end

- def name
- @name ||= "#{Socket.gethostname rescue "X"}:#{self.id}"
- end
+ def initialize(options = {})
+ @exit = false
+ @parent_pid = options[:parent_pid]
+ @queue_name = options[:queue] ||= Settings.queue
+ @min_priority = options[:min_priority]
+ @max_priority = options[:max_priority]
+ @max_job_count = options[:worker_max_job_count].to_i
+ @max_memory_usage = options[:worker_max_memory_usage].to_i
+ @work_queue = options.delete(:work_queue) || WorkQueue::InProcess.new
+ @health_check_type = Settings.worker_health_check_type
+ @health_check_config = Settings.worker_health_check_config
+ @config = options
+ @job_count = 0
+
+ @signal_queue = []
+
+ plugins << Delayed::RailsReloaderPlugin
+ plugins.each(&:inject!)
+ end

- def set_process_name(new_name)
- $0 = "delayed:#{new_name}"
- end
+ def name
+ @name ||= "#{Socket.gethostname rescue 'X'}:#{id}"
+ end

- def exit?
- @exit
- end
+ def process_name=(new_name)
+ $0 = "delayed:#{new_name}"
+ end

- def wake_up
- @self_pipe[1].write_nonblock('.', exception: false)
- work_queue.wake_up
- end
+ def exit?
+ !!@exit || parent_exited?
+ end

- def start
- logger.info "Starting worker"
- set_process_name("start:#{Settings.worker_procname_prefix}#{@queue_name}:#{min_priority || 0}:#{max_priority || 'max'}")
+ def parent_exited?
+ @parent_pid && @parent_pid != Process.ppid
+ end

- work_thread = Thread.current
- SIGNALS.each do |sig|
- trap(sig) { @signal_queue << sig; wake_up }
+ def wake_up
+ @self_pipe[1].write_nonblock(".", exception: false)
+ work_queue.wake_up
 end

- raise 'Could not register health_check' unless health_check.start
-
- signal_processor = Thread.new do
- loop do
- @self_pipe[0].read(1)
- case @signal_queue.pop
- when :INT, :TERM
- @exit = true # get the main thread to bail early if it's waiting for a job
- work_thread.raise(SystemExit) # Force the main thread to bail out of the current job
- cleanup! # we're going to get SIGKILL'd in a moment, so clean up asap
- break
- when :QUIT
- @exit = true
- else
- logger.error "Unknown signal '#{sig}' received"
+ def start
+ logger.info "Starting worker"
+ self.process_name =
+ "start:#{Settings.worker_procname_prefix}#{@queue_name}:#{min_priority || 0}:#{max_priority || 'max'}"
+ @self_pipe = IO.pipe
+ work_queue.init
+
+ work_thread = Thread.current
+ SIGNALS.each do |sig|
+ trap(sig) do
+ @signal_queue << sig
+ wake_up
 end
 end
- end

- self.class.lifecycle.run_callbacks(:execute, self) do
- until exit? do
- run
+ raise "Could not register health_check" unless health_check.start
+
+ signal_processor = Thread.new do
+ loop do
+ @self_pipe[0].read(1)
+ case @signal_queue.pop
+ when :INT, :TERM
+ @exit = true # get the main thread to bail early if it's waiting for a job
+ work_thread.raise(SystemExit) # Force the main thread to bail out of the current job
+ cleanup! # we're going to get SIGKILL'd in a moment, so clean up asap
+ break
+ when :QUIT
+ @exit = true
+ else
+ logger.error "Unknown signal '#{sig}' received"
+ end
+ end
 end
- end

- logger.info "Stopping worker"
- rescue => e
- Rails.logger.fatal("Child process died: #{e.inspect}") rescue nil
- self.class.lifecycle.run_callbacks(:exceptional_exit, self, e) { }
- ensure
- cleanup!
+ self.class.lifecycle.run_callbacks(:execute, self) do
+ run until exit?
+ end
+
+ logger.info "Stopping worker"
+ rescue => e
+ Rails.logger.fatal("Child process died: #{e.inspect}") rescue nil
+ self.class.lifecycle.run_callbacks(:exceptional_exit, self, e) { nil }
+ ensure
+ cleanup!

- if signal_processor
- signal_processor.kill
- signal_processor.join
+ if signal_processor
+ signal_processor.kill
+ signal_processor.join
+ end
+
+ @self_pipe&.each(&:close)
+ @self_pipe = nil
 end
- end

- def cleanup!
- return if cleaned?
+ def cleanup!
+ return if cleaned?

- health_check.stop
- work_queue.close
- Delayed::Job.clear_locks!(name)
+ health_check.stop
+ work_queue.close
+ Delayed::Job.clear_locks!(name)

- @cleaned = true
- end
+ @cleaned = true
+ end

- def cleaned?
- @cleaned
- end
+ def cleaned?
+ @cleaned
+ end

- def run
- return if exit?
- self.class.lifecycle.run_callbacks(:loop, self) do
- set_process_name("pop:#{Settings.worker_procname_prefix}#{@queue_name}:#{min_priority || 0}:#{max_priority || 'max'}")
- job = self.class.lifecycle.run_callbacks(:pop, self) do
- work_queue.get_and_lock_next_available(name, config)
- end
+ def run
+ return if exit?

- if job
- configure_for_job(job) do
- @job_count += perform(job)
+ self.class.lifecycle.run_callbacks(:loop, self) do
+ self.process_name =
+ "pop:#{Settings.worker_procname_prefix}#{@queue_name}:#{min_priority || 0}:#{max_priority || 'max'}"
+ job = self.class.lifecycle.run_callbacks(:pop, self) do
+ work_queue.get_and_lock_next_available(name, config)
+ end

- if @max_job_count > 0 && @job_count >= @max_job_count
- logger.debug "Max job count of #{@max_job_count} exceeded, dying"
- @exit = true
- end
+ if job
+ configure_for_job(job) do
+ @job_count += perform(job)

- if @max_memory_usage > 0
- memory = sample_memory
- if memory > @max_memory_usage
- logger.debug "Memory usage of #{memory} exceeds max of #{@max_memory_usage}, dying"
+ if @max_job_count.positive? && @job_count >= @max_job_count
+ logger.debug "Max job count of #{@max_job_count} exceeded, dying"
 @exit = true
- else
- logger.debug "Memory usage: #{memory}"
+ end
+
+ if @max_memory_usage.positive?
+ memory = sample_memory
+ if memory > @max_memory_usage
+ logger.debug "Memory usage of #{memory} exceeds max of #{@max_memory_usage}, dying"
+ @exit = true
+ else
+ logger.debug "Memory usage: #{memory}"
+ end
 end
 end
+ else
+ self.process_name =
+ "wait:#{Settings.worker_procname_prefix}#{@queue_name}:#{min_priority || 0}:#{max_priority || 'max'}"
+ sleep(Settings.sleep_delay + (rand * Settings.sleep_delay_stagger)) unless exit?
 end
- else
- set_process_name("wait:#{Settings.worker_procname_prefix}#{@queue_name}:#{min_priority || 0}:#{max_priority || 'max'}")
- sleep(Settings.sleep_delay + (rand * Settings.sleep_delay_stagger)) unless exit?
 end
 end
- end

- def perform(job)
- count = 1
- raise Delayed::Backend::JobExpired, "job expired at #{job.expires_at}" if job.expired?
- self.class.lifecycle.run_callbacks(:perform, self, job) do
- set_process_name("run:#{Settings.worker_procname_prefix}#{job.id}:#{job.name}")
- logger.info("Processing #{log_job(job, :long)}")
- runtime = Benchmark.realtime do
- if job.batch?
- # each job in the batch will have perform called on it, so we don't
- # need a timeout around this
- count = perform_batch(job)
- else
- job.invoke_job
+ def perform(job)
+ begin
+ count = 1
+ raise Delayed::Backend::JobExpired, "job expired at #{job.expires_at}" if job.expired?
+
+ self.class.lifecycle.run_callbacks(:perform, self, job) do
+ self.process_name = "run:#{Settings.worker_procname_prefix}#{job.id}:#{job.name}"
+ logger.info("Processing #{log_job(job, :long)}")
+ runtime = Benchmark.realtime do
+ if job.batch?
+ # each job in the batch will have perform called on it, so we don't
+ # need a timeout around this
+ count = perform_batch(job)
+ else
+ job.invoke_job
+ end
+ job.destroy
+ end
+ logger.info("Completed #{log_job(job, :short)} #{format('%.0fms', (runtime * 1000))}")
+ end
+ rescue ::Delayed::RetriableError => e
+ can_retry = job.attempts + 1 < job.inferred_max_attempts
+ callback_type = can_retry ? :retry : :error
+ self.class.lifecycle.run_callbacks(callback_type, self, job, e) do
+ handle_failed_job(job, e)
+ end
+ rescue SystemExit => e
+ # There wasn't really a failure here so no callbacks and whatnot needed,
+ # still reschedule the job though.
+ job.reschedule(e)
+ rescue Exception => e # rubocop:disable Lint/RescueException
+ self.class.lifecycle.run_callbacks(:error, self, job, e) do
+ handle_failed_job(job, e)
 end
- job.destroy
 end
- logger.info("Completed #{log_job(job)} #{"%.0fms" % (runtime * 1000)}")
- end
- count
- rescue SystemExit => se
- # There wasn't really a failure here so no callbacks and whatnot needed,
- # still reschedule the job though.
- job.reschedule(se)
- count
- rescue Exception => e
- self.class.lifecycle.run_callbacks(:error, self, job, e) do
- handle_failed_job(job, e)
+ count
 end
- count
- end

- def perform_batch(parent_job)
- batch = parent_job.payload_object
- if batch.mode == :serial
+ def perform_batch(parent_job)
+ batch = parent_job.payload_object
+ return unless batch.mode == :serial
+
 batch.jobs.each do |job|
 job.source = parent_job.source
 job.create_and_lock!(name)
@@ -240,72 +253,72 @@ class Worker
 end
 batch.items.size
 end
- end

- def handle_failed_job(job, error)
- job.last_error = "#{error.message}\n#{error.backtrace.join("\n")}"
- logger.error("Failed with #{error.class} [#{error.message}] (#{job.attempts} attempts)")
- job.reschedule(error)
- end
+ def handle_failed_job(job, error)
+ job.last_error = "#{error.message}\n#{error.backtrace.join("\n")}"
+ logger.error("Failed with #{error.class} [#{error.message}] (#{job.attempts} attempts)")
+ job.reschedule(error)
+ end

- def id
- Process.pid
- end
+ def id
+ Process.pid
+ end

- def log_job(job, format = :short)
- case format
- when :long
- "#{job.full_name} #{ Settings.job_detailed_log_format.call(job) }"
- else
- job.full_name
+ def log_job(job, format = :short)
+ case format
+ when :long
+ "#{job.full_name} #{Settings.job_detailed_log_format.call(job)}"
+ else
+ "#{job.full_name} #{Settings.job_short_log_format.call(job)}".strip
+ end
 end
- end

- # set up the session context information, so that it gets logged with the job log lines
- # also set up a unique tmpdir, which will get removed at the end of the job.
- def configure_for_job(job)
- previous_tmpdir = ENV['TMPDIR']
+ # set up the session context information, so that it gets logged with the job log lines
+ # also set up a unique tmpdir, which will get removed at the end of the job.
+ def configure_for_job(job)
+ previous_tmpdir = ENV.fetch("TMPDIR", nil)

- self.class.running_job(job) do
- dir = Dir.mktmpdir("job-#{job.id}-#{self.name.gsub(/[^\w\.]/, '.')}-")
- begin
- ENV['TMPDIR'] = dir
- yield
- ensure
- FileUtils.remove_entry(dir, true)
+ self.class.running_job(job) do
+ dir = Dir.mktmpdir("job-#{job.id}-#{name.gsub(/[^\w.]/, '.')}-")
+ begin
+ ENV["TMPDIR"] = dir
+ yield
+ ensure
+ FileUtils.remove_entry(dir, true)
+ end
 end
+ ensure
+ ENV["TMPDIR"] = previous_tmpdir
 end
- ensure
- ENV['TMPDIR'] = previous_tmpdir
- end
-
- def health_check
- @health_check ||= HealthCheck.build(
- type: @health_check_type,
- worker_name: name,
- config: @health_check_config
- )
- end

- # `sample` reports KB, not B
- if File.directory?("/proc")
- # linux w/ proc fs
- LINUX_PAGE_SIZE = (size = `getconf PAGESIZE`.to_i; size > 0 ? size : 4096)
- def sample_memory
- s = File.read("/proc/#{Process.pid}/statm").to_i rescue 0
- s * LINUX_PAGE_SIZE / 1024
+ def health_check
+ @health_check ||= HealthCheck.build(
+ type: @health_check_type,
+ worker_name: name,
+ config: @health_check_config
+ )
 end
- else
- # generic unix solution
- def sample_memory
- if Rails.env.test?
- 0
- else
- # hmm this is actually resident set size, doesn't include swapped-to-disk
- # memory.
- `ps -o rss= -p #{Process.pid}`.to_i
+
+ # `sample` reports KB, not B
+ if File.directory?("/proc")
+ # linux w/ proc fs
+ LINUX_PAGE_SIZE = (size = `getconf PAGESIZE`.to_i
+ size.positive? ? size : 4096)
+ def sample_memory
+ s = File.read("/proc/#{Process.pid}/statm").to_i rescue 0
+ s * LINUX_PAGE_SIZE / 1024
+ end
+ else
+ # generic unix solution
+ def sample_memory
+ if Rails.env.test?
+ 0
+ else
+ # hmm this is actually resident set size, doesn't include swapped-to-disk
+ # memory.
+ `ps -o rss= -p #{Process.pid}`.to_i
+ end
 end
 end
 end
 end
- end
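
The new Delayed::RetriableError class and the class-level on_max_failures accessor shown above are the two hooks application code can use to steer retry behavior. A minimal sketch of how a job might use them follows; SendWebhookJob, its URL argument, and the rescued network errors are illustrative and not part of the gem:

    require "net/http"
    require "inst-jobs" # or however the application normally loads the gem

    # A job whose failures are usually transient network trouble.
    class SendWebhookJob
      def initialize(url)
        @url = url
      end

      def perform
        Net::HTTP.get(URI(@url))
      rescue Timeout::Error, Errno::ECONNRESET => e
        # Re-raise as RetriableError: the worker still reschedules the job, but it
        # fires the :retry lifecycle callback instead of :error until retries run out.
        raise Delayed::RetriableError, e.message
      end
    end

    # Called with (job, last_exception) once max_attempts is exhausted; returning
    # true destroys the failed job, returning false keeps it around for investigation.
    Delayed::Worker.on_max_failures = lambda do |_job, last_exception|
      last_exception.is_a?(Delayed::RetriableError)
    end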
data/lib/delayed/yaml_extensions.rb
@@ -2,36 +2,37 @@
 # New definitions for YAML to aid in serialization and deserialization of delayed jobs.

- require 'yaml'
+ require "yaml"

 # These two added domain types are for backwards compatibility with jobs created
 # using the old syck tags, as syck didn't have built-in module/class dumping. We
 # now use Psych's built-in tags, which are `!ruby/module` and `!ruby/class`. At
 # some point we can remove these, once there are no more jobs in any queues with
 # these tags.
- Psych.add_domain_type("ruby/object", "Module") do |type, val|
+ Psych.add_domain_type("ruby/object", "Module") do |_type, val|
 val.constantize
 end
- Psych.add_domain_type("ruby/object", "Class") do |type, val|
+ Psych.add_domain_type("ruby/object", "Class") do |_type, val|
 val.constantize
 end

 # Tell YAML how to intelligently load ActiveRecord objects, using the
 # database rather than just serializing their attributes to the YAML. This
 # ensures the object is up to date when we use it in the job.
- class ActiveRecord::Base
- def encode_with(coder)
- if id.nil?
- raise("Can't serialize unsaved ActiveRecord object for delayed job: #{self.inspect}")
+ module ActiveRecord
+ class Base
+ def encode_with(coder)
+ raise("Can't serialize unsaved ActiveRecord object for delayed job: #{inspect}") if id.nil?
+
+ coder.scalar("!ruby/ActiveRecord:#{self.class.name}", id.to_s)
 end
- coder.scalar("!ruby/ActiveRecord:#{self.class.name}", id.to_s)
 end
 end

 module Delayed
 module PsychExt
 module ToRuby
- def visit_Psych_Nodes_Scalar(object)
+ def visit_Psych_Nodes_Scalar(object) # rubocop:disable Naming/MethodName
 case object.tag
 when %r{^!ruby/ActiveRecord:(.+)$}
 begin
@@ -50,7 +51,8 @@ module Delayed
 end

 def resolve_class(klass_name)
- return nil if !klass_name || klass_name.empty?
+ return nil if klass_name.blank?
+
 klass_name.constantize
 rescue
 super
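
The ActiveRecord::Base#encode_with override above stores only the record's class and id in a job's YAML handler, and the Psych scalar visitor resolves the !ruby/ActiveRecord: tag back through the database when the job is loaded, so the job runs against the current row. A rough sketch of the round trip; the User model is illustrative and the exact YAML quoting depends on the Psych version:

    user = User.find(42)
    YAML.dump(user)      # => "--- !ruby/ActiveRecord:User '42'\n" (roughly; only the id is stored)

    # Dumping an unsaved record raises, since there is no id to look up later:
    YAML.dump(User.new)  # => RuntimeError: Can't serialize unsaved ActiveRecord object ...

    # When a worker deserializes the handler, the !ruby/ActiveRecord:User tag is
    # resolved by fetching the record again (roughly User.find(42)), so the job sees
    # fresh attributes rather than whatever was serialized at enqueue time.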