inst-jobs 2.0.0 → 3.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
  3. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
  4. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
  5. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
  6. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
  7. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
  8. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
  9. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
  10. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
  11. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
  12. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
  13. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
  14. data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
  15. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
  16. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
  17. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
  18. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
  19. data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
  20. data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
  21. data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
  22. data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
  23. data/db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb +56 -0
  24. data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb +27 -0
  25. data/db/migrate/20211101190934_update_after_delete_trigger_for_singleton_index.rb +137 -0
  26. data/db/migrate/20211207094200_update_after_delete_trigger_for_singleton_transition_cases.rb +171 -0
  27. data/db/migrate/20211220112800_fix_singleton_race_condition_insert.rb +59 -0
  28. data/db/migrate/20211220113000_fix_singleton_race_condition_delete.rb +207 -0
  29. data/db/migrate/20220127091200_fix_singleton_unique_constraint.rb +31 -0
  30. data/db/migrate/20220128084800_update_insert_trigger_for_singleton_unique_constraint_change.rb +60 -0
  31. data/db/migrate/20220128084900_update_delete_trigger_for_singleton_unique_constraint_change.rb +209 -0
  32. data/db/migrate/20220203063200_remove_old_singleton_index.rb +31 -0
  33. data/db/migrate/20220328152900_add_failed_jobs_indicies.rb +12 -0
  34. data/exe/inst_jobs +3 -2
  35. data/lib/delayed/backend/active_record.rb +226 -168
  36. data/lib/delayed/backend/base.rb +119 -72
  37. data/lib/delayed/batch.rb +11 -9
  38. data/lib/delayed/cli.rb +98 -84
  39. data/lib/delayed/core_ext/kernel.rb +4 -2
  40. data/lib/delayed/daemon.rb +70 -74
  41. data/lib/delayed/job_tracking.rb +26 -25
  42. data/lib/delayed/lifecycle.rb +28 -23
  43. data/lib/delayed/log_tailer.rb +17 -17
  44. data/lib/delayed/logging.rb +13 -16
  45. data/lib/delayed/message_sending.rb +43 -52
  46. data/lib/delayed/performable_method.rb +6 -8
  47. data/lib/delayed/periodic.rb +72 -68
  48. data/lib/delayed/plugin.rb +2 -4
  49. data/lib/delayed/pool.rb +205 -168
  50. data/lib/delayed/rails_reloader_plugin.rb +30 -0
  51. data/lib/delayed/server/helpers.rb +6 -6
  52. data/lib/delayed/server.rb +51 -54
  53. data/lib/delayed/settings.rb +96 -81
  54. data/lib/delayed/testing.rb +21 -22
  55. data/lib/delayed/version.rb +1 -1
  56. data/lib/delayed/work_queue/in_process.rb +21 -17
  57. data/lib/delayed/work_queue/parent_process/client.rb +55 -53
  58. data/lib/delayed/work_queue/parent_process/server.rb +245 -207
  59. data/lib/delayed/work_queue/parent_process.rb +52 -53
  60. data/lib/delayed/worker/consul_health_check.rb +32 -33
  61. data/lib/delayed/worker/health_check.rb +35 -27
  62. data/lib/delayed/worker/null_health_check.rb +3 -1
  63. data/lib/delayed/worker/process_helper.rb +11 -12
  64. data/lib/delayed/worker.rb +257 -244
  65. data/lib/delayed/yaml_extensions.rb +12 -10
  66. data/lib/delayed_job.rb +37 -37
  67. data/lib/inst-jobs.rb +1 -1
  68. data/spec/active_record_job_spec.rb +152 -139
  69. data/spec/delayed/cli_spec.rb +7 -7
  70. data/spec/delayed/daemon_spec.rb +10 -9
  71. data/spec/delayed/message_sending_spec.rb +16 -9
  72. data/spec/delayed/periodic_spec.rb +14 -21
  73. data/spec/delayed/server_spec.rb +38 -38
  74. data/spec/delayed/settings_spec.rb +26 -25
  75. data/spec/delayed/work_queue/in_process_spec.rb +8 -9
  76. data/spec/delayed/work_queue/parent_process/client_spec.rb +17 -12
  77. data/spec/delayed/work_queue/parent_process/server_spec.rb +118 -42
  78. data/spec/delayed/work_queue/parent_process_spec.rb +21 -23
  79. data/spec/delayed/worker/consul_health_check_spec.rb +37 -50
  80. data/spec/delayed/worker/health_check_spec.rb +60 -52
  81. data/spec/delayed/worker_spec.rb +53 -24
  82. data/spec/sample_jobs.rb +45 -15
  83. data/spec/shared/delayed_batch.rb +74 -67
  84. data/spec/shared/delayed_method.rb +143 -102
  85. data/spec/shared/performable_method.rb +39 -38
  86. data/spec/shared/shared_backend.rb +801 -440
  87. data/spec/shared/testing.rb +14 -14
  88. data/spec/shared/worker.rb +157 -149
  89. data/spec/shared_jobs_specs.rb +13 -13
  90. data/spec/spec_helper.rb +57 -56
  91. metadata +183 -103
  92. data/lib/delayed/backend/redis/bulk_update.lua +0 -50
  93. data/lib/delayed/backend/redis/destroy_job.lua +0 -2
  94. data/lib/delayed/backend/redis/enqueue.lua +0 -29
  95. data/lib/delayed/backend/redis/fail_job.lua +0 -5
  96. data/lib/delayed/backend/redis/find_available.lua +0 -3
  97. data/lib/delayed/backend/redis/functions.rb +0 -59
  98. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
  99. data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
  100. data/lib/delayed/backend/redis/job.rb +0 -535
  101. data/lib/delayed/backend/redis/set_running.lua +0 -5
  102. data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
  103. data/spec/gemfiles/42.gemfile +0 -7
  104. data/spec/gemfiles/50.gemfile +0 -7
  105. data/spec/gemfiles/51.gemfile +0 -7
  106. data/spec/gemfiles/52.gemfile +0 -7
  107. data/spec/gemfiles/60.gemfile +0 -7
  108. data/spec/redis_job_spec.rb +0 -148
@@ -1,11 +1,13 @@
 # frozen_string_literal: true
 
-class ActiveRecord::Base
-  def self.load_for_delayed_job(id)
-    if id
-      find(id)
-    else
-      super
+module ActiveRecord
+  class Base
+    def self.load_for_delayed_job(id)
+      if id
+        find(id)
+      else
+        super
+      end
     end
   end
 end
@@ -13,9 +15,13 @@ end
 module Delayed
   module Backend
     module ActiveRecord
+      class AbstractJob < ::ActiveRecord::Base
+        self.abstract_class = true
+      end
+
       # A job object that is persisted to the database.
       # Contains the work object as a YAML field.
-      class Job < ::ActiveRecord::Base
+      class Job < AbstractJob
         include Delayed::Backend::Base
         self.table_name = :delayed_jobs
 
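The new `AbstractJob` base class gives the job models their own branch of the connection hierarchy instead of hanging directly off `::ActiveRecord::Base`. A minimal sketch of pointing it at a dedicated queue database, assuming a Rails 6+ multi-database setup and a hypothetical `queue` entry in `database.yml`:

```ruby
# config/initializers/delayed_jobs.rb -- illustrative only; the :queue role/database
# names are assumptions, not part of the gem.
Delayed::Backend::ActiveRecord::AbstractJob.connects_to database: { writing: :queue, reading: :queue }
```

Because `Job` (and therefore `Failed`) inherits from `AbstractJob`, a connection configured this way applies to the whole jobs backend.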
@@ -27,17 +33,25 @@ module Delayed
 
         class << self
           def create(attributes, &block)
-            return super if connection.prepared_statements || Rails.version < '5.2'
-
+            on_conflict = attributes.delete(:on_conflict)
             # modified from ActiveRecord::Persistence.create and ActiveRecord::Persistence#_insert_record
             job = new(attributes, &block)
-            job.single_step_create
+            job.single_step_create(on_conflict: on_conflict)
+          end
+
+          def attempt_advisory_lock(lock_name)
+            fn_name = connection.quote_table_name("half_md5_as_bigint")
+            connection.select_value("SELECT pg_try_advisory_xact_lock(#{fn_name}('#{lock_name}'));")
+          end
+
+          def advisory_lock(lock_name)
+            fn_name = connection.quote_table_name("half_md5_as_bigint")
+            connection.execute("SELECT pg_advisory_xact_lock(#{fn_name}('#{lock_name}'));")
           end
         end
 
-        def single_step_create
+        def single_step_create(on_conflict: nil)
           connection = self.class.connection
-          return save if connection.prepared_statements || Rails.version < '5.2'
 
           # a before_save callback that we're skipping
           initialize_defaults
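The two new class-level helpers wrap PostgreSQL transaction-scoped advisory locks keyed through the gem's `half_md5_as_bigint` SQL function. A minimal sketch of how they differ, assuming `Delayed::Job` is the ActiveRecord backend and `do_exclusive_work` is a hypothetical application method:

```ruby
Delayed::Job.transaction do
  if Delayed::Job.attempt_advisory_lock("some-lock-name")
    # pg_try_advisory_xact_lock returned true; the lock is held until commit/rollback
    do_exclusive_work
  end
  # on false, another transaction holds the lock and this one simply skips the work
end

Delayed::Job.transaction do
  Delayed::Job.advisory_lock("some-lock-name") # pg_advisory_xact_lock: blocks until free
  do_exclusive_work
end
```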
@@ -45,33 +59,74 @@ module Delayed
           current_time = current_time_from_proper_timezone
 
           all_timestamp_attributes_in_model.each do |column|
-            if !attribute_present?(column)
-              _write_attribute(column, current_time)
-            end
+            _write_attribute(column, current_time) unless attribute_present?(column)
           end
 
-          if Rails.version >= '6'
-            attribute_names = attribute_names_for_partial_writes
-            attribute_names = attributes_for_create(attribute_names)
-            values = attributes_with_values(attribute_names)
-          else
-            attribute_names = partial_writes? ? keys_for_partial_write : self.attribute_names
-            values = attributes_with_values_for_create(attribute_names)
+          attribute_names = if Rails.version < "7.0"
+                              attribute_names_for_partial_writes
+                            else
+                              attribute_names_for_partial_inserts
+                            end
+          attribute_names = attributes_for_create(attribute_names)
+          values = attributes_with_values(attribute_names)
+
+          im = if Rails.version < "7.0"
+                 self.class.arel_table.compile_insert(self.class.send(:_substitute_values, values))
+               else
+                 im = Arel::InsertManager.new(self.class.arel_table)
+                 im.insert(values.transform_keys { |name| self.class.arel_table[name] })
+                 im
+               end
+
+          lock_and_insert = values["strand"] && instance_of?(Job)
+          # can't use prepared statements if we're combining multiple statemenets
+          sql, binds = if lock_and_insert
+                         connection.unprepared_statement do
+                           connection.send(:to_sql_and_binds, im)
+                         end
+                       else
+                         connection.send(:to_sql_and_binds, im)
+                       end
+          sql = +sql
+
+          if singleton && instance_of?(Job)
+            sql << " ON CONFLICT (singleton) WHERE singleton IS NOT NULL AND locked_by IS NULL DO "
+            sql << case on_conflict
+                   when :patient, :loose
+                     "NOTHING"
+                   when :overwrite
+                     "UPDATE SET run_at=EXCLUDED.run_at, handler=EXCLUDED.handler"
+                   else # :use_earliest
+                     "UPDATE SET run_at=EXCLUDED.run_at WHERE EXCLUDED.run_at<delayed_jobs.run_at"
+                   end
           end
-          im = self.class.arel_table.compile_insert(self.class.send(:_substitute_values, values))
-          sql, _binds = connection.send(:to_sql_and_binds, im, [])
 
           # https://www.postgresql.org/docs/9.5/libpq-exec.html
-          sql = "#{sql} RETURNING id"
-          # > Multiple queries sent in a single PQexec call are processed in a single transaction,
-          # unless there are explicit BEGIN/COMMIT commands included in the query string to divide
-          # it into multiple transactions.
-          sql = "SELECT pg_advisory_xact_lock(#{connection.quote_table_name('half_md5_as_bigint')}(#{connection.quote(values['strand'])})); #{sql}" if values["strand"]
-          result = connection.execute(sql, "#{self} Create")
-          self.id = result.values.first.first
-          result.clear
-          @new_record = false
-          changes_applied
+          sql << " RETURNING id"
+
+          if lock_and_insert
+            # > Multiple queries sent in a single PQexec call are processed in a single transaction,
+            # unless there are explicit BEGIN/COMMIT commands included in the query string to divide
+            # it into multiple transactions.
+            # but we don't need to lock when inserting into Delayed::Failed
+            if values["strand"] && instance_of?(Job)
+              fn_name = connection.quote_table_name("half_md5_as_bigint")
+              quoted_strand = connection.quote(Rails.version < "7.0" ? values["strand"] : values["strand"].value)
+              sql = "SELECT pg_advisory_xact_lock(#{fn_name}(#{quoted_strand})); #{sql}"
+            end
+            result = connection.execute(sql, "#{self.class} Create")
+            self.id = result.values.first&.first
+            result.clear
+          else
+            result = connection.exec_query(sql, "#{self.class} Create", binds)
+            self.id = connection.send(:last_inserted_id, result)
+          end
+
+          # it might not get set if there was an existing record, and we didn't update it
+          if id
+            @new_record = false
+            changes_applied
+          end
 
           self
         end
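For singleton jobs, `single_step_create` now resolves conflicts inside the INSERT itself via `ON CONFLICT ... DO`, so no separate existence check is needed. A minimal sketch of the three behaviours, assuming a hypothetical `NightlyReport` payload class; the `singleton:` and `on_conflict:` keys are the ones shown in the hunk above:

```ruby
# :use_earliest (the default): keep whichever run_at is sooner on the pending row
Delayed::Job.create(payload_object: NightlyReport.new, singleton: "nightly-report")

# :overwrite: replace run_at and handler on the pending row
Delayed::Job.create(payload_object: NightlyReport.new, singleton: "nightly-report",
                    run_at: 10.minutes.from_now, on_conflict: :overwrite)

# :patient / :loose: leave the existing pending row untouched (DO NOTHING)
Delayed::Job.create(payload_object: NightlyReport.new, singleton: "nightly-report",
                    on_conflict: :patient)
```

Note that the conflict target only matches rows where `locked_by IS NULL`, so a singleton that is already running never prevents a new one from being queued.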
@@ -98,9 +153,11 @@ module Delayed
         # to raise the lock level
         before_create :lock_strand_on_create
         def lock_strand_on_create
-          if strand.present?
-            self.class.connection.execute("SELECT pg_advisory_xact_lock(#{self.class.connection.quote_table_name('half_md5_as_bigint')}(#{self.class.connection.quote(strand)}))")
-          end
+          return unless strand.present? && instance_of?(Job)
+
+          fn_name = self.class.connection.quote_table_name("half_md5_as_bigint")
+          quoted_strand_name = self.class.connection.quote(strand)
+          self.class.connection.execute("SELECT pg_advisory_xact_lock(#{fn_name}(#{quoted_strand_name}))")
         end
 
         # This overwrites the previous behavior
@@ -119,7 +176,7 @@ module Delayed
         end
 
         def self.failed
-          where("failed_at IS NOT NULL")
+          where.not(failed_at: nil)
         end
 
         def self.running
@@ -127,51 +184,54 @@ module Delayed
         end
 
         # a nice stress test:
-        # 10_000.times { |i| Kernel.delay(strand: 's1', run_at: (24.hours.ago + (rand(24.hours.to_i))).system("echo #{i} >> test1.txt") }
+        # 10_000.times do |i|
+        #   Kernel.delay(strand: 's1', run_at: (24.hours.ago + (rand(24.hours.to_i))).system("echo #{i} >> test1.txt")
+        # end
         # 500.times { |i| "ohai".delay(run_at: (12.hours.ago + (rand(24.hours.to_i))).reverse }
         # then fire up your workers
         # you can check out strand correctness: diff test1.txt <(sort -n test1.txt)
-        def self.ready_to_run(forced_latency: nil)
-          now = db_time_now
-          now -= forced_latency if forced_latency
-          where("run_at<=? AND locked_at IS NULL AND next_in_strand=?", now, true)
-        end
+        def self.ready_to_run(forced_latency: nil)
+          now = db_time_now
+          now -= forced_latency if forced_latency
+          where("run_at<=? AND locked_at IS NULL AND next_in_strand=?", now, true)
+        end
+
         def self.by_priority
           order(:priority, :run_at, :id)
         end
 
         # When a worker is exiting, make sure we don't have any locked jobs.
         def self.clear_locks!(worker_name)
-          where(:locked_by => worker_name).update_all(:locked_by => nil, :locked_at => nil)
+          where(locked_by: worker_name).update_all(locked_by: nil, locked_at: nil)
         end
 
         def self.strand_size(strand)
-          self.where(:strand => strand).count
+          where(strand: strand).count
         end
 
-        def self.running_jobs()
-          self.running.order(:locked_at)
+        def self.running_jobs
+          running.order(:locked_at)
         end
 
         def self.scope_for_flavor(flavor, query)
           scope = case flavor.to_s
-                  when 'current'
-                    self.current
-                  when 'future'
-                    self.future
-                  when 'failed'
-                    Delayed::Job::Failed
-                  when 'strand'
-                    self.where(:strand => query)
-                  when 'tag'
-                    self.where(:tag => query)
-                  else
-                    raise ArgumentError, "invalid flavor: #{flavor.inspect}"
-                  end
-
-          if %w(current future).include?(flavor.to_s)
+                  when "current"
+                    current
+                  when "future"
+                    future
+                  when "failed"
+                    Delayed::Job::Failed
+                  when "strand"
+                    where(strand: query)
+                  when "tag"
+                    where(tag: query)
+                  else
+                    raise ArgumentError, "invalid flavor: #{flavor.inspect}"
+                  end
+
+          if %w[current future].include?(flavor.to_s)
             queue = query.presence || Delayed::Settings.queue
-            scope = scope.where(:queue => queue)
+            scope = scope.where(queue: queue)
           end
 
           scope
@@ -188,8 +248,8 @@ module Delayed
                            limit,
                            offset = 0,
                            query = nil)
-          scope = self.scope_for_flavor(flavor, query)
-          order = flavor.to_s == 'future' ? 'run_at' : 'id desc'
+          scope = scope_for_flavor(flavor, query)
+          order = flavor.to_s == "future" ? "run_at" : "id desc"
           scope.order(order).limit(limit).offset(offset).to_a
         end
 
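`list_jobs` and `jobs_count` keep their signatures; only the receiver-less calls and the quoting style change. For reference, a hedged sketch of typical calls using the flavors defined by `scope_for_flavor` above (the strand and tag values are illustrative):

```ruby
Delayed::Job.list_jobs(:current, 25)             # first 25 jobs in the configured queue
Delayed::Job.list_jobs(:future, 25, 25)          # next page, ordered by run_at
Delayed::Job.list_jobs(:strand, 50, 0, "s1")     # jobs on strand "s1"
Delayed::Job.jobs_count(:tag, "MyClass#method")  # count of jobs carrying a given tag
```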
@@ -197,7 +257,7 @@ module Delayed
         # see list_jobs for documentation on arguments
         def self.jobs_count(flavor,
                             query = nil)
-          scope = self.scope_for_flavor(flavor, query)
+          scope = scope_for_flavor(flavor, query)
           scope.count
         end
 
@@ -206,30 +266,33 @@ module Delayed
        # to specify the jobs to act on, either pass opts[:ids] = [list of job ids]
        # or opts[:flavor] = <some flavor> to perform on all jobs of that flavor
        def self.bulk_update(action, opts)
-          raise("Can't #{action.to_s} failed jobs") if opts[:flavor].to_s == 'failed' && action.to_s != 'destroy'
+          raise("Can't #{action} failed jobs") if opts[:flavor].to_s == "failed" && action.to_s != "destroy"
+
           scope = if opts[:ids]
-                    if opts[:flavor] == 'failed'
-                      Delayed::Job::Failed.where(:id => opts[:ids])
-                    else
-                      self.where(:id => opts[:ids])
-                    end
-                  elsif opts[:flavor]
+                    if opts[:flavor] == "failed"
+                      Delayed::Job::Failed.where(id: opts[:ids])
+                    else
+                      where(id: opts[:ids])
+                    end
+                  elsif opts[:flavor]
 
-                    self.scope_for_flavor(opts[:flavor], opts[:query])
-                  end
+                    scope_for_flavor(opts[:flavor], opts[:query])
+                  end
 
           return 0 unless scope
 
           case action.to_s
-          when 'hold'
+          when "hold"
             scope = scope.where(locked_by: nil)
-            scope.update_all(:locked_by => ON_HOLD_LOCKED_BY, :locked_at => db_time_now, :attempts => ON_HOLD_COUNT)
-          when 'unhold'
+            scope.update_all(locked_by: ON_HOLD_LOCKED_BY, locked_at: db_time_now, attempts: ON_HOLD_COUNT)
+          when "unhold"
             now = db_time_now
             scope = scope.where(locked_by: ON_HOLD_LOCKED_BY)
-            scope.update_all(["locked_by = NULL, locked_at = NULL, attempts = 0, run_at = (CASE WHEN run_at > ? THEN run_at ELSE ? END), failed_at = NULL", now, now])
-          when 'destroy'
-            scope = scope.where("locked_by IS NULL OR locked_by=?", ON_HOLD_LOCKED_BY) unless opts[:flavor] == 'failed'
+            scope.update_all([<<~SQL.squish, now, now])
+              locked_by=NULL, locked_at=NULL, attempts=0, run_at=(CASE WHEN run_at > ? THEN run_at ELSE ? END), failed_at=NULL
+            SQL
+          when "destroy"
+            scope = scope.where("locked_by IS NULL OR locked_by=?", ON_HOLD_LOCKED_BY) unless opts[:flavor] == "failed"
             scope.delete_all
           end
         end
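`bulk_update` keeps the same hold/unhold/destroy semantics; the rewrite only squishes the SQL and modernizes the hash syntax. A hedged sketch of the call shapes it accepts (the ids and strand name are illustrative):

```ruby
Delayed::Job.bulk_update(:hold, flavor: "strand", query: "s1")   # park unlocked jobs on a strand
Delayed::Job.bulk_update(:unhold, ids: [42, 43])                 # release previously held jobs
Delayed::Job.bulk_update(:destroy, flavor: "failed")             # failed jobs may only be destroyed
```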
@@ -240,23 +303,24 @@ module Delayed
        def self.tag_counts(flavor,
                            limit,
                            offset = 0)
-          raise(ArgumentError, "invalid flavor: #{flavor}") unless %w(current all).include?(flavor.to_s)
+          raise(ArgumentError, "invalid flavor: #{flavor}") unless %w[current all].include?(flavor.to_s)
+
           scope = case flavor.to_s
-                  when 'current'
-                    self.current
-                  when 'all'
-                    self
-                  end
+                  when "current"
+                    current
+                  when "all"
+                    self
+                  end
 
           scope = scope.group(:tag).offset(offset).limit(limit)
-          scope.order(Arel.sql("COUNT(tag) DESC")).count.map { |t,c| { :tag => t, :count => c } }
+          scope.order(Arel.sql("COUNT(tag) DESC")).count.map { |t, c| { tag: t, count: c } }
        end
 
        def self.maybe_silence_periodic_log(&block)
          if Settings.silence_periodic_log
            ::ActiveRecord::Base.logger.silence(&block)
          else
-            block.call
+            yield
          end
        end
 
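The `tag_counts` change is likewise cosmetic. A hedged usage sketch:

```ruby
# Returns an array of hashes such as [{ tag: "MyClass#method", count: 12 }, ...]
Delayed::Job.tag_counts(:current, 10)     # ten most common tags among current jobs
Delayed::Job.tag_counts(:all, 10, 10)     # next page, across all jobs
```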
@@ -273,7 +337,7 @@ module Delayed
 
          loop do
            jobs = maybe_silence_periodic_log do
-              if connection.adapter_name == 'PostgreSQL' && !Settings.select_random_from_batch
+              if connection.adapter_name == "PostgreSQL" && !Settings.select_random_from_batch
                # In Postgres, we can lock a job and return which row was locked in a single
                # query by using RETURNING. Combine that with the ROW_NUMBER() window function
                # to assign a distinct locked_at value to each job locked, when doing multiple
@@ -281,22 +345,20 @@ module Delayed
                effective_worker_names = Array(worker_names)
 
                lock = nil
-                lock = "FOR UPDATE SKIP LOCKED" if connection.postgresql_version >= 90500
+                lock = "FOR UPDATE SKIP LOCKED" if connection.postgresql_version >= 90_500
                target_jobs = all_available(queue,
                                            min_priority,
                                            max_priority,
-                                            forced_latency: forced_latency).
-                  limit(effective_worker_names.length + prefetch).
-                  lock(lock)
-                jobs_with_row_number = all.from(target_jobs).
-                  select("id, ROW_NUMBER() OVER () AS row_number")
+                                            forced_latency: forced_latency)
+                              .limit(effective_worker_names.length + prefetch)
+                              .lock(lock)
+                jobs_with_row_number = all.from(target_jobs)
+                                          .select("id, ROW_NUMBER() OVER () AS row_number")
                updates = +"locked_by = CASE row_number "
                effective_worker_names.each_with_index do |worker, i|
                  updates << "WHEN #{i + 1} THEN #{connection.quote(worker)} "
                end
-                if prefetch_owner
-                  updates << "ELSE #{connection.quote(prefetch_owner)} "
-                end
+                updates << "ELSE #{connection.quote(prefetch_owner)} " if prefetch_owner
                updates << "END, locked_at = #{connection.quote(db_time_now)}"
 
                # Originally this was done with a subquery, but this allows the query planner to
@@ -306,22 +368,22 @@ module Delayed
                # For more details, see:
                #  * https://dba.stackexchange.com/a/69497/55285
                #  * https://github.com/feikesteenbergen/demos/blob/b7ecee8b2a79bf04cbcd74972e6bfb81903aee5d/bugs/update_limit_bug.txt
-                query = "WITH limited_jobs AS (#{jobs_with_row_number.to_sql}) " \
-                        "UPDATE #{quoted_table_name} SET #{updates} FROM limited_jobs WHERE limited_jobs.id=#{quoted_table_name}.id " \
-                        "RETURNING #{quoted_table_name}.*"
+                query = <<~SQL.squish
+                  WITH limited_jobs AS (#{jobs_with_row_number.to_sql})
+                  UPDATE #{quoted_table_name} SET #{updates} FROM limited_jobs WHERE limited_jobs.id=#{quoted_table_name}.id
+                  RETURNING #{quoted_table_name}.*
+                SQL
 
                jobs = find_by_sql(query)
                # because this is an atomic query, we don't have to return more jobs than we needed
                # to try and lock them, nor is there a possibility we need to try again because
                # all of the jobs we tried to lock had already been locked by someone else
-                if worker_names.is_a?(Array)
-                  result = jobs.index_by(&:locked_by)
-                  # all of the prefetched jobs can come back as an array
-                  result[prefetch_owner] = jobs.select { |j| j.locked_by == prefetch_owner } if prefetch_owner
-                  return result
-                else
-                  return jobs.first
-                end
+                return jobs.first unless worker_names.is_a?(Array)
+
+                result = jobs.index_by(&:locked_by)
+                # all of the prefetched jobs can come back as an array
+                result[prefetch_owner] = jobs.select { |j| j.locked_by == prefetch_owner } if prefetch_owner
+                return result
              else
                batch_size = Settings.fetch_batch_size
                batch_size *= worker_names.length if worker_names.is_a?(Array)
@@ -331,13 +393,13 @@ module Delayed
                if jobs.empty?
                  return worker_names.is_a?(Array) ? {} : nil
                end
-                if Settings.select_random_from_batch
-                  jobs = jobs.sort_by { rand }
-                end
+
+                jobs = jobs.sort_by { rand } if Settings.select_random_from_batch
                if worker_names.is_a?(Array)
                  result = {}
                  jobs.each do |job|
                    break if worker_names.empty?
+
                    worker_name = worker_names.first
                    if job.send(:lock_exclusively!, worker_name)
                      result[worker_name] = job
@@ -346,10 +408,10 @@ module Delayed
                  end
                end
                return result
-                job = jobs.detect do |job|
+                locked_job = jobs.detect do |job|
                  job.send(:lock_exclusively!, worker_names)
                end
-                return job if job
+                return locked_job if locked_job
              end
            end
          end
@@ -371,27 +433,9 @@ module Delayed
          check_queue(queue)
          check_priorities(min_priority, max_priority)
 
-          self.ready_to_run(forced_latency: forced_latency).
-            where(:priority => min_priority..max_priority, :queue => queue).
-            by_priority
-        end
-
-        # used internally by create_singleton to take the appropriate lock
-        # depending on the db driver
-        def self.transaction_for_singleton(strand, on_conflict)
-          return yield if on_conflict == :loose
-          self.transaction do
-            if on_conflict == :patient
-              pg_function = 'pg_try_advisory_xact_lock'
-              execute_method = :select_value
-            else
-              pg_function = 'pg_advisory_xact_lock'
-              execute_method = :execute
-            end
-            result = connection.send(execute_method, sanitize_sql(["SELECT #{pg_function}(#{connection.quote_table_name('half_md5_as_bigint')}(?))", strand]))
-            return if result == false && on_conflict == :patient
-            yield
-          end
+          ready_to_run(forced_latency: forced_latency)
+            .where(priority: min_priority..max_priority, queue: queue)
+            .by_priority
        end
 
        # Create the job on the specified strand, but only if there aren't any
@@ -399,10 +443,11 @@ module Delayed
        # (in other words, the job will still be created if there's another job
        # on the strand but it's already running)
        def self.create_singleton(options)
-          strand = options[:strand]
+          strand = options[:singleton]
          on_conflict = options.delete(:on_conflict) || :use_earliest
-          transaction_for_singleton(strand, on_conflict) do
-            job = self.where(:strand => strand, :locked_at => nil).next_in_strand_order.first
+
+          transaction_for_singleton(singleton, on_conflict) do
+            job = where(strand: strand, locked_at: nil).next_in_strand_order.first
            new_job = new(options)
            if job
              new_job.initialize_defaults
@@ -426,12 +471,22 @@ module Delayed
 
        def self.processes_locked_locally(name: nil)
          name ||= Socket.gethostname rescue x
-          where("locked_by LIKE ?", "#{name}:%").pluck(:locked_by).map{|locked_by| locked_by.split(":").last.to_i}
+          where("locked_by LIKE ?", "#{name}:%").pluck(:locked_by).map { |locked_by| locked_by.split(":").last.to_i }
+        end
+
+        def self.prefetch_jobs_lock_name
+          "Delayed::Job.unlock_orphaned_prefetched_jobs"
        end
 
        def self.unlock_orphaned_prefetched_jobs
-          horizon = db_time_now - Settings.parent_process[:prefetched_jobs_timeout] * 4
-          where("locked_by LIKE 'prefetch:%' AND locked_at<?", horizon).update_all(locked_at: nil, locked_by: nil)
+          transaction do
+            # for db performance reasons, we only need one process doing this at a time
+            # so if we can't get an advisory lock, just abort. we'll try again soon
+            next unless attempt_advisory_lock(prefetch_jobs_lock_name)
+
+            horizon = db_time_now - (Settings.parent_process[:prefetched_jobs_timeout] * 4)
+            where("locked_by LIKE 'prefetch:%' AND locked_at<?", horizon).update_all(locked_at: nil, locked_by: nil)
+          end
        end
 
        def self.unlock(jobs)
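`unlock_orphaned_prefetched_jobs` now takes the shared advisory lock first, so concurrent workers do not all scan for orphans at once. A hedged sketch of wiring it into the gem's periodic jobs (the schedule name and interval are assumptions, not part of the gem):

```ruby
Delayed::Periodic.cron("Delayed::Job.unlock_orphaned_prefetched_jobs", "*/15 * * * *") do
  Delayed::Job.unlock_orphaned_prefetched_jobs
end
```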
@@ -449,12 +504,14 @@ module Delayed
        def lock_exclusively!(worker)
          now = self.class.db_time_now
          # We don't own this job so we will update the locked_by name and the locked_at
-          affected_rows = self.class.where("id=? AND locked_at IS NULL AND run_at<=?", self, now).update_all(:locked_at => now, :locked_by => worker)
+          affected_rows = self.class.where("id=? AND locked_at IS NULL AND run_at<=?", self, now).update_all(
+            locked_at: now, locked_by: worker
+          )
          if affected_rows == 1
            mark_as_locked!(now, worker)
-            return true
+            true
          else
-            return false
+            false
          end
        end
 
@@ -464,9 +521,9 @@ module Delayed
          affected_rows = self.class.where(id: self, locked_by: from).update_all(locked_at: now, locked_by: to)
          if affected_rows == 1
            mark_as_locked!(now, to)
-            return true
+            true
          else
-            return false
+            false
          end
        end
 
@@ -478,34 +535,43 @@ module Delayed
          if respond_to?(:changes_applied)
            changes_applied
          else
-            changed_attributes['locked_at'] = time
-            changed_attributes['locked_by'] = worker
+            changed_attributes["locked_at"] = time
+            changed_attributes["locked_by"] = worker
          end
        end
        protected :lock_exclusively!, :mark_as_locked!
 
        def create_and_lock!(worker)
          raise "job already exists" unless new_record?
+
+          # we don't want to process unique constraint violations of
+          # running singleton jobs; always do it as two steps
+          if singleton
+            single_step_create
+            lock_exclusively!(worker)
+            return
+          end
+
          self.locked_at = Delayed::Job.db_time_now
          self.locked_by = worker
          single_step_create
        end
 
        def fail!
-          attrs = self.attributes
-          attrs['original_job_id'] = attrs.delete('id')
-          attrs['failed_at'] ||= self.class.db_time_now
-          attrs.delete('next_in_strand')
-          attrs.delete('max_concurrent')
+          attrs = attributes
+          attrs["original_job_id"] = attrs.delete("id") if Failed.columns_hash.key?("original_job_id")
+          attrs["failed_at"] ||= self.class.db_time_now
+          attrs.delete("next_in_strand")
+          attrs.delete("max_concurrent")
          self.class.transaction do
            failed_job = Failed.create(attrs)
-            self.destroy
+            destroy
            failed_job
          end
        rescue
          # we got an error while failing the job -- we need to at least get
          # the job out of the queue
-          self.destroy
+          destroy
          # re-raise so the worker logs the error, at least
          raise
        end
@@ -513,20 +579,12 @@ module Delayed
        class Failed < Job
          include Delayed::Backend::Base
          self.table_name = :failed_jobs
-          # Rails hasn't completely loaded yet, and setting the table name will cache some stuff
-          # so reset that cache so that it will load correctly after Rails is all loaded
-          # It's fixed in Rails 5 to not cache anything when you set the table_name
-          if Rails.version < '5' && Rails.version >= '4.2'
-            @arel_engine = nil
-            @arel_table = nil
+
+          def self.cleanup_old_jobs(before_date, batch_size: 10_000)
+            where("failed_at < ?", before_date).in_batches(of: batch_size).delete_all
          end
        end
-        if Rails.version < '5' && Rails.version >= '4.2'
-          @arel_engine = nil
-          @arel_table = nil
-        end
      end
-
    end
  end
end
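`Delayed::Job::Failed` gains a batched purge helper in place of the old Rails 4.2 Arel cache workaround. A hedged usage sketch (the retention window is arbitrary):

```ruby
Delayed::Job::Failed.cleanup_old_jobs(6.months.ago)                    # delete in batches of 10_000
Delayed::Job::Failed.cleanup_old_jobs(6.months.ago, batch_size: 1_000) # smaller batches for very large tables
```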