inst-jobs 2.3.1 → 2.4.8

Files changed (94)
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
  3. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
  4. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
  5. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
  6. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
  7. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
  8. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
  9. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
  10. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
  11. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
  12. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
  13. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
  14. data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
  15. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
  16. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
  17. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
  18. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
  19. data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
  20. data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
  21. data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
  22. data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
  23. data/exe/inst_jobs +3 -2
  24. data/lib/delayed/backend/active_record.rb +204 -150
  25. data/lib/delayed/backend/base.rb +107 -77
  26. data/lib/delayed/batch.rb +11 -9
  27. data/lib/delayed/cli.rb +98 -84
  28. data/lib/delayed/core_ext/kernel.rb +4 -2
  29. data/lib/delayed/daemon.rb +70 -74
  30. data/lib/delayed/job_tracking.rb +26 -25
  31. data/lib/delayed/lifecycle.rb +27 -24
  32. data/lib/delayed/log_tailer.rb +17 -17
  33. data/lib/delayed/logging.rb +13 -16
  34. data/lib/delayed/message_sending.rb +43 -52
  35. data/lib/delayed/performable_method.rb +6 -8
  36. data/lib/delayed/periodic.rb +72 -65
  37. data/lib/delayed/plugin.rb +2 -4
  38. data/lib/delayed/pool.rb +198 -192
  39. data/lib/delayed/server/helpers.rb +6 -6
  40. data/lib/delayed/server.rb +51 -54
  41. data/lib/delayed/settings.rb +93 -81
  42. data/lib/delayed/testing.rb +21 -22
  43. data/lib/delayed/version.rb +1 -1
  44. data/lib/delayed/work_queue/in_process.rb +21 -17
  45. data/lib/delayed/work_queue/parent_process/client.rb +55 -53
  46. data/lib/delayed/work_queue/parent_process/server.rb +219 -208
  47. data/lib/delayed/work_queue/parent_process.rb +52 -53
  48. data/lib/delayed/worker/consul_health_check.rb +21 -19
  49. data/lib/delayed/worker/health_check.rb +29 -22
  50. data/lib/delayed/worker/null_health_check.rb +3 -1
  51. data/lib/delayed/worker/process_helper.rb +8 -9
  52. data/lib/delayed/worker.rb +271 -261
  53. data/lib/delayed/yaml_extensions.rb +12 -10
  54. data/lib/delayed_job.rb +37 -38
  55. data/lib/inst-jobs.rb +1 -1
  56. data/spec/active_record_job_spec.rb +129 -136
  57. data/spec/delayed/cli_spec.rb +7 -7
  58. data/spec/delayed/daemon_spec.rb +8 -8
  59. data/spec/delayed/message_sending_spec.rb +16 -9
  60. data/spec/delayed/periodic_spec.rb +13 -12
  61. data/spec/delayed/server_spec.rb +38 -38
  62. data/spec/delayed/settings_spec.rb +26 -25
  63. data/spec/delayed/work_queue/in_process_spec.rb +7 -7
  64. data/spec/delayed/work_queue/parent_process/client_spec.rb +16 -12
  65. data/spec/delayed/work_queue/parent_process/server_spec.rb +43 -40
  66. data/spec/delayed/work_queue/parent_process_spec.rb +21 -21
  67. data/spec/delayed/worker/consul_health_check_spec.rb +22 -22
  68. data/spec/delayed/worker/health_check_spec.rb +60 -52
  69. data/spec/delayed/worker_spec.rb +28 -25
  70. data/spec/sample_jobs.rb +45 -15
  71. data/spec/shared/delayed_batch.rb +74 -67
  72. data/spec/shared/delayed_method.rb +143 -102
  73. data/spec/shared/performable_method.rb +39 -38
  74. data/spec/shared/shared_backend.rb +550 -437
  75. data/spec/shared/testing.rb +14 -14
  76. data/spec/shared/worker.rb +155 -147
  77. data/spec/shared_jobs_specs.rb +13 -13
  78. data/spec/spec_helper.rb +46 -41
  79. metadata +79 -53
  80. data/lib/delayed/backend/redis/bulk_update.lua +0 -50
  81. data/lib/delayed/backend/redis/destroy_job.lua +0 -2
  82. data/lib/delayed/backend/redis/enqueue.lua +0 -29
  83. data/lib/delayed/backend/redis/fail_job.lua +0 -5
  84. data/lib/delayed/backend/redis/find_available.lua +0 -3
  85. data/lib/delayed/backend/redis/functions.rb +0 -59
  86. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
  87. data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
  88. data/lib/delayed/backend/redis/job.rb +0 -528
  89. data/lib/delayed/backend/redis/set_running.lua +0 -5
  90. data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
  91. data/spec/gemfiles/52.gemfile +0 -7
  92. data/spec/gemfiles/60.gemfile +0 -7
  93. data/spec/gemfiles/61.gemfile +0 -7
  94. data/spec/redis_job_spec.rb +0 -148
@@ -1,11 +1,13 @@
  # frozen_string_literal: true
 
- class ActiveRecord::Base
- def self.load_for_delayed_job(id)
- if id
- find(id)
- else
- super
+ module ActiveRecord
+ class Base
+ def self.load_for_delayed_job(id)
+ if id
+ find(id)
+ else
+ super
+ end
  end
  end
  end
@@ -31,17 +33,25 @@ module Delayed
 
  class << self
  def create(attributes, &block)
- return super if connection.prepared_statements
-
+ on_conflict = attributes.delete(:on_conflict)
  # modified from ActiveRecord::Persistence.create and ActiveRecord::Persistence#_insert_record
  job = new(attributes, &block)
- job.single_step_create
+ job.single_step_create(on_conflict: on_conflict)
+ end
+
+ def attempt_advisory_lock(lock_name)
+ fn_name = connection.quote_table_name("half_md5_as_bigint")
+ connection.select_value("SELECT pg_try_advisory_xact_lock(#{fn_name}('#{lock_name}'));")
+ end
+
+ def advisory_lock(lock_name)
+ fn_name = connection.quote_table_name("half_md5_as_bigint")
+ connection.execute("SELECT pg_advisory_xact_lock(#{fn_name}('#{lock_name}'));")
  end
  end
 
- def single_step_create
+ def single_step_create(on_conflict: nil)
  connection = self.class.connection
- return save if connection.prepared_statements
 
  # a before_save callback that we're skipping
  initialize_defaults
@@ -49,12 +59,10 @@ module Delayed
  current_time = current_time_from_proper_timezone
 
  all_timestamp_attributes_in_model.each do |column|
- if !attribute_present?(column)
- _write_attribute(column, current_time)
- end
+ _write_attribute(column, current_time) unless attribute_present?(column)
  end
 
- if Rails.version >= '6'
+ if Rails.version >= "6"
  attribute_names = attribute_names_for_partial_writes
  attribute_names = attributes_for_create(attribute_names)
  values = attributes_with_values(attribute_names)
@@ -62,21 +70,57 @@ module Delayed
  attribute_names = partial_writes? ? keys_for_partial_write : self.attribute_names
  values = attributes_with_values_for_create(attribute_names)
  end
+
  im = self.class.arel_table.compile_insert(self.class.send(:_substitute_values, values))
- sql, _binds = connection.send(:to_sql_and_binds, im, [])
+
+ lock_and_insert = values["strand"] && instance_of?(Job)
+ # can't use prepared statements if we're combining multiple statemenets
+ sql, binds = if lock_and_insert
+ connection.unprepared_statement do
+ connection.send(:to_sql_and_binds, im)
+ end
+ else
+ connection.send(:to_sql_and_binds, im)
+ end
+ sql = +sql
+
+ if singleton && instance_of?(Job)
+ sql << " ON CONFLICT (singleton) WHERE singleton IS NOT NULL AND locked_by IS NULL DO "
+ sql << case on_conflict
+ when :patient, :loose
+ "NOTHING"
+ when :overwrite
+ "UPDATE SET run_at=EXCLUDED.run_at, handler=EXCLUDED.handler"
+ else # :use_earliest
+ "UPDATE SET run_at=EXCLUDED.run_at WHERE EXCLUDED.run_at<delayed_jobs.run_at"
+ end
+ end
 
  # https://www.postgresql.org/docs/9.5/libpq-exec.html
- sql = "#{sql} RETURNING id"
- # > Multiple queries sent in a single PQexec call are processed in a single transaction,
- # unless there are explicit BEGIN/COMMIT commands included in the query string to divide
- # it into multiple transactions.
- # but we don't need to lock when inserting into Delayed::Failed
- sql = "SELECT pg_advisory_xact_lock(#{connection.quote_table_name('half_md5_as_bigint')}(#{connection.quote(values['strand'])})); #{sql}" if values["strand"] && self.class == Job
- result = connection.execute(sql, "#{self} Create")
- self.id = result.values.first.first
- result.clear
- @new_record = false
- changes_applied
+ sql << " RETURNING id"
+
+ if lock_and_insert
+ # > Multiple queries sent in a single PQexec call are processed in a single transaction,
+ # unless there are explicit BEGIN/COMMIT commands included in the query string to divide
+ # it into multiple transactions.
+ # but we don't need to lock when inserting into Delayed::Failed
+ if values["strand"] && instance_of?(Job)
+ fn_name = connection.quote_table_name("half_md5_as_bigint")
+ sql = "SELECT pg_advisory_xact_lock(#{fn_name}(#{connection.quote(values['strand'])})); #{sql}"
+ end
+ result = connection.execute(sql, "#{self.class} Create")
+ self.id = result.values.first&.first
+ result.clear
+ else
+ result = connection.exec_query(sql, "#{self.class} Create", binds)
+ self.id = connection.send(:last_inserted_id, result)
+ end
+
+ # it might not get set if there was an existing record, and we didn't update it
+ if id
+ @new_record = false
+ changes_applied
+ end
 
  self
  end
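Not part of the diff — a restatement for reviewers. The conflict handling added to single_step_create above maps each :on_conflict strategy to a fixed SQL fragment; a minimal standalone Ruby sketch of that mapping (the helper name here is made up, not gem code):

    # Illustrative helper mirroring the case expression in the hunk above.
    def singleton_conflict_clause(on_conflict)
      clause = +"ON CONFLICT (singleton) WHERE singleton IS NOT NULL AND locked_by IS NULL DO "
      clause << case on_conflict
                when :patient, :loose
                  "NOTHING"
                when :overwrite
                  "UPDATE SET run_at=EXCLUDED.run_at, handler=EXCLUDED.handler"
                else # :use_earliest
                  "UPDATE SET run_at=EXCLUDED.run_at WHERE EXCLUDED.run_at<delayed_jobs.run_at"
                end
    end

    singleton_conflict_clause(:overwrite)
    # => "ON CONFLICT (singleton) ... DO UPDATE SET run_at=EXCLUDED.run_at, handler=EXCLUDED.handler"

Per the hunk, the clause is only appended when the job has the new singleton column set, and " RETURNING id" is appended afterwards in both code paths.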
@@ -103,9 +147,11 @@ module Delayed
  # to raise the lock level
  before_create :lock_strand_on_create
  def lock_strand_on_create
- if strand.present? && self.class == Job
- self.class.connection.execute("SELECT pg_advisory_xact_lock(#{self.class.connection.quote_table_name('half_md5_as_bigint')}(#{self.class.connection.quote(strand)}))")
- end
+ return unless strand.present? && instance_of?(Job)
+
+ fn_name = self.class.connection.quote_table_name("half_md5_as_bigint")
+ quoted_strand_name = self.class.connection.quote(strand)
+ self.class.connection.execute("SELECT pg_advisory_xact_lock(#{fn_name}(#{quoted_strand_name}))")
  end
 
  # This overwrites the previous behavior
@@ -124,7 +170,7 @@ module Delayed
  end
 
  def self.failed
- where("failed_at IS NOT NULL")
+ where.not(failed_at: nil)
  end
 
  def self.running
@@ -132,51 +178,54 @@ module Delayed
  end
 
  # a nice stress test:
- # 10_000.times { |i| Kernel.delay(strand: 's1', run_at: (24.hours.ago + (rand(24.hours.to_i))).system("echo #{i} >> test1.txt") }
+ # 10_000.times do |i|
+ # Kernel.delay(strand: 's1', run_at: (24.hours.ago + (rand(24.hours.to_i))).system("echo #{i} >> test1.txt")
+ # end
  # 500.times { |i| "ohai".delay(run_at: (12.hours.ago + (rand(24.hours.to_i))).reverse }
  # then fire up your workers
  # you can check out strand correctness: diff test1.txt <(sort -n test1.txt)
- def self.ready_to_run(forced_latency: nil)
- now = db_time_now
- now -= forced_latency if forced_latency
- where("run_at<=? AND locked_at IS NULL AND next_in_strand=?", now, true)
- end
+ def self.ready_to_run(forced_latency: nil)
+ now = db_time_now
+ now -= forced_latency if forced_latency
+ where("run_at<=? AND locked_at IS NULL AND next_in_strand=?", now, true)
+ end
+
  def self.by_priority
  order(:priority, :run_at, :id)
  end
 
  # When a worker is exiting, make sure we don't have any locked jobs.
  def self.clear_locks!(worker_name)
- where(:locked_by => worker_name).update_all(:locked_by => nil, :locked_at => nil)
+ where(locked_by: worker_name).update_all(locked_by: nil, locked_at: nil)
  end
 
  def self.strand_size(strand)
- self.where(:strand => strand).count
+ where(strand: strand).count
  end
 
- def self.running_jobs()
- self.running.order(:locked_at)
+ def self.running_jobs
+ running.order(:locked_at)
  end
 
  def self.scope_for_flavor(flavor, query)
  scope = case flavor.to_s
- when 'current'
- self.current
- when 'future'
- self.future
- when 'failed'
- Delayed::Job::Failed
- when 'strand'
- self.where(:strand => query)
- when 'tag'
- self.where(:tag => query)
- else
- raise ArgumentError, "invalid flavor: #{flavor.inspect}"
- end
-
- if %w(current future).include?(flavor.to_s)
+ when "current"
+ current
+ when "future"
+ future
+ when "failed"
+ Delayed::Job::Failed
+ when "strand"
+ where(strand: query)
+ when "tag"
+ where(tag: query)
+ else
+ raise ArgumentError, "invalid flavor: #{flavor.inspect}"
+ end
+
+ if %w[current future].include?(flavor.to_s)
  queue = query.presence || Delayed::Settings.queue
- scope = scope.where(:queue => queue)
+ scope = scope.where(queue: queue)
  end
 
  scope
@@ -193,8 +242,8 @@ module Delayed
  limit,
  offset = 0,
  query = nil)
- scope = self.scope_for_flavor(flavor, query)
- order = flavor.to_s == 'future' ? 'run_at' : 'id desc'
+ scope = scope_for_flavor(flavor, query)
+ order = flavor.to_s == "future" ? "run_at" : "id desc"
  scope.order(order).limit(limit).offset(offset).to_a
  end
 
@@ -202,7 +251,7 @@ module Delayed
  # see list_jobs for documentation on arguments
  def self.jobs_count(flavor,
  query = nil)
- scope = self.scope_for_flavor(flavor, query)
+ scope = scope_for_flavor(flavor, query)
  scope.count
  end
 
@@ -211,30 +260,33 @@ module Delayed
  # to specify the jobs to act on, either pass opts[:ids] = [list of job ids]
  # or opts[:flavor] = <some flavor> to perform on all jobs of that flavor
  def self.bulk_update(action, opts)
- raise("Can't #{action.to_s} failed jobs") if opts[:flavor].to_s == 'failed' && action.to_s != 'destroy'
+ raise("Can't #{action} failed jobs") if opts[:flavor].to_s == "failed" && action.to_s != "destroy"
+
  scope = if opts[:ids]
- if opts[:flavor] == 'failed'
- Delayed::Job::Failed.where(:id => opts[:ids])
- else
- self.where(:id => opts[:ids])
- end
- elsif opts[:flavor]
+ if opts[:flavor] == "failed"
+ Delayed::Job::Failed.where(id: opts[:ids])
+ else
+ where(id: opts[:ids])
+ end
+ elsif opts[:flavor]
 
- self.scope_for_flavor(opts[:flavor], opts[:query])
- end
+ scope_for_flavor(opts[:flavor], opts[:query])
+ end
 
  return 0 unless scope
 
  case action.to_s
- when 'hold'
+ when "hold"
  scope = scope.where(locked_by: nil)
- scope.update_all(:locked_by => ON_HOLD_LOCKED_BY, :locked_at => db_time_now, :attempts => ON_HOLD_COUNT)
- when 'unhold'
+ scope.update_all(locked_by: ON_HOLD_LOCKED_BY, locked_at: db_time_now, attempts: ON_HOLD_COUNT)
+ when "unhold"
  now = db_time_now
  scope = scope.where(locked_by: ON_HOLD_LOCKED_BY)
- scope.update_all(["locked_by = NULL, locked_at = NULL, attempts = 0, run_at = (CASE WHEN run_at > ? THEN run_at ELSE ? END), failed_at = NULL", now, now])
- when 'destroy'
- scope = scope.where("locked_by IS NULL OR locked_by=?", ON_HOLD_LOCKED_BY) unless opts[:flavor] == 'failed'
+ scope.update_all([<<~SQL.squish, now, now])
+ locked_by=NULL, locked_at=NULL, attempts=0, run_at=(CASE WHEN run_at > ? THEN run_at ELSE ? END), failed_at=NULL
+ SQL
+ when "destroy"
+ scope = scope.where("locked_by IS NULL OR locked_by=?", ON_HOLD_LOCKED_BY) unless opts[:flavor] == "failed"
  scope.delete_all
  end
  end
@@ -245,16 +297,17 @@ module Delayed
  def self.tag_counts(flavor,
  limit,
  offset = 0)
- raise(ArgumentError, "invalid flavor: #{flavor}") unless %w(current all).include?(flavor.to_s)
+ raise(ArgumentError, "invalid flavor: #{flavor}") unless %w[current all].include?(flavor.to_s)
+
  scope = case flavor.to_s
- when 'current'
- self.current
- when 'all'
- self
- end
+ when "current"
+ current
+ when "all"
+ self
+ end
 
  scope = scope.group(:tag).offset(offset).limit(limit)
- scope.order(Arel.sql("COUNT(tag) DESC")).count.map { |t,c| { :tag => t, :count => c } }
+ scope.order(Arel.sql("COUNT(tag) DESC")).count.map { |t, c| { tag: t, count: c } }
  end
 
  def self.maybe_silence_periodic_log(&block)
@@ -278,7 +331,7 @@ module Delayed
 
  loop do
  jobs = maybe_silence_periodic_log do
- if connection.adapter_name == 'PostgreSQL' && !Settings.select_random_from_batch
+ if connection.adapter_name == "PostgreSQL" && !Settings.select_random_from_batch
  # In Postgres, we can lock a job and return which row was locked in a single
  # query by using RETURNING. Combine that with the ROW_NUMBER() window function
  # to assign a distinct locked_at value to each job locked, when doing multiple
@@ -286,22 +339,20 @@ module Delayed
  effective_worker_names = Array(worker_names)
 
  lock = nil
- lock = "FOR UPDATE SKIP LOCKED" if connection.postgresql_version >= 90500
+ lock = "FOR UPDATE SKIP LOCKED" if connection.postgresql_version >= 90_500
  target_jobs = all_available(queue,
  min_priority,
  max_priority,
- forced_latency: forced_latency).
- limit(effective_worker_names.length + prefetch).
- lock(lock)
- jobs_with_row_number = all.from(target_jobs).
- select("id, ROW_NUMBER() OVER () AS row_number")
+ forced_latency: forced_latency)
+ .limit(effective_worker_names.length + prefetch)
+ .lock(lock)
+ jobs_with_row_number = all.from(target_jobs)
+ .select("id, ROW_NUMBER() OVER () AS row_number")
  updates = +"locked_by = CASE row_number "
  effective_worker_names.each_with_index do |worker, i|
  updates << "WHEN #{i + 1} THEN #{connection.quote(worker)} "
  end
- if prefetch_owner
- updates << "ELSE #{connection.quote(prefetch_owner)} "
- end
+ updates << "ELSE #{connection.quote(prefetch_owner)} " if prefetch_owner
  updates << "END, locked_at = #{connection.quote(db_time_now)}"
 
  # Originally this was done with a subquery, but this allows the query planner to
@@ -311,22 +362,22 @@ module Delayed
  # For more details, see:
  # * https://dba.stackexchange.com/a/69497/55285
  # * https://github.com/feikesteenbergen/demos/blob/b7ecee8b2a79bf04cbcd74972e6bfb81903aee5d/bugs/update_limit_bug.txt
- query = "WITH limited_jobs AS (#{jobs_with_row_number.to_sql}) " \
- "UPDATE #{quoted_table_name} SET #{updates} FROM limited_jobs WHERE limited_jobs.id=#{quoted_table_name}.id " \
- "RETURNING #{quoted_table_name}.*"
+ query = <<~SQL.squish
+ WITH limited_jobs AS (#{jobs_with_row_number.to_sql})
+ UPDATE #{quoted_table_name} SET #{updates} FROM limited_jobs WHERE limited_jobs.id=#{quoted_table_name}.id
+ RETURNING #{quoted_table_name}.*
+ SQL
 
  jobs = find_by_sql(query)
  # because this is an atomic query, we don't have to return more jobs than we needed
  # to try and lock them, nor is there a possibility we need to try again because
  # all of the jobs we tried to lock had already been locked by someone else
- if worker_names.is_a?(Array)
- result = jobs.index_by(&:locked_by)
- # all of the prefetched jobs can come back as an array
- result[prefetch_owner] = jobs.select { |j| j.locked_by == prefetch_owner } if prefetch_owner
- return result
- else
- return jobs.first
- end
+ return jobs.first unless worker_names.is_a?(Array)
+
+ result = jobs.index_by(&:locked_by)
+ # all of the prefetched jobs can come back as an array
+ result[prefetch_owner] = jobs.select { |j| j.locked_by == prefetch_owner } if prefetch_owner
+ return result
  else
  batch_size = Settings.fetch_batch_size
  batch_size *= worker_names.length if worker_names.is_a?(Array)
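Not from the gem's output — a reviewer sketch of the statement the Postgres branch above builds, with the inner scope elided and the worker names and timestamp invented, to make the CASE row_number trick concrete:

    # Hypothetical expansion for two workers plus a prefetch owner; the inner
    # SELECT stands in for the ready_to_run scope with its LIMIT and FOR UPDATE SKIP LOCKED.
    query = <<~SQL
      WITH limited_jobs AS (
        SELECT id, ROW_NUMBER() OVER () AS row_number
        FROM (/* ready_to_run scope, limited and locked */) jobs
      )
      UPDATE "delayed_jobs"
         SET locked_by = CASE row_number
                           WHEN 1 THEN 'worker-host:1001'
                           WHEN 2 THEN 'worker-host:1002'
                           ELSE 'prefetch:worker-host'
                         END,
             locked_at = '2021-09-17 00:00:00'
        FROM limited_jobs
       WHERE limited_jobs.id = "delayed_jobs".id
      RETURNING "delayed_jobs".*
    SQL

Because the locking UPDATE and the row selection happen in one atomic statement, every row that comes back is already assigned to a specific worker (or to the prefetch owner), which is why the Ruby above can simply index the results by locked_by.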
@@ -336,13 +387,13 @@ module Delayed
  if jobs.empty?
  return worker_names.is_a?(Array) ? {} : nil
  end
- if Settings.select_random_from_batch
- jobs = jobs.sort_by { rand }
- end
+
+ jobs = jobs.sort_by { rand } if Settings.select_random_from_batch
  if worker_names.is_a?(Array)
  result = {}
  jobs.each do |job|
  break if worker_names.empty?
+
  worker_name = worker_names.first
  if job.send(:lock_exclusively!, worker_name)
  result[worker_name] = job
@@ -351,10 +402,10 @@ module Delayed
  end
  end
  return result
  else
- job = jobs.detect do |job|
+ locked_job = jobs.detect do |job|
  job.send(:lock_exclusively!, worker_names)
  end
- return job if job
+ return locked_job if locked_job
  end
  end
  end
@@ -376,27 +427,9 @@ module Delayed
  check_queue(queue)
  check_priorities(min_priority, max_priority)
 
- self.ready_to_run(forced_latency: forced_latency).
- where(:priority => min_priority..max_priority, :queue => queue).
- by_priority
- end
-
- # used internally by create_singleton to take the appropriate lock
- # depending on the db driver
- def self.transaction_for_singleton(strand, on_conflict)
- return yield if on_conflict == :loose
- self.transaction do
- if on_conflict == :patient
- pg_function = 'pg_try_advisory_xact_lock'
- execute_method = :select_value
- else
- pg_function = 'pg_advisory_xact_lock'
- execute_method = :execute
- end
- result = connection.send(execute_method, sanitize_sql(["SELECT #{pg_function}(#{connection.quote_table_name('half_md5_as_bigint')}(?))", strand]))
- return if result == false && on_conflict == :patient
- yield
- end
+ ready_to_run(forced_latency: forced_latency)
+ .where(priority: min_priority..max_priority, queue: queue)
+ .by_priority
  end
 
  # Create the job on the specified strand, but only if there aren't any
@@ -404,10 +437,11 @@ module Delayed
  # (in other words, the job will still be created if there's another job
  # on the strand but it's already running)
  def self.create_singleton(options)
- strand = options[:strand]
+ strand = options[:singleton]
  on_conflict = options.delete(:on_conflict) || :use_earliest
- transaction_for_singleton(strand, on_conflict) do
- job = self.where(:strand => strand, :locked_at => nil).next_in_strand_order.first
+
+ transaction_for_singleton(singleton, on_conflict) do
+ job = where(strand: strand, locked_at: nil).next_in_strand_order.first
  new_job = new(options)
  if job
  new_job.initialize_defaults
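Not part of the diff: create_singleton is normally reached through the gem's enqueue/delay path rather than called directly. A hedged usage sketch in the spirit of the stress-test comment earlier in this file — the singleton key and the command are invented, and it assumes delay forwards :singleton and :on_conflict through to enqueue unchanged:

    # Hypothetical caller-side sketch; only :singleton and :on_conflict come from this diff.
    Kernel.delay(singleton: "cleanup:tmp_files", on_conflict: :loose)
          .system("echo cleanup >> cleanup.log")

With :loose, enqueueing a second copy while an unlocked one is already queued becomes a no-op (the DO NOTHING branch in the earlier hunk); the default :use_earliest instead keeps whichever run_at is sooner.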
@@ -431,12 +465,22 @@ module Delayed
 
  def self.processes_locked_locally(name: nil)
  name ||= Socket.gethostname rescue x
- where("locked_by LIKE ?", "#{name}:%").pluck(:locked_by).map{|locked_by| locked_by.split(":").last.to_i}
+ where("locked_by LIKE ?", "#{name}:%").pluck(:locked_by).map { |locked_by| locked_by.split(":").last.to_i }
+ end
+
+ def self.prefetch_jobs_lock_name
+ "Delayed::Job.unlock_orphaned_prefetched_jobs"
  end
 
  def self.unlock_orphaned_prefetched_jobs
- horizon = db_time_now - Settings.parent_process[:prefetched_jobs_timeout] * 4
- where("locked_by LIKE 'prefetch:%' AND locked_at<?", horizon).update_all(locked_at: nil, locked_by: nil)
+ transaction do
+ # for db performance reasons, we only need one process doing this at a time
+ # so if we can't get an advisory lock, just abort. we'll try again soon
+ return unless attempt_advisory_lock(prefetch_jobs_lock_name)
+
+ horizon = db_time_now - (Settings.parent_process[:prefetched_jobs_timeout] * 4)
+ where("locked_by LIKE 'prefetch:%' AND locked_at<?", horizon).update_all(locked_at: nil, locked_by: nil)
+ end
  end
 
  def self.unlock(jobs)
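Not part of the diff: the guard pattern used in unlock_orphaned_prefetched_jobs above — take a transaction-scoped try-lock and bail out if another process already holds it — can be reused as-is. A minimal sketch with an invented lock name:

    # attempt_advisory_lock wraps pg_try_advisory_xact_lock (see the earlier hunk), so it
    # must be called inside a transaction and is released automatically at commit/rollback.
    Delayed::Job.transaction do
      next unless Delayed::Job.attempt_advisory_lock("my_app.nightly_maintenance")

      # ... work that only one process should perform at a time ...
    end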
@@ -454,12 +498,14 @@ module Delayed
  def lock_exclusively!(worker)
  now = self.class.db_time_now
  # We don't own this job so we will update the locked_by name and the locked_at
- affected_rows = self.class.where("id=? AND locked_at IS NULL AND run_at<=?", self, now).update_all(:locked_at => now, :locked_by => worker)
+ affected_rows = self.class.where("id=? AND locked_at IS NULL AND run_at<=?", self, now).update_all(
+ locked_at: now, locked_by: worker
+ )
  if affected_rows == 1
  mark_as_locked!(now, worker)
- return true
+ true
  else
- return false
+ false
  end
  end
 
@@ -469,9 +515,9 @@ module Delayed
  affected_rows = self.class.where(id: self, locked_by: from).update_all(locked_at: now, locked_by: to)
  if affected_rows == 1
  mark_as_locked!(now, to)
- return true
+ true
  else
- return false
+ false
  end
  end
 
@@ -483,34 +529,43 @@ module Delayed
  if respond_to?(:changes_applied)
  changes_applied
  else
- changed_attributes['locked_at'] = time
- changed_attributes['locked_by'] = worker
+ changed_attributes["locked_at"] = time
+ changed_attributes["locked_by"] = worker
  end
  end
  protected :lock_exclusively!, :mark_as_locked!
 
  def create_and_lock!(worker)
  raise "job already exists" unless new_record?
+
+ # we don't want to process unique constraint violations of
+ # running singleton jobs; always do it as two steps
+ if singleton
+ single_step_create
+ lock_exclusively!(worker)
+ return
+ end
+
  self.locked_at = Delayed::Job.db_time_now
  self.locked_by = worker
  single_step_create
  end
 
  def fail!
- attrs = self.attributes
- attrs['original_job_id'] = attrs.delete('id')
- attrs['failed_at'] ||= self.class.db_time_now
- attrs.delete('next_in_strand')
- attrs.delete('max_concurrent')
+ attrs = attributes
+ attrs["original_job_id"] = attrs.delete("id")
+ attrs["failed_at"] ||= self.class.db_time_now
+ attrs.delete("next_in_strand")
+ attrs.delete("max_concurrent")
  self.class.transaction do
  failed_job = Failed.create(attrs)
- self.destroy
+ destroy
  failed_job
  end
  rescue
  # we got an error while failing the job -- we need to at least get
  # the job out of the queue
- self.destroy
+ destroy
  # re-raise so the worker logs the error, at least
  raise
  end
@@ -520,7 +575,6 @@ module Delayed
  self.table_name = :failed_jobs
  end
  end
-
  end
  end
  end