inst-jobs 2.0.0 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
  3. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
  4. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
  5. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
  6. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
  7. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
  8. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
  9. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
  10. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
  11. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
  12. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
  13. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
  14. data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
  15. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
  16. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
  17. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
  18. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
  19. data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
  20. data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
  21. data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
  22. data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
  23. data/db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb +56 -0
  24. data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb +27 -0
  25. data/exe/inst_jobs +3 -2
  26. data/lib/delayed/backend/active_record.rb +211 -168
  27. data/lib/delayed/backend/base.rb +110 -72
  28. data/lib/delayed/batch.rb +11 -9
  29. data/lib/delayed/cli.rb +98 -84
  30. data/lib/delayed/core_ext/kernel.rb +4 -2
  31. data/lib/delayed/daemon.rb +70 -74
  32. data/lib/delayed/job_tracking.rb +26 -25
  33. data/lib/delayed/lifecycle.rb +27 -23
  34. data/lib/delayed/log_tailer.rb +17 -17
  35. data/lib/delayed/logging.rb +13 -16
  36. data/lib/delayed/message_sending.rb +43 -52
  37. data/lib/delayed/performable_method.rb +6 -8
  38. data/lib/delayed/periodic.rb +72 -68
  39. data/lib/delayed/plugin.rb +2 -4
  40. data/lib/delayed/pool.rb +205 -168
  41. data/lib/delayed/server/helpers.rb +6 -6
  42. data/lib/delayed/server.rb +51 -54
  43. data/lib/delayed/settings.rb +94 -81
  44. data/lib/delayed/testing.rb +21 -22
  45. data/lib/delayed/version.rb +1 -1
  46. data/lib/delayed/work_queue/in_process.rb +21 -17
  47. data/lib/delayed/work_queue/parent_process/client.rb +55 -53
  48. data/lib/delayed/work_queue/parent_process/server.rb +245 -207
  49. data/lib/delayed/work_queue/parent_process.rb +52 -53
  50. data/lib/delayed/worker/consul_health_check.rb +32 -33
  51. data/lib/delayed/worker/health_check.rb +34 -26
  52. data/lib/delayed/worker/null_health_check.rb +3 -1
  53. data/lib/delayed/worker/process_helper.rb +8 -9
  54. data/lib/delayed/worker.rb +272 -241
  55. data/lib/delayed/yaml_extensions.rb +12 -10
  56. data/lib/delayed_job.rb +37 -37
  57. data/lib/inst-jobs.rb +1 -1
  58. data/spec/active_record_job_spec.rb +143 -139
  59. data/spec/delayed/cli_spec.rb +7 -7
  60. data/spec/delayed/daemon_spec.rb +10 -9
  61. data/spec/delayed/message_sending_spec.rb +16 -9
  62. data/spec/delayed/periodic_spec.rb +14 -21
  63. data/spec/delayed/server_spec.rb +38 -38
  64. data/spec/delayed/settings_spec.rb +26 -25
  65. data/spec/delayed/work_queue/in_process_spec.rb +7 -8
  66. data/spec/delayed/work_queue/parent_process/client_spec.rb +17 -12
  67. data/spec/delayed/work_queue/parent_process/server_spec.rb +117 -41
  68. data/spec/delayed/work_queue/parent_process_spec.rb +21 -23
  69. data/spec/delayed/worker/consul_health_check_spec.rb +37 -50
  70. data/spec/delayed/worker/health_check_spec.rb +60 -52
  71. data/spec/delayed/worker_spec.rb +44 -21
  72. data/spec/sample_jobs.rb +45 -15
  73. data/spec/shared/delayed_batch.rb +74 -67
  74. data/spec/shared/delayed_method.rb +143 -102
  75. data/spec/shared/performable_method.rb +39 -38
  76. data/spec/shared/shared_backend.rb +550 -437
  77. data/spec/shared/testing.rb +14 -14
  78. data/spec/shared/worker.rb +156 -148
  79. data/spec/shared_jobs_specs.rb +13 -13
  80. data/spec/spec_helper.rb +53 -55
  81. metadata +148 -82
  82. data/lib/delayed/backend/redis/bulk_update.lua +0 -50
  83. data/lib/delayed/backend/redis/destroy_job.lua +0 -2
  84. data/lib/delayed/backend/redis/enqueue.lua +0 -29
  85. data/lib/delayed/backend/redis/fail_job.lua +0 -5
  86. data/lib/delayed/backend/redis/find_available.lua +0 -3
  87. data/lib/delayed/backend/redis/functions.rb +0 -59
  88. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
  89. data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
  90. data/lib/delayed/backend/redis/job.rb +0 -535
  91. data/lib/delayed/backend/redis/set_running.lua +0 -5
  92. data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
  93. data/spec/gemfiles/42.gemfile +0 -7
  94. data/spec/gemfiles/50.gemfile +0 -7
  95. data/spec/gemfiles/51.gemfile +0 -7
  96. data/spec/gemfiles/52.gemfile +0 -7
  97. data/spec/gemfiles/60.gemfile +0 -7
  98. data/spec/redis_job_spec.rb +0 -148
data/lib/delayed/backend/active_record.rb

@@ -1,11 +1,13 @@
  # frozen_string_literal: true

- class ActiveRecord::Base
- def self.load_for_delayed_job(id)
- if id
- find(id)
- else
- super
+ module ActiveRecord
+ class Base
+ def self.load_for_delayed_job(id)
+ if id
+ find(id)
+ else
+ super
+ end
  end
  end
  end
@@ -13,9 +15,13 @@ end
  module Delayed
  module Backend
  module ActiveRecord
+ class AbstractJob < ::ActiveRecord::Base
+ self.abstract_class = true
+ end
+
  # A job object that is persisted to the database.
  # Contains the work object as a YAML field.
- class Job < ::ActiveRecord::Base
+ class Job < AbstractJob
  include Delayed::Backend::Base
  self.table_name = :delayed_jobs

@@ -27,17 +33,25 @@ module Delayed

  class << self
  def create(attributes, &block)
- return super if connection.prepared_statements || Rails.version < '5.2'
-
+ on_conflict = attributes.delete(:on_conflict)
  # modified from ActiveRecord::Persistence.create and ActiveRecord::Persistence#_insert_record
  job = new(attributes, &block)
- job.single_step_create
+ job.single_step_create(on_conflict: on_conflict)
+ end
+
+ def attempt_advisory_lock(lock_name)
+ fn_name = connection.quote_table_name("half_md5_as_bigint")
+ connection.select_value("SELECT pg_try_advisory_xact_lock(#{fn_name}('#{lock_name}'));")
+ end
+
+ def advisory_lock(lock_name)
+ fn_name = connection.quote_table_name("half_md5_as_bigint")
+ connection.execute("SELECT pg_advisory_xact_lock(#{fn_name}('#{lock_name}'));")
  end
  end

- def single_step_create
+ def single_step_create(on_conflict: nil)
  connection = self.class.connection
- return save if connection.prepared_statements || Rails.version < '5.2'

  # a before_save callback that we're skipping
  initialize_defaults
@@ -45,33 +59,63 @@ module Delayed
  current_time = current_time_from_proper_timezone

  all_timestamp_attributes_in_model.each do |column|
- if !attribute_present?(column)
- _write_attribute(column, current_time)
- end
+ _write_attribute(column, current_time) unless attribute_present?(column)
  end

- if Rails.version >= '6'
- attribute_names = attribute_names_for_partial_writes
- attribute_names = attributes_for_create(attribute_names)
- values = attributes_with_values(attribute_names)
- else
- attribute_names = partial_writes? ? keys_for_partial_write : self.attribute_names
- values = attributes_with_values_for_create(attribute_names)
- end
+ attribute_names = attribute_names_for_partial_writes
+ attribute_names = attributes_for_create(attribute_names)
+ values = attributes_with_values(attribute_names)
+
  im = self.class.arel_table.compile_insert(self.class.send(:_substitute_values, values))
- sql, _binds = connection.send(:to_sql_and_binds, im, [])
+
+ lock_and_insert = values["strand"] && instance_of?(Job)
+ # can't use prepared statements if we're combining multiple statemenets
+ sql, binds = if lock_and_insert
+ connection.unprepared_statement do
+ connection.send(:to_sql_and_binds, im)
+ end
+ else
+ connection.send(:to_sql_and_binds, im)
+ end
+ sql = +sql
+
+ if singleton && instance_of?(Job)
+ sql << " ON CONFLICT (singleton) WHERE singleton IS NOT NULL AND locked_by IS NULL DO "
+ sql << case on_conflict
+ when :patient, :loose
+ "NOTHING"
+ when :overwrite
+ "UPDATE SET run_at=EXCLUDED.run_at, handler=EXCLUDED.handler"
+ else # :use_earliest
+ "UPDATE SET run_at=EXCLUDED.run_at WHERE EXCLUDED.run_at<delayed_jobs.run_at"
+ end
+ end

  # https://www.postgresql.org/docs/9.5/libpq-exec.html
- sql = "#{sql} RETURNING id"
- # > Multiple queries sent in a single PQexec call are processed in a single transaction,
- # unless there are explicit BEGIN/COMMIT commands included in the query string to divide
- # it into multiple transactions.
- sql = "SELECT pg_advisory_xact_lock(#{connection.quote_table_name('half_md5_as_bigint')}(#{connection.quote(values['strand'])})); #{sql}" if values["strand"]
- result = connection.execute(sql, "#{self} Create")
- self.id = result.values.first.first
- result.clear
- @new_record = false
- changes_applied
+ sql << " RETURNING id"
+
+ if lock_and_insert
+ # > Multiple queries sent in a single PQexec call are processed in a single transaction,
+ # unless there are explicit BEGIN/COMMIT commands included in the query string to divide
+ # it into multiple transactions.
+ # but we don't need to lock when inserting into Delayed::Failed
+ if values["strand"] && instance_of?(Job)
+ fn_name = connection.quote_table_name("half_md5_as_bigint")
+ sql = "SELECT pg_advisory_xact_lock(#{fn_name}(#{connection.quote(values['strand'])})); #{sql}"
+ end
+ result = connection.execute(sql, "#{self.class} Create")
+ self.id = result.values.first&.first
+ result.clear
+ else
+ result = connection.exec_query(sql, "#{self.class} Create", binds)
+ self.id = connection.send(:last_inserted_id, result)
+ end
+
+ # it might not get set if there was an existing record, and we didn't update it
+ if id
+ @new_record = false
+ changes_applied
+ end

  self
  end
@@ -98,9 +142,11 @@ module Delayed
  # to raise the lock level
  before_create :lock_strand_on_create
  def lock_strand_on_create
- if strand.present?
- self.class.connection.execute("SELECT pg_advisory_xact_lock(#{self.class.connection.quote_table_name('half_md5_as_bigint')}(#{self.class.connection.quote(strand)}))")
- end
+ return unless strand.present? && instance_of?(Job)
+
+ fn_name = self.class.connection.quote_table_name("half_md5_as_bigint")
+ quoted_strand_name = self.class.connection.quote(strand)
+ self.class.connection.execute("SELECT pg_advisory_xact_lock(#{fn_name}(#{quoted_strand_name}))")
  end

  # This overwrites the previous behavior
@@ -119,7 +165,7 @@ module Delayed
  end

  def self.failed
- where("failed_at IS NOT NULL")
+ where.not(failed_at: nil)
  end

  def self.running
@@ -127,51 +173,54 @@ module Delayed
  end

  # a nice stress test:
- # 10_000.times { |i| Kernel.delay(strand: 's1', run_at: (24.hours.ago + (rand(24.hours.to_i))).system("echo #{i} >> test1.txt") }
+ # 10_000.times do |i|
+ # Kernel.delay(strand: 's1', run_at: (24.hours.ago + (rand(24.hours.to_i))).system("echo #{i} >> test1.txt")
+ # end
  # 500.times { |i| "ohai".delay(run_at: (12.hours.ago + (rand(24.hours.to_i))).reverse }
  # then fire up your workers
  # you can check out strand correctness: diff test1.txt <(sort -n test1.txt)
- def self.ready_to_run(forced_latency: nil)
- now = db_time_now
- now -= forced_latency if forced_latency
- where("run_at<=? AND locked_at IS NULL AND next_in_strand=?", now, true)
- end
+ def self.ready_to_run(forced_latency: nil)
+ now = db_time_now
+ now -= forced_latency if forced_latency
+ where("run_at<=? AND locked_at IS NULL AND next_in_strand=?", now, true)
+ end
+
  def self.by_priority
  order(:priority, :run_at, :id)
  end

  # When a worker is exiting, make sure we don't have any locked jobs.
  def self.clear_locks!(worker_name)
- where(:locked_by => worker_name).update_all(:locked_by => nil, :locked_at => nil)
+ where(locked_by: worker_name).update_all(locked_by: nil, locked_at: nil)
  end

  def self.strand_size(strand)
- self.where(:strand => strand).count
+ where(strand: strand).count
  end

- def self.running_jobs()
- self.running.order(:locked_at)
+ def self.running_jobs
+ running.order(:locked_at)
  end

  def self.scope_for_flavor(flavor, query)
  scope = case flavor.to_s
- when 'current'
- self.current
- when 'future'
- self.future
- when 'failed'
- Delayed::Job::Failed
- when 'strand'
- self.where(:strand => query)
- when 'tag'
- self.where(:tag => query)
- else
- raise ArgumentError, "invalid flavor: #{flavor.inspect}"
- end
-
- if %w(current future).include?(flavor.to_s)
+ when "current"
+ current
+ when "future"
+ future
+ when "failed"
+ Delayed::Job::Failed
+ when "strand"
+ where(strand: query)
+ when "tag"
+ where(tag: query)
+ else
+ raise ArgumentError, "invalid flavor: #{flavor.inspect}"
+ end
+
+ if %w[current future].include?(flavor.to_s)
  queue = query.presence || Delayed::Settings.queue
- scope = scope.where(:queue => queue)
+ scope = scope.where(queue: queue)
  end

  scope
@@ -188,8 +237,8 @@ module Delayed
  limit,
  offset = 0,
  query = nil)
- scope = self.scope_for_flavor(flavor, query)
- order = flavor.to_s == 'future' ? 'run_at' : 'id desc'
+ scope = scope_for_flavor(flavor, query)
+ order = flavor.to_s == "future" ? "run_at" : "id desc"
  scope.order(order).limit(limit).offset(offset).to_a
  end

@@ -197,7 +246,7 @@ module Delayed
  # see list_jobs for documentation on arguments
  def self.jobs_count(flavor,
  query = nil)
- scope = self.scope_for_flavor(flavor, query)
+ scope = scope_for_flavor(flavor, query)
  scope.count
  end

@@ -206,30 +255,33 @@ module Delayed
  # to specify the jobs to act on, either pass opts[:ids] = [list of job ids]
  # or opts[:flavor] = <some flavor> to perform on all jobs of that flavor
  def self.bulk_update(action, opts)
- raise("Can't #{action.to_s} failed jobs") if opts[:flavor].to_s == 'failed' && action.to_s != 'destroy'
+ raise("Can't #{action} failed jobs") if opts[:flavor].to_s == "failed" && action.to_s != "destroy"
+
  scope = if opts[:ids]
- if opts[:flavor] == 'failed'
- Delayed::Job::Failed.where(:id => opts[:ids])
- else
- self.where(:id => opts[:ids])
- end
- elsif opts[:flavor]
+ if opts[:flavor] == "failed"
+ Delayed::Job::Failed.where(id: opts[:ids])
+ else
+ where(id: opts[:ids])
+ end
+ elsif opts[:flavor]

- self.scope_for_flavor(opts[:flavor], opts[:query])
- end
+ scope_for_flavor(opts[:flavor], opts[:query])
+ end

  return 0 unless scope

  case action.to_s
- when 'hold'
+ when "hold"
  scope = scope.where(locked_by: nil)
- scope.update_all(:locked_by => ON_HOLD_LOCKED_BY, :locked_at => db_time_now, :attempts => ON_HOLD_COUNT)
- when 'unhold'
+ scope.update_all(locked_by: ON_HOLD_LOCKED_BY, locked_at: db_time_now, attempts: ON_HOLD_COUNT)
+ when "unhold"
  now = db_time_now
  scope = scope.where(locked_by: ON_HOLD_LOCKED_BY)
- scope.update_all(["locked_by = NULL, locked_at = NULL, attempts = 0, run_at = (CASE WHEN run_at > ? THEN run_at ELSE ? END), failed_at = NULL", now, now])
- when 'destroy'
- scope = scope.where("locked_by IS NULL OR locked_by=?", ON_HOLD_LOCKED_BY) unless opts[:flavor] == 'failed'
+ scope.update_all([<<~SQL.squish, now, now])
+ locked_by=NULL, locked_at=NULL, attempts=0, run_at=(CASE WHEN run_at > ? THEN run_at ELSE ? END), failed_at=NULL
+ SQL
+ when "destroy"
+ scope = scope.where("locked_by IS NULL OR locked_by=?", ON_HOLD_LOCKED_BY) unless opts[:flavor] == "failed"
  scope.delete_all
  end
  end
@@ -240,16 +292,17 @@ module Delayed
  def self.tag_counts(flavor,
  limit,
  offset = 0)
- raise(ArgumentError, "invalid flavor: #{flavor}") unless %w(current all).include?(flavor.to_s)
+ raise(ArgumentError, "invalid flavor: #{flavor}") unless %w[current all].include?(flavor.to_s)
+
  scope = case flavor.to_s
- when 'current'
- self.current
- when 'all'
- self
- end
+ when "current"
+ current
+ when "all"
+ self
+ end

  scope = scope.group(:tag).offset(offset).limit(limit)
- scope.order(Arel.sql("COUNT(tag) DESC")).count.map { |t,c| { :tag => t, :count => c } }
+ scope.order(Arel.sql("COUNT(tag) DESC")).count.map { |t, c| { tag: t, count: c } }
  end

  def self.maybe_silence_periodic_log(&block)
@@ -273,7 +326,7 @@ module Delayed

  loop do
  jobs = maybe_silence_periodic_log do
- if connection.adapter_name == 'PostgreSQL' && !Settings.select_random_from_batch
+ if connection.adapter_name == "PostgreSQL" && !Settings.select_random_from_batch
  # In Postgres, we can lock a job and return which row was locked in a single
  # query by using RETURNING. Combine that with the ROW_NUMBER() window function
  # to assign a distinct locked_at value to each job locked, when doing multiple
@@ -281,22 +334,20 @@ module Delayed
  effective_worker_names = Array(worker_names)

  lock = nil
- lock = "FOR UPDATE SKIP LOCKED" if connection.postgresql_version >= 90500
+ lock = "FOR UPDATE SKIP LOCKED" if connection.postgresql_version >= 90_500
  target_jobs = all_available(queue,
  min_priority,
  max_priority,
- forced_latency: forced_latency).
- limit(effective_worker_names.length + prefetch).
- lock(lock)
- jobs_with_row_number = all.from(target_jobs).
- select("id, ROW_NUMBER() OVER () AS row_number")
+ forced_latency: forced_latency)
+ .limit(effective_worker_names.length + prefetch)
+ .lock(lock)
+ jobs_with_row_number = all.from(target_jobs)
+ .select("id, ROW_NUMBER() OVER () AS row_number")
  updates = +"locked_by = CASE row_number "
  effective_worker_names.each_with_index do |worker, i|
  updates << "WHEN #{i + 1} THEN #{connection.quote(worker)} "
  end
- if prefetch_owner
- updates << "ELSE #{connection.quote(prefetch_owner)} "
- end
+ updates << "ELSE #{connection.quote(prefetch_owner)} " if prefetch_owner
  updates << "END, locked_at = #{connection.quote(db_time_now)}"

  # Originally this was done with a subquery, but this allows the query planner to
@@ -306,22 +357,22 @@ module Delayed
  # For more details, see:
  # * https://dba.stackexchange.com/a/69497/55285
  # * https://github.com/feikesteenbergen/demos/blob/b7ecee8b2a79bf04cbcd74972e6bfb81903aee5d/bugs/update_limit_bug.txt
- query = "WITH limited_jobs AS (#{jobs_with_row_number.to_sql}) " \
- "UPDATE #{quoted_table_name} SET #{updates} FROM limited_jobs WHERE limited_jobs.id=#{quoted_table_name}.id " \
- "RETURNING #{quoted_table_name}.*"
+ query = <<~SQL.squish
+ WITH limited_jobs AS (#{jobs_with_row_number.to_sql})
+ UPDATE #{quoted_table_name} SET #{updates} FROM limited_jobs WHERE limited_jobs.id=#{quoted_table_name}.id
+ RETURNING #{quoted_table_name}.*
+ SQL

  jobs = find_by_sql(query)
  # because this is an atomic query, we don't have to return more jobs than we needed
  # to try and lock them, nor is there a possibility we need to try again because
  # all of the jobs we tried to lock had already been locked by someone else
- if worker_names.is_a?(Array)
- result = jobs.index_by(&:locked_by)
- # all of the prefetched jobs can come back as an array
- result[prefetch_owner] = jobs.select { |j| j.locked_by == prefetch_owner } if prefetch_owner
- return result
- else
- return jobs.first
- end
+ return jobs.first unless worker_names.is_a?(Array)
+
+ result = jobs.index_by(&:locked_by)
+ # all of the prefetched jobs can come back as an array
+ result[prefetch_owner] = jobs.select { |j| j.locked_by == prefetch_owner } if prefetch_owner
+ return result
  else
  batch_size = Settings.fetch_batch_size
  batch_size *= worker_names.length if worker_names.is_a?(Array)
@@ -331,13 +382,13 @@ module Delayed
  if jobs.empty?
  return worker_names.is_a?(Array) ? {} : nil
  end
- if Settings.select_random_from_batch
- jobs = jobs.sort_by { rand }
- end
+
+ jobs = jobs.sort_by { rand } if Settings.select_random_from_batch
  if worker_names.is_a?(Array)
  result = {}
  jobs.each do |job|
  break if worker_names.empty?
+
  worker_name = worker_names.first
  if job.send(:lock_exclusively!, worker_name)
  result[worker_name] = job
@@ -346,10 +397,10 @@ module Delayed
  end
  return result
  else
- job = jobs.detect do |job|
+ locked_job = jobs.detect do |job|
  job.send(:lock_exclusively!, worker_names)
  end
- return job if job
+ return locked_job if locked_job
  end
  end
  end
@@ -371,27 +422,9 @@ module Delayed
  check_queue(queue)
  check_priorities(min_priority, max_priority)

- self.ready_to_run(forced_latency: forced_latency).
- where(:priority => min_priority..max_priority, :queue => queue).
- by_priority
- end
-
- # used internally by create_singleton to take the appropriate lock
- # depending on the db driver
- def self.transaction_for_singleton(strand, on_conflict)
- return yield if on_conflict == :loose
- self.transaction do
- if on_conflict == :patient
- pg_function = 'pg_try_advisory_xact_lock'
- execute_method = :select_value
- else
- pg_function = 'pg_advisory_xact_lock'
- execute_method = :execute
- end
- result = connection.send(execute_method, sanitize_sql(["SELECT #{pg_function}(#{connection.quote_table_name('half_md5_as_bigint')}(?))", strand]))
- return if result == false && on_conflict == :patient
- yield
- end
+ ready_to_run(forced_latency: forced_latency)
+ .where(priority: min_priority..max_priority, queue: queue)
+ .by_priority
  end

  # Create the job on the specified strand, but only if there aren't any
@@ -399,10 +432,11 @@ module Delayed
  # (in other words, the job will still be created if there's another job
  # on the strand but it's already running)
  def self.create_singleton(options)
- strand = options[:strand]
+ strand = options[:singleton]
  on_conflict = options.delete(:on_conflict) || :use_earliest
- transaction_for_singleton(strand, on_conflict) do
- job = self.where(:strand => strand, :locked_at => nil).next_in_strand_order.first
+
+ transaction_for_singleton(singleton, on_conflict) do
+ job = where(strand: strand, locked_at: nil).next_in_strand_order.first
  new_job = new(options)
  if job
  new_job.initialize_defaults
@@ -426,12 +460,22 @@ module Delayed

  def self.processes_locked_locally(name: nil)
  name ||= Socket.gethostname rescue x
- where("locked_by LIKE ?", "#{name}:%").pluck(:locked_by).map{|locked_by| locked_by.split(":").last.to_i}
+ where("locked_by LIKE ?", "#{name}:%").pluck(:locked_by).map { |locked_by| locked_by.split(":").last.to_i }
+ end
+
+ def self.prefetch_jobs_lock_name
+ "Delayed::Job.unlock_orphaned_prefetched_jobs"
  end

  def self.unlock_orphaned_prefetched_jobs
- horizon = db_time_now - Settings.parent_process[:prefetched_jobs_timeout] * 4
- where("locked_by LIKE 'prefetch:%' AND locked_at<?", horizon).update_all(locked_at: nil, locked_by: nil)
+ transaction do
+ # for db performance reasons, we only need one process doing this at a time
+ # so if we can't get an advisory lock, just abort. we'll try again soon
+ return unless attempt_advisory_lock(prefetch_jobs_lock_name)
+
+ horizon = db_time_now - (Settings.parent_process[:prefetched_jobs_timeout] * 4)
+ where("locked_by LIKE 'prefetch:%' AND locked_at<?", horizon).update_all(locked_at: nil, locked_by: nil)
+ end
  end

  def self.unlock(jobs)
@@ -449,12 +493,14 @@ module Delayed
  def lock_exclusively!(worker)
  now = self.class.db_time_now
  # We don't own this job so we will update the locked_by name and the locked_at
- affected_rows = self.class.where("id=? AND locked_at IS NULL AND run_at<=?", self, now).update_all(:locked_at => now, :locked_by => worker)
+ affected_rows = self.class.where("id=? AND locked_at IS NULL AND run_at<=?", self, now).update_all(
+ locked_at: now, locked_by: worker
+ )
  if affected_rows == 1
  mark_as_locked!(now, worker)
- return true
+ true
  else
- return false
+ false
  end
  end

@@ -464,9 +510,9 @@ module Delayed
  affected_rows = self.class.where(id: self, locked_by: from).update_all(locked_at: now, locked_by: to)
  if affected_rows == 1
  mark_as_locked!(now, to)
- return true
+ true
  else
- return false
+ false
  end
  end

@@ -478,34 +524,43 @@ module Delayed
  if respond_to?(:changes_applied)
  changes_applied
  else
- changed_attributes['locked_at'] = time
- changed_attributes['locked_by'] = worker
+ changed_attributes["locked_at"] = time
+ changed_attributes["locked_by"] = worker
  end
  end
  protected :lock_exclusively!, :mark_as_locked!

  def create_and_lock!(worker)
  raise "job already exists" unless new_record?
+
+ # we don't want to process unique constraint violations of
+ # running singleton jobs; always do it as two steps
+ if singleton
+ single_step_create
+ lock_exclusively!(worker)
+ return
+ end
+
  self.locked_at = Delayed::Job.db_time_now
  self.locked_by = worker
  single_step_create
  end

  def fail!
- attrs = self.attributes
- attrs['original_job_id'] = attrs.delete('id')
- attrs['failed_at'] ||= self.class.db_time_now
- attrs.delete('next_in_strand')
- attrs.delete('max_concurrent')
+ attrs = attributes
+ attrs["original_job_id"] = attrs.delete("id")
+ attrs["failed_at"] ||= self.class.db_time_now
+ attrs.delete("next_in_strand")
+ attrs.delete("max_concurrent")
  self.class.transaction do
  failed_job = Failed.create(attrs)
- self.destroy
+ destroy
  failed_job
  end
  rescue
  # we got an error while failing the job -- we need to at least get
  # the job out of the queue
- self.destroy
+ destroy
  # re-raise so the worker logs the error, at least
  raise
  end
@@ -513,20 +568,8 @@ module Delayed
  class Failed < Job
  include Delayed::Backend::Base
  self.table_name = :failed_jobs
- # Rails hasn't completely loaded yet, and setting the table name will cache some stuff
- # so reset that cache so that it will load correctly after Rails is all loaded
- # It's fixed in Rails 5 to not cache anything when you set the table_name
- if Rails.version < '5' && Rails.version >= '4.2'
- @arel_engine = nil
- @arel_table = nil
- end
- end
- if Rails.version < '5' && Rails.version >= '4.2'
- @arel_engine = nil
- @arel_table = nil
  end
  end
-
  end
  end
  end
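
For orientation, the hunks above replace the old strand-plus-advisory-lock path for deduplicated jobs with a singleton column and an INSERT ... ON CONFLICT (singleton) clause whose behavior is picked by the new on_conflict option (:use_earliest by default, :overwrite, or :patient/:loose for DO NOTHING). A minimal usage sketch follows, assuming the delay/enqueue options forward singleton: and on_conflict: to job creation as the attribute names in this diff suggest; NightlyReport is a hypothetical caller, not part of the gem.

  # Hypothetical enqueue calls (NightlyReport is illustrative only), assuming
  # :singleton and :on_conflict are accepted as enqueue options in 3.0.0.

  # Keep at most one pending job; if one is already queued and unlocked, the
  # earlier run_at wins (the default :use_earliest branch of ON CONFLICT).
  NightlyReport.delay(singleton: "nightly_report").generate

  # Replace the pending job's run_at and handler instead (the :overwrite branch).
  NightlyReport.delay(singleton: "nightly_report", on_conflict: :overwrite).generate

  # Leave an existing pending job untouched (the :patient / :loose DO NOTHING branch).
  NightlyReport.delay(singleton: "nightly_report", on_conflict: :loose).generate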