inst-jobs 2.3.1 → 2.4.1

Files changed (95)
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
  3. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
  4. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
  5. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
  6. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
  7. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
  8. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
  9. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
  10. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
  11. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
  12. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
  13. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
  14. data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
  15. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
  16. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
  17. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
  18. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
  19. data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
  20. data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
  21. data/db/migrate/20210812210128_add_singleton_column.rb +203 -0
  22. data/exe/inst_jobs +3 -2
  23. data/lib/delayed/backend/active_record.rb +182 -148
  24. data/lib/delayed/backend/base.rb +80 -69
  25. data/lib/delayed/batch.rb +11 -9
  26. data/lib/delayed/cli.rb +98 -84
  27. data/lib/delayed/core_ext/kernel.rb +4 -2
  28. data/lib/delayed/daemon.rb +70 -74
  29. data/lib/delayed/job_tracking.rb +26 -25
  30. data/lib/delayed/lifecycle.rb +27 -24
  31. data/lib/delayed/log_tailer.rb +17 -17
  32. data/lib/delayed/logging.rb +13 -16
  33. data/lib/delayed/message_sending.rb +42 -51
  34. data/lib/delayed/performable_method.rb +5 -7
  35. data/lib/delayed/periodic.rb +66 -65
  36. data/lib/delayed/plugin.rb +2 -4
  37. data/lib/delayed/pool.rb +198 -192
  38. data/lib/delayed/server/helpers.rb +6 -6
  39. data/lib/delayed/server.rb +51 -54
  40. data/lib/delayed/settings.rb +93 -81
  41. data/lib/delayed/testing.rb +21 -22
  42. data/lib/delayed/version.rb +1 -1
  43. data/lib/delayed/work_queue/in_process.rb +21 -17
  44. data/lib/delayed/work_queue/parent_process/client.rb +55 -53
  45. data/lib/delayed/work_queue/parent_process/server.rb +215 -209
  46. data/lib/delayed/work_queue/parent_process.rb +52 -53
  47. data/lib/delayed/worker/consul_health_check.rb +21 -19
  48. data/lib/delayed/worker/health_check.rb +21 -12
  49. data/lib/delayed/worker/null_health_check.rb +3 -1
  50. data/lib/delayed/worker/process_helper.rb +8 -9
  51. data/lib/delayed/worker.rb +271 -261
  52. data/lib/delayed/yaml_extensions.rb +12 -10
  53. data/lib/delayed_job.rb +37 -38
  54. data/lib/inst-jobs.rb +1 -1
  55. data/spec/active_record_job_spec.rb +128 -135
  56. data/spec/delayed/cli_spec.rb +7 -7
  57. data/spec/delayed/daemon_spec.rb +8 -8
  58. data/spec/delayed/message_sending_spec.rb +8 -9
  59. data/spec/delayed/periodic_spec.rb +13 -12
  60. data/spec/delayed/server_spec.rb +38 -38
  61. data/spec/delayed/settings_spec.rb +26 -25
  62. data/spec/delayed/work_queue/in_process_spec.rb +7 -7
  63. data/spec/delayed/work_queue/parent_process/client_spec.rb +16 -12
  64. data/spec/delayed/work_queue/parent_process/server_spec.rb +43 -40
  65. data/spec/delayed/work_queue/parent_process_spec.rb +21 -21
  66. data/spec/delayed/worker/consul_health_check_spec.rb +22 -22
  67. data/spec/delayed/worker/health_check_spec.rb +51 -49
  68. data/spec/delayed/worker_spec.rb +28 -25
  69. data/spec/gemfiles/52.gemfile +5 -3
  70. data/spec/gemfiles/52.gemfile.lock +240 -0
  71. data/spec/gemfiles/60.gemfile +5 -3
  72. data/spec/gemfiles/60.gemfile.lock +246 -0
  73. data/spec/gemfiles/61.gemfile +5 -3
  74. data/spec/sample_jobs.rb +45 -15
  75. data/spec/shared/delayed_batch.rb +74 -67
  76. data/spec/shared/delayed_method.rb +143 -102
  77. data/spec/shared/performable_method.rb +39 -38
  78. data/spec/shared/shared_backend.rb +537 -437
  79. data/spec/shared/testing.rb +14 -14
  80. data/spec/shared/worker.rb +155 -147
  81. data/spec/shared_jobs_specs.rb +13 -13
  82. data/spec/spec_helper.rb +43 -40
  83. metadata +73 -52
  84. data/lib/delayed/backend/redis/bulk_update.lua +0 -50
  85. data/lib/delayed/backend/redis/destroy_job.lua +0 -2
  86. data/lib/delayed/backend/redis/enqueue.lua +0 -29
  87. data/lib/delayed/backend/redis/fail_job.lua +0 -5
  88. data/lib/delayed/backend/redis/find_available.lua +0 -3
  89. data/lib/delayed/backend/redis/functions.rb +0 -59
  90. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
  91. data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
  92. data/lib/delayed/backend/redis/job.rb +0 -528
  93. data/lib/delayed/backend/redis/set_running.lua +0 -5
  94. data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
  95. data/spec/redis_job_spec.rb +0 -148
data/db/migrate/20210809145804_add_n_strand_index.rb ADDED
@@ -0,0 +1,12 @@
+ # frozen_string_literal: true
+
+ class AddNStrandIndex < ActiveRecord::Migration[5.2]
+   disable_ddl_transaction!
+
+   def change
+     add_index :delayed_jobs, %i[strand next_in_strand id],
+               name: "n_strand_index",
+               where: "strand IS NOT NULL",
+               algorithm: :concurrently
+   end
+ end
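(Side note, not part of the gem source: the new partial index on (strand, next_in_strand, id) appears aimed at the per-strand scheduling lookups used elsewhere in this release. A minimal sketch of the kind of query it can serve, with a made-up strand name:)

  # Illustrative only -- hypothetical strand name, not from the gem.
  # Finds the next queued-but-not-yet-runnable job on a strand.
  Delayed::Job.where(strand: "sis_import", next_in_strand: false).order(:id).first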
data/db/migrate/20210812210128_add_singleton_column.rb ADDED
@@ -0,0 +1,203 @@
+ # frozen_string_literal: true
+
+ class AddSingletonColumn < ActiveRecord::Migration[5.2]
+   disable_ddl_transaction!
+
+   def change
+     add_column :delayed_jobs, :singleton, :string, if_not_exists: true
+     add_column :failed_jobs, :singleton, :string, if_not_exists: true
+     # only one job can be queued in a singleton
+     add_index :delayed_jobs,
+               :singleton,
+               where: "singleton IS NOT NULL AND locked_by IS NULL",
+               unique: true,
+               name: "index_delayed_jobs_on_singleton_not_running",
+               algorithm: :concurrently
+     # only one job can be running for a singleton
+     add_index :delayed_jobs,
+               :singleton,
+               where: "singleton IS NOT NULL AND locked_by IS NOT NULL",
+               unique: true,
+               name: "index_delayed_jobs_on_singleton_running",
+               algorithm: :concurrently
+
+     reversible do |direction|
+       direction.up do
+         execute(<<~SQL)
+           CREATE OR REPLACE FUNCTION delayed_jobs_after_delete_row_tr_fn () RETURNS trigger AS $$
+           DECLARE
+             running_count integer;
+             should_lock boolean;
+             should_be_precise boolean;
+             update_query varchar;
+             skip_locked varchar;
+           BEGIN
+             IF OLD.strand IS NOT NULL THEN
+               should_lock := true;
+               should_be_precise := OLD.id % (OLD.max_concurrent * 4) = 0;
+
+               IF NOT should_be_precise AND OLD.max_concurrent > 16 THEN
+                 running_count := (SELECT COUNT(*) FROM (
+                   SELECT 1 as one FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+                 ) subquery_for_count);
+                 should_lock := running_count < OLD.max_concurrent;
+               END IF;
+
+               IF should_lock THEN
+                 PERFORM pg_advisory_xact_lock(half_md5_as_bigint(OLD.strand));
+               END IF;
+
+               -- note that we don't really care if the row we're deleting has a singleton, or if it even
+               -- matches the row(s) we're going to update. we just need to make sure that whatever
+               -- singleton we grab isn't already running (which is a simple existence check, since
+               -- the unique indexes ensure there is at most one singleton running, and one queued)
+               update_query := 'UPDATE delayed_jobs SET next_in_strand=true WHERE id IN (
+                 SELECT id FROM delayed_jobs j2
+                   WHERE next_in_strand=false AND
+                     j2.strand=$1.strand AND
+                     (j2.singleton IS NULL OR NOT EXISTS (SELECT 1 FROM delayed_jobs j3 WHERE j3.singleton=j2.singleton AND j3.id<>j2.id))
+                   ORDER BY j2.strand_order_override ASC, j2.id ASC
+                   LIMIT ';
+
+               IF should_be_precise THEN
+                 running_count := (SELECT COUNT(*) FROM (
+                   SELECT 1 FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+                 ) s);
+                 IF running_count < OLD.max_concurrent THEN
+                   update_query := update_query || '($1.max_concurrent - $2)';
+                 ELSE
+                   -- we have too many running already; just bail
+                   RETURN OLD;
+                 END IF;
+               ELSE
+                 update_query := update_query || '1';
+
+                 -- n-strands don't require precise ordering; we can make this query more performant
+                 IF OLD.max_concurrent > 1 THEN
+                   skip_locked := ' SKIP LOCKED';
+                 END IF;
+               END IF;
+
+               update_query := update_query || ' FOR UPDATE' || COALESCE(skip_locked, '') || ')';
+               EXECUTE update_query USING OLD, running_count;
+             ELSIF OLD.singleton IS NOT NULL THEN
+               UPDATE delayed_jobs SET next_in_strand = 't' WHERE singleton=OLD.singleton AND next_in_strand=false;
+             END IF;
+             RETURN OLD;
+           END;
+           $$ LANGUAGE plpgsql;
+         SQL
+         execute(<<~SQL)
+           CREATE OR REPLACE FUNCTION delayed_jobs_before_insert_row_tr_fn () RETURNS trigger AS $$
+           BEGIN
+             RAISE NOTICE 'inserting job';
+             IF NEW.strand IS NOT NULL THEN
+               PERFORM pg_advisory_xact_lock(half_md5_as_bigint(NEW.strand));
+               IF (SELECT COUNT(*) FROM (
+                 SELECT 1 FROM delayed_jobs WHERE strand = NEW.strand AND next_in_strand=true LIMIT NEW.max_concurrent
+               ) s) = NEW.max_concurrent THEN
+                 NEW.next_in_strand := false;
+               END IF;
+             END IF;
+             IF NEW.singleton IS NOT NULL THEN
+               RAISE NOTICE 'inserting job that is a singleton';
+               PERFORM 1 FROM delayed_jobs WHERE singleton = NEW.singleton;
+               IF FOUND THEN
+                 RAISE NOTICE 'and not first';
+                 NEW.next_in_strand := false;
+               END IF;
+             END IF;
+             RETURN NEW;
+           END;
+           $$ LANGUAGE plpgsql;
+         SQL
+       end
+       direction.down do
+         execute(<<~SQL)
+           CREATE OR REPLACE FUNCTION delayed_jobs_after_delete_row_tr_fn () RETURNS trigger AS $$
+           DECLARE
+             running_count integer;
+             should_lock boolean;
+             should_be_precise boolean;
+           BEGIN
+             IF OLD.strand IS NOT NULL THEN
+               should_lock := true;
+               should_be_precise := OLD.id % (OLD.max_concurrent * 4) = 0;
+
+               IF NOT should_be_precise AND OLD.max_concurrent > 16 THEN
+                 running_count := (SELECT COUNT(*) FROM (
+                   SELECT 1 as one FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+                 ) subquery_for_count);
+                 should_lock := running_count < OLD.max_concurrent;
+               END IF;
+
+               IF should_lock THEN
+                 PERFORM pg_advisory_xact_lock(half_md5_as_bigint(OLD.strand));
+               END IF;
+
+               IF should_be_precise THEN
+                 running_count := (SELECT COUNT(*) FROM (
+                   SELECT 1 as one FROM delayed_jobs WHERE strand = OLD.strand AND next_in_strand = 't' LIMIT OLD.max_concurrent
+                 ) subquery_for_count);
+                 IF running_count < OLD.max_concurrent THEN
+                   UPDATE delayed_jobs SET next_in_strand = 't' WHERE id IN (
+                     SELECT id FROM delayed_jobs j2 WHERE next_in_strand = 'f' AND
+                       j2.strand = OLD.strand ORDER BY j2.strand_order_override ASC, j2.id ASC LIMIT (OLD.max_concurrent - running_count) FOR UPDATE
+                   );
+                 END IF;
+               ELSE
+                 -- n-strands don't require precise ordering; we can make this query more performant
+                 IF OLD.max_concurrent > 1 THEN
+                   UPDATE delayed_jobs SET next_in_strand = 't' WHERE id =
+                     (SELECT id FROM delayed_jobs j2 WHERE next_in_strand = 'f' AND
+                       j2.strand = OLD.strand ORDER BY j2.strand_order_override ASC, j2.id ASC LIMIT 1 FOR UPDATE SKIP LOCKED);
+                 ELSE
+                   UPDATE delayed_jobs SET next_in_strand = 't' WHERE id =
+                     (SELECT id FROM delayed_jobs j2 WHERE next_in_strand = 'f' AND
+                       j2.strand = OLD.strand ORDER BY j2.strand_order_override ASC, j2.id ASC LIMIT 1 FOR UPDATE);
+                 END IF;
+               END IF;
+             END IF;
+             RETURN OLD;
+           END;
+           $$ LANGUAGE plpgsql;
+         SQL
+         execute(<<~SQL)
+           CREATE OR REPLACE FUNCTION delayed_jobs_before_insert_row_tr_fn () RETURNS trigger AS $$
+           BEGIN
+             IF NEW.strand IS NOT NULL THEN
+               PERFORM pg_advisory_xact_lock(half_md5_as_bigint(NEW.strand));
+               IF (SELECT COUNT(*) FROM (
+                 SELECT 1 AS one FROM delayed_jobs WHERE strand = NEW.strand LIMIT NEW.max_concurrent
+               ) subquery_for_count) = NEW.max_concurrent THEN
+                 NEW.next_in_strand := 'f';
+               END IF;
+             END IF;
+             RETURN NEW;
+           END;
+           $$ LANGUAGE plpgsql;
+         SQL
+       end
+     end
+
+     connection.transaction do
+       reversible do |direction|
+         direction.up do
+           drop_triggers
+           execute("CREATE TRIGGER delayed_jobs_before_insert_row_tr BEFORE INSERT ON delayed_jobs FOR EACH ROW WHEN (NEW.strand IS NOT NULL OR NEW.singleton IS NOT NULL) EXECUTE PROCEDURE delayed_jobs_before_insert_row_tr_fn()")
+           execute("CREATE TRIGGER delayed_jobs_after_delete_row_tr AFTER DELETE ON delayed_jobs FOR EACH ROW WHEN ((OLD.strand IS NOT NULL OR OLD.singleton IS NOT NULL) AND OLD.next_in_strand=true) EXECUTE PROCEDURE delayed_jobs_after_delete_row_tr_fn()")
+         end
+         direction.down do
+           drop_triggers
+           execute("CREATE TRIGGER delayed_jobs_before_insert_row_tr BEFORE INSERT ON delayed_jobs FOR EACH ROW WHEN (NEW.strand IS NOT NULL) EXECUTE PROCEDURE delayed_jobs_before_insert_row_tr_fn()")
+           execute("CREATE TRIGGER delayed_jobs_after_delete_row_tr AFTER DELETE ON delayed_jobs FOR EACH ROW WHEN (OLD.strand IS NOT NULL AND OLD.next_in_strand = 't') EXECUTE PROCEDURE delayed_jobs_after_delete_row_tr_fn()")
+         end
+       end
+     end
+   end
+
+   def drop_triggers
+     execute("DROP TRIGGER delayed_jobs_before_insert_row_tr ON delayed_jobs")
+     execute("DROP TRIGGER delayed_jobs_after_delete_row_tr ON delayed_jobs")
+   end
+ end
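(For orientation, not part of the gem: with the new column and the two partial unique indexes, a "singleton" is a job identified by a singleton key, with at most one copy queued and at most one copy running. Assuming the enqueue path passes these options through to the backend changes shown later in this diff -- a singleton key plus an on_conflict strategy -- usage might look like the sketch below; the job class and key are made up.)

  # Hypothetical usage sketch; option names come from this diff, not from gem docs.
  NightlyReport = Struct.new(:date) do
    def perform
      puts "building report for #{date}"
    end
  end

  Delayed::Job.enqueue(
    NightlyReport.new(Date.today),
    singleton: "nightly_report",   # at most one queued and one running copy
    on_conflict: :use_earliest     # or :overwrite, :loose, :patient (see single_step_create below)
  )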
data/exe/inst_jobs CHANGED
@@ -1,4 +1,5 @@
  #!/usr/bin/env ruby
+ # frozen_string_literal: true
 
- require File.expand_path('config/environment')
- Delayed::CLI.new.run()
+ require_relative "config/environment"
+ Delayed::CLI.new.run
data/lib/delayed/backend/active_record.rb CHANGED
@@ -1,11 +1,13 @@
  # frozen_string_literal: true
 
- class ActiveRecord::Base
-   def self.load_for_delayed_job(id)
-     if id
-       find(id)
-     else
-       super
+ module ActiveRecord
+   class Base
+     def self.load_for_delayed_job(id)
+       if id
+         find(id)
+       else
+         super
+       end
      end
    end
  end
@@ -31,17 +33,15 @@ module Delayed
 
        class << self
          def create(attributes, &block)
-           return super if connection.prepared_statements
-
+           on_conflict = attributes.delete(:on_conflict)
            # modified from ActiveRecord::Persistence.create and ActiveRecord::Persistence#_insert_record
            job = new(attributes, &block)
-           job.single_step_create
+           job.single_step_create(on_conflict: on_conflict)
          end
        end
 
-       def single_step_create
+       def single_step_create(on_conflict: nil)
          connection = self.class.connection
-         return save if connection.prepared_statements
 
          # a before_save callback that we're skipping
          initialize_defaults
@@ -49,12 +49,10 @@ module Delayed
          current_time = current_time_from_proper_timezone
 
          all_timestamp_attributes_in_model.each do |column|
-           if !attribute_present?(column)
-             _write_attribute(column, current_time)
-           end
+           _write_attribute(column, current_time) unless attribute_present?(column)
          end
 
-         if Rails.version >= '6'
+         if Rails.version >= "6"
            attribute_names = attribute_names_for_partial_writes
            attribute_names = attributes_for_create(attribute_names)
            values = attributes_with_values(attribute_names)
@@ -62,21 +60,57 @@ module Delayed
            attribute_names = partial_writes? ? keys_for_partial_write : self.attribute_names
            values = attributes_with_values_for_create(attribute_names)
          end
+
          im = self.class.arel_table.compile_insert(self.class.send(:_substitute_values, values))
-         sql, _binds = connection.send(:to_sql_and_binds, im, [])
+
+         lock_and_insert = values["strand"] && instance_of?(Job)
+         # can't use prepared statements if we're combining multiple statemenets
+         sql, binds = if lock_and_insert
+                        connection.unprepared_statement do
+                          connection.send(:to_sql_and_binds, im)
+                        end
+                      else
+                        connection.send(:to_sql_and_binds, im)
+                      end
+         sql = +sql
+
+         if singleton && instance_of?(Job)
+           sql << " ON CONFLICT (singleton) WHERE singleton IS NOT NULL AND locked_by IS NULL DO "
+           sql << case on_conflict
+                  when :patient, :loose
+                    "NOTHING"
+                  when :overwrite
+                    "UPDATE SET run_at=EXCLUDED.run_at, handler=EXCLUDED.handler"
+                  else # :use_earliest
+                    "UPDATE SET run_at=EXCLUDED.run_at WHERE EXCLUDED.run_at<delayed_jobs.run_at"
+                  end
+         end
 
          # https://www.postgresql.org/docs/9.5/libpq-exec.html
-         sql = "#{sql} RETURNING id"
-         # > Multiple queries sent in a single PQexec call are processed in a single transaction,
-         # unless there are explicit BEGIN/COMMIT commands included in the query string to divide
-         # it into multiple transactions.
-         # but we don't need to lock when inserting into Delayed::Failed
-         sql = "SELECT pg_advisory_xact_lock(#{connection.quote_table_name('half_md5_as_bigint')}(#{connection.quote(values['strand'])})); #{sql}" if values["strand"] && self.class == Job
-         result = connection.execute(sql, "#{self} Create")
-         self.id = result.values.first.first
-         result.clear
-         @new_record = false
-         changes_applied
+         sql << " RETURNING id"
+
+         if lock_and_insert
+           # > Multiple queries sent in a single PQexec call are processed in a single transaction,
+           # unless there are explicit BEGIN/COMMIT commands included in the query string to divide
+           # it into multiple transactions.
+           # but we don't need to lock when inserting into Delayed::Failed
+           if values["strand"] && instance_of?(Job)
+             fn_name = connection.quote_table_name("half_md5_as_bigint")
+             sql = "SELECT pg_advisory_xact_lock(#{fn_name}(#{connection.quote(values['strand'])})); #{sql}"
+           end
+           result = connection.execute(sql, "#{self} Create")
+           self.id = result.values.first&.first
+           result.clear
+         else
+           result = connection.exec_query(sql, "#{self} Create", binds)
+           self.id = connection.send(:last_inserted_id, result)
+         end
+
+         # it might not get set if there was an existing record, and we didn't update it
+         if id
+           @new_record = false
+           changes_applied
+         end
 
          self
        end
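(To make the new upsert path concrete: for a singleton Job insert with the default :use_earliest strategy, the statement assembled above ends up roughly shaped like the sketch below. Illustrative only; the real column list comes from the model, and this was not captured from an actual run.)

  # Rough shape of the generated SQL for the default :use_earliest strategy.
  sql = <<~SQL
    INSERT INTO delayed_jobs (singleton, strand, run_at, handler /* , ... */)
    VALUES ($1, $2, $3, $4)
    ON CONFLICT (singleton) WHERE singleton IS NOT NULL AND locked_by IS NULL
    DO UPDATE SET run_at=EXCLUDED.run_at WHERE EXCLUDED.run_at<delayed_jobs.run_at
    RETURNING id
  SQL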
@@ -103,9 +137,11 @@ module Delayed
        # to raise the lock level
        before_create :lock_strand_on_create
        def lock_strand_on_create
-         if strand.present? && self.class == Job
-           self.class.connection.execute("SELECT pg_advisory_xact_lock(#{self.class.connection.quote_table_name('half_md5_as_bigint')}(#{self.class.connection.quote(strand)}))")
-         end
+         return unless strand.present? && instance_of?(Job)
+
+         fn_name = self.class.connection.quote_table_name("half_md5_as_bigint")
+         quoted_strand_name = self.class.connection.quote(strand)
+         self.class.connection.execute("SELECT pg_advisory_xact_lock(#{fn_name}(#{quoted_strand_name}))")
        end
 
        # This overwrites the previous behavior
@@ -124,7 +160,7 @@ module Delayed
        end
 
        def self.failed
-         where("failed_at IS NOT NULL")
+         where.not(failed_at: nil)
        end
 
        def self.running
@@ -132,51 +168,54 @@ module Delayed
        end
 
        # a nice stress test:
-       # 10_000.times { |i| Kernel.delay(strand: 's1', run_at: (24.hours.ago + (rand(24.hours.to_i))).system("echo #{i} >> test1.txt") }
+       # 10_000.times do |i|
+       #   Kernel.delay(strand: 's1', run_at: (24.hours.ago + (rand(24.hours.to_i))).system("echo #{i} >> test1.txt")
+       # end
        # 500.times { |i| "ohai".delay(run_at: (12.hours.ago + (rand(24.hours.to_i))).reverse }
        # then fire up your workers
        # you can check out strand correctness: diff test1.txt <(sort -n test1.txt)
-         def self.ready_to_run(forced_latency: nil)
-           now = db_time_now
-           now -= forced_latency if forced_latency
-           where("run_at<=? AND locked_at IS NULL AND next_in_strand=?", now, true)
-         end
+       def self.ready_to_run(forced_latency: nil)
+         now = db_time_now
+         now -= forced_latency if forced_latency
+         where("run_at<=? AND locked_at IS NULL AND next_in_strand=?", now, true)
+       end
+
        def self.by_priority
          order(:priority, :run_at, :id)
        end
 
        # When a worker is exiting, make sure we don't have any locked jobs.
        def self.clear_locks!(worker_name)
-         where(:locked_by => worker_name).update_all(:locked_by => nil, :locked_at => nil)
+         where(locked_by: worker_name).update_all(locked_by: nil, locked_at: nil)
        end
 
        def self.strand_size(strand)
-         self.where(:strand => strand).count
+         where(strand: strand).count
        end
 
-       def self.running_jobs()
-         self.running.order(:locked_at)
+       def self.running_jobs
+         running.order(:locked_at)
        end
 
        def self.scope_for_flavor(flavor, query)
          scope = case flavor.to_s
-         when 'current'
-           self.current
-         when 'future'
-           self.future
-         when 'failed'
-           Delayed::Job::Failed
-         when 'strand'
-           self.where(:strand => query)
-         when 'tag'
-           self.where(:tag => query)
-         else
-           raise ArgumentError, "invalid flavor: #{flavor.inspect}"
-         end
-
-         if %w(current future).include?(flavor.to_s)
+                 when "current"
+                   current
+                 when "future"
+                   future
+                 when "failed"
+                   Delayed::Job::Failed
+                 when "strand"
+                   where(strand: query)
+                 when "tag"
+                   where(tag: query)
+                 else
+                   raise ArgumentError, "invalid flavor: #{flavor.inspect}"
+                 end
+
+         if %w[current future].include?(flavor.to_s)
            queue = query.presence || Delayed::Settings.queue
-           scope = scope.where(:queue => queue)
+           scope = scope.where(queue: queue)
          end
 
          scope
@@ -193,8 +232,8 @@ module Delayed
                           limit,
                           offset = 0,
                           query = nil)
-         scope = self.scope_for_flavor(flavor, query)
-         order = flavor.to_s == 'future' ? 'run_at' : 'id desc'
+         scope = scope_for_flavor(flavor, query)
+         order = flavor.to_s == "future" ? "run_at" : "id desc"
          scope.order(order).limit(limit).offset(offset).to_a
        end
 
@@ -202,7 +241,7 @@ module Delayed
        # see list_jobs for documentation on arguments
        def self.jobs_count(flavor,
                            query = nil)
-         scope = self.scope_for_flavor(flavor, query)
+         scope = scope_for_flavor(flavor, query)
          scope.count
        end
 
@@ -211,30 +250,33 @@ module Delayed
        # to specify the jobs to act on, either pass opts[:ids] = [list of job ids]
        # or opts[:flavor] = <some flavor> to perform on all jobs of that flavor
        def self.bulk_update(action, opts)
-         raise("Can't #{action.to_s} failed jobs") if opts[:flavor].to_s == 'failed' && action.to_s != 'destroy'
+         raise("Can't #{action} failed jobs") if opts[:flavor].to_s == "failed" && action.to_s != "destroy"
+
          scope = if opts[:ids]
-           if opts[:flavor] == 'failed'
-             Delayed::Job::Failed.where(:id => opts[:ids])
-           else
-             self.where(:id => opts[:ids])
-           end
-         elsif opts[:flavor]
+                   if opts[:flavor] == "failed"
+                     Delayed::Job::Failed.where(id: opts[:ids])
+                   else
+                     where(id: opts[:ids])
+                   end
+                 elsif opts[:flavor]
 
-           self.scope_for_flavor(opts[:flavor], opts[:query])
-         end
+                   scope_for_flavor(opts[:flavor], opts[:query])
+                 end
 
          return 0 unless scope
 
          case action.to_s
-         when 'hold'
+         when "hold"
            scope = scope.where(locked_by: nil)
-           scope.update_all(:locked_by => ON_HOLD_LOCKED_BY, :locked_at => db_time_now, :attempts => ON_HOLD_COUNT)
-         when 'unhold'
+           scope.update_all(locked_by: ON_HOLD_LOCKED_BY, locked_at: db_time_now, attempts: ON_HOLD_COUNT)
+         when "unhold"
            now = db_time_now
            scope = scope.where(locked_by: ON_HOLD_LOCKED_BY)
-           scope.update_all(["locked_by = NULL, locked_at = NULL, attempts = 0, run_at = (CASE WHEN run_at > ? THEN run_at ELSE ? END), failed_at = NULL", now, now])
-         when 'destroy'
-           scope = scope.where("locked_by IS NULL OR locked_by=?", ON_HOLD_LOCKED_BY) unless opts[:flavor] == 'failed'
+           scope.update_all([<<~SQL.squish, now, now])
+             locked_by=NULL, locked_at=NULL, attempts=0, run_at=(CASE WHEN run_at > ? THEN run_at ELSE ? END), failed_at=NULL
+           SQL
+         when "destroy"
+           scope = scope.where("locked_by IS NULL OR locked_by=?", ON_HOLD_LOCKED_BY) unless opts[:flavor] == "failed"
            scope.delete_all
          end
        end
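(For reference, the bulk_update API touched above is driven either by a flavor or by explicit ids; a usage sketch based only on the signature visible in this hunk -- the queue name and ids are made up.)

  # Hold and release everything currently queued in a hypothetical queue.
  Delayed::Job.bulk_update("hold", flavor: "current", query: "canvas_queue")
  Delayed::Job.bulk_update("unhold", flavor: "current", query: "canvas_queue")

  # Or act on specific job ids.
  Delayed::Job.bulk_update("destroy", ids: [42, 43])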
@@ -245,16 +287,17 @@ module Delayed
        def self.tag_counts(flavor,
                            limit,
                            offset = 0)
-         raise(ArgumentError, "invalid flavor: #{flavor}") unless %w(current all).include?(flavor.to_s)
+         raise(ArgumentError, "invalid flavor: #{flavor}") unless %w[current all].include?(flavor.to_s)
+
          scope = case flavor.to_s
-         when 'current'
-           self.current
-         when 'all'
-           self
-         end
+                 when "current"
+                   current
+                 when "all"
+                   self
+                 end
 
          scope = scope.group(:tag).offset(offset).limit(limit)
-         scope.order(Arel.sql("COUNT(tag) DESC")).count.map { |t,c| { :tag => t, :count => c } }
+         scope.order(Arel.sql("COUNT(tag) DESC")).count.map { |t, c| { tag: t, count: c } }
        end
 
        def self.maybe_silence_periodic_log(&block)
@@ -278,7 +321,7 @@ module Delayed
 
          loop do
            jobs = maybe_silence_periodic_log do
-             if connection.adapter_name == 'PostgreSQL' && !Settings.select_random_from_batch
+             if connection.adapter_name == "PostgreSQL" && !Settings.select_random_from_batch
                # In Postgres, we can lock a job and return which row was locked in a single
                # query by using RETURNING. Combine that with the ROW_NUMBER() window function
                # to assign a distinct locked_at value to each job locked, when doing multiple
@@ -286,22 +329,20 @@ module Delayed
                effective_worker_names = Array(worker_names)
 
                lock = nil
-               lock = "FOR UPDATE SKIP LOCKED" if connection.postgresql_version >= 90500
+               lock = "FOR UPDATE SKIP LOCKED" if connection.postgresql_version >= 90_500
                target_jobs = all_available(queue,
                                            min_priority,
                                            max_priority,
-                                           forced_latency: forced_latency).
-                 limit(effective_worker_names.length + prefetch).
-                 lock(lock)
-               jobs_with_row_number = all.from(target_jobs).
-                 select("id, ROW_NUMBER() OVER () AS row_number")
+                                           forced_latency: forced_latency)
+                             .limit(effective_worker_names.length + prefetch)
+                             .lock(lock)
+               jobs_with_row_number = all.from(target_jobs)
+                                         .select("id, ROW_NUMBER() OVER () AS row_number")
                updates = +"locked_by = CASE row_number "
                effective_worker_names.each_with_index do |worker, i|
                  updates << "WHEN #{i + 1} THEN #{connection.quote(worker)} "
                end
-               if prefetch_owner
-                 updates << "ELSE #{connection.quote(prefetch_owner)} "
-               end
+               updates << "ELSE #{connection.quote(prefetch_owner)} " if prefetch_owner
                updates << "END, locked_at = #{connection.quote(db_time_now)}"
 
                # Originally this was done with a subquery, but this allows the query planner to
@@ -311,22 +352,22 @@ module Delayed
                # For more details, see:
                # * https://dba.stackexchange.com/a/69497/55285
                # * https://github.com/feikesteenbergen/demos/blob/b7ecee8b2a79bf04cbcd74972e6bfb81903aee5d/bugs/update_limit_bug.txt
-               query = "WITH limited_jobs AS (#{jobs_with_row_number.to_sql}) " \
-                 "UPDATE #{quoted_table_name} SET #{updates} FROM limited_jobs WHERE limited_jobs.id=#{quoted_table_name}.id " \
-                 "RETURNING #{quoted_table_name}.*"
+               query = <<~SQL.squish
+                 WITH limited_jobs AS (#{jobs_with_row_number.to_sql})
+                 UPDATE #{quoted_table_name} SET #{updates} FROM limited_jobs WHERE limited_jobs.id=#{quoted_table_name}.id
+                 RETURNING #{quoted_table_name}.*
+               SQL
 
                jobs = find_by_sql(query)
                # because this is an atomic query, we don't have to return more jobs than we needed
                # to try and lock them, nor is there a possibility we need to try again because
                # all of the jobs we tried to lock had already been locked by someone else
-               if worker_names.is_a?(Array)
-                 result = jobs.index_by(&:locked_by)
-                 # all of the prefetched jobs can come back as an array
-                 result[prefetch_owner] = jobs.select { |j| j.locked_by == prefetch_owner } if prefetch_owner
-                 return result
-               else
-                 return jobs.first
-               end
+               return jobs.first unless worker_names.is_a?(Array)
+
+               result = jobs.index_by(&:locked_by)
+               # all of the prefetched jobs can come back as an array
+               result[prefetch_owner] = jobs.select { |j| j.locked_by == prefetch_owner } if prefetch_owner
+               return result
              else
                batch_size = Settings.fetch_batch_size
                batch_size *= worker_names.length if worker_names.is_a?(Array)
@@ -336,13 +377,13 @@ module Delayed
            if jobs.empty?
              return worker_names.is_a?(Array) ? {} : nil
            end
-           if Settings.select_random_from_batch
-             jobs = jobs.sort_by { rand }
-           end
+
+           jobs = jobs.sort_by { rand } if Settings.select_random_from_batch
            if worker_names.is_a?(Array)
              result = {}
              jobs.each do |job|
                break if worker_names.empty?
+
                worker_name = worker_names.first
                if job.send(:lock_exclusively!, worker_name)
                  result[worker_name] = job
@@ -351,10 +392,10 @@ module Delayed
                end
              end
              return result
            else
-             job = jobs.detect do |job|
+             locked_job = jobs.detect do |job|
                job.send(:lock_exclusively!, worker_names)
              end
-             return job if job
+             return locked_job if locked_job
            end
          end
        end
@@ -376,27 +417,9 @@ module Delayed
          check_queue(queue)
          check_priorities(min_priority, max_priority)
 
-         self.ready_to_run(forced_latency: forced_latency).
-           where(:priority => min_priority..max_priority, :queue => queue).
-           by_priority
-       end
-
-       # used internally by create_singleton to take the appropriate lock
-       # depending on the db driver
-       def self.transaction_for_singleton(strand, on_conflict)
-         return yield if on_conflict == :loose
-         self.transaction do
-           if on_conflict == :patient
-             pg_function = 'pg_try_advisory_xact_lock'
-             execute_method = :select_value
-           else
-             pg_function = 'pg_advisory_xact_lock'
-             execute_method = :execute
-           end
-           result = connection.send(execute_method, sanitize_sql(["SELECT #{pg_function}(#{connection.quote_table_name('half_md5_as_bigint')}(?))", strand]))
-           return if result == false && on_conflict == :patient
-           yield
-         end
+         ready_to_run(forced_latency: forced_latency)
+           .where(priority: min_priority..max_priority, queue: queue)
+           .by_priority
        end
 
        # Create the job on the specified strand, but only if there aren't any
@@ -404,10 +427,11 @@ module Delayed
        # (in other words, the job will still be created if there's another job
        # on the strand but it's already running)
        def self.create_singleton(options)
-         strand = options[:strand]
+         strand = options[:singleton]
          on_conflict = options.delete(:on_conflict) || :use_earliest
-         transaction_for_singleton(strand, on_conflict) do
-           job = self.where(:strand => strand, :locked_at => nil).next_in_strand_order.first
+
+         transaction_for_singleton(singleton, on_conflict) do
+           job = where(strand: strand, locked_at: nil).next_in_strand_order.first
            new_job = new(options)
            if job
              new_job.initialize_defaults
@@ -431,7 +455,7 @@ module Delayed
 
        def self.processes_locked_locally(name: nil)
          name ||= Socket.gethostname rescue x
-         where("locked_by LIKE ?", "#{name}:%").pluck(:locked_by).map{|locked_by| locked_by.split(":").last.to_i}
+         where("locked_by LIKE ?", "#{name}:%").pluck(:locked_by).map { |locked_by| locked_by.split(":").last.to_i }
        end
 
        def self.unlock_orphaned_prefetched_jobs
@@ -454,12 +478,14 @@ module Delayed
        def lock_exclusively!(worker)
          now = self.class.db_time_now
          # We don't own this job so we will update the locked_by name and the locked_at
-         affected_rows = self.class.where("id=? AND locked_at IS NULL AND run_at<=?", self, now).update_all(:locked_at => now, :locked_by => worker)
+         affected_rows = self.class.where("id=? AND locked_at IS NULL AND run_at<=?", self, now).update_all(
+           locked_at: now, locked_by: worker
+         )
          if affected_rows == 1
            mark_as_locked!(now, worker)
-           return true
+           true
          else
-           return false
+           false
          end
        end
 
@@ -469,9 +495,9 @@ module Delayed
          affected_rows = self.class.where(id: self, locked_by: from).update_all(locked_at: now, locked_by: to)
          if affected_rows == 1
            mark_as_locked!(now, to)
-           return true
+           true
          else
-           return false
+           false
          end
        end
 
@@ -483,34 +509,43 @@ module Delayed
          if respond_to?(:changes_applied)
            changes_applied
          else
-           changed_attributes['locked_at'] = time
-           changed_attributes['locked_by'] = worker
+           changed_attributes["locked_at"] = time
+           changed_attributes["locked_by"] = worker
          end
        end
        protected :lock_exclusively!, :mark_as_locked!
 
        def create_and_lock!(worker)
          raise "job already exists" unless new_record?
+
+         # we don't want to process unique constraint violations of
+         # running singleton jobs; always do it as two steps
+         if singleton
+           single_step_create
+           lock_exclusively!(worker)
+           return
+         end
+
          self.locked_at = Delayed::Job.db_time_now
          self.locked_by = worker
          single_step_create
        end
 
        def fail!
-         attrs = self.attributes
-         attrs['original_job_id'] = attrs.delete('id')
-         attrs['failed_at'] ||= self.class.db_time_now
-         attrs.delete('next_in_strand')
-         attrs.delete('max_concurrent')
+         attrs = attributes
+         attrs["original_job_id"] = attrs.delete("id")
+         attrs["failed_at"] ||= self.class.db_time_now
+         attrs.delete("next_in_strand")
+         attrs.delete("max_concurrent")
          self.class.transaction do
            failed_job = Failed.create(attrs)
-           self.destroy
+           destroy
            failed_job
          end
        rescue
          # we got an error while failing the job -- we need to at least get
          # the job out of the queue
-         self.destroy
+         destroy
          # re-raise so the worker logs the error, at least
          raise
        end
@@ -520,7 +555,6 @@ module Delayed
            self.table_name = :failed_jobs
          end
        end
-
      end
    end
  end