inst-jobs 2.0.0 → 3.1.0
This diff shows the changes between publicly released versions of the package as they appear in the public registry, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
- data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
- data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
- data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
- data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
- data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
- data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
- data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
- data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
- data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
- data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
- data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
- data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
- data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
- data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
- data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
- data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
- data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
- data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
- data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
- data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
- data/db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb +56 -0
- data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb +27 -0
- data/db/migrate/20211101190934_update_after_delete_trigger_for_singleton_index.rb +137 -0
- data/db/migrate/20211207094200_update_after_delete_trigger_for_singleton_transition_cases.rb +171 -0
- data/db/migrate/20211220112800_fix_singleton_race_condition_insert.rb +59 -0
- data/db/migrate/20211220113000_fix_singleton_race_condition_delete.rb +207 -0
- data/db/migrate/20220127091200_fix_singleton_unique_constraint.rb +31 -0
- data/db/migrate/20220128084800_update_insert_trigger_for_singleton_unique_constraint_change.rb +60 -0
- data/db/migrate/20220128084900_update_delete_trigger_for_singleton_unique_constraint_change.rb +209 -0
- data/db/migrate/20220203063200_remove_old_singleton_index.rb +31 -0
- data/db/migrate/20220328152900_add_failed_jobs_indicies.rb +12 -0
- data/exe/inst_jobs +3 -2
- data/lib/delayed/backend/active_record.rb +226 -168
- data/lib/delayed/backend/base.rb +119 -72
- data/lib/delayed/batch.rb +11 -9
- data/lib/delayed/cli.rb +98 -84
- data/lib/delayed/core_ext/kernel.rb +4 -2
- data/lib/delayed/daemon.rb +70 -74
- data/lib/delayed/job_tracking.rb +26 -25
- data/lib/delayed/lifecycle.rb +28 -23
- data/lib/delayed/log_tailer.rb +17 -17
- data/lib/delayed/logging.rb +13 -16
- data/lib/delayed/message_sending.rb +43 -52
- data/lib/delayed/performable_method.rb +6 -8
- data/lib/delayed/periodic.rb +72 -68
- data/lib/delayed/plugin.rb +2 -4
- data/lib/delayed/pool.rb +205 -168
- data/lib/delayed/rails_reloader_plugin.rb +30 -0
- data/lib/delayed/server/helpers.rb +6 -6
- data/lib/delayed/server.rb +51 -54
- data/lib/delayed/settings.rb +96 -81
- data/lib/delayed/testing.rb +21 -22
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/in_process.rb +21 -17
- data/lib/delayed/work_queue/parent_process/client.rb +55 -53
- data/lib/delayed/work_queue/parent_process/server.rb +245 -207
- data/lib/delayed/work_queue/parent_process.rb +52 -53
- data/lib/delayed/worker/consul_health_check.rb +32 -33
- data/lib/delayed/worker/health_check.rb +35 -27
- data/lib/delayed/worker/null_health_check.rb +3 -1
- data/lib/delayed/worker/process_helper.rb +11 -12
- data/lib/delayed/worker.rb +257 -244
- data/lib/delayed/yaml_extensions.rb +12 -10
- data/lib/delayed_job.rb +37 -37
- data/lib/inst-jobs.rb +1 -1
- data/spec/active_record_job_spec.rb +152 -139
- data/spec/delayed/cli_spec.rb +7 -7
- data/spec/delayed/daemon_spec.rb +10 -9
- data/spec/delayed/message_sending_spec.rb +16 -9
- data/spec/delayed/periodic_spec.rb +14 -21
- data/spec/delayed/server_spec.rb +38 -38
- data/spec/delayed/settings_spec.rb +26 -25
- data/spec/delayed/work_queue/in_process_spec.rb +8 -9
- data/spec/delayed/work_queue/parent_process/client_spec.rb +17 -12
- data/spec/delayed/work_queue/parent_process/server_spec.rb +118 -42
- data/spec/delayed/work_queue/parent_process_spec.rb +21 -23
- data/spec/delayed/worker/consul_health_check_spec.rb +37 -50
- data/spec/delayed/worker/health_check_spec.rb +60 -52
- data/spec/delayed/worker_spec.rb +53 -24
- data/spec/sample_jobs.rb +45 -15
- data/spec/shared/delayed_batch.rb +74 -67
- data/spec/shared/delayed_method.rb +143 -102
- data/spec/shared/performable_method.rb +39 -38
- data/spec/shared/shared_backend.rb +801 -440
- data/spec/shared/testing.rb +14 -14
- data/spec/shared/worker.rb +157 -149
- data/spec/shared_jobs_specs.rb +13 -13
- data/spec/spec_helper.rb +57 -56
- metadata +183 -103
- data/lib/delayed/backend/redis/bulk_update.lua +0 -50
- data/lib/delayed/backend/redis/destroy_job.lua +0 -2
- data/lib/delayed/backend/redis/enqueue.lua +0 -29
- data/lib/delayed/backend/redis/fail_job.lua +0 -5
- data/lib/delayed/backend/redis/find_available.lua +0 -3
- data/lib/delayed/backend/redis/functions.rb +0 -59
- data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
- data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
- data/lib/delayed/backend/redis/job.rb +0 -535
- data/lib/delayed/backend/redis/set_running.lua +0 -5
- data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
- data/spec/gemfiles/42.gemfile +0 -7
- data/spec/gemfiles/50.gemfile +0 -7
- data/spec/gemfiles/51.gemfile +0 -7
- data/spec/gemfiles/52.gemfile +0 -7
- data/spec/gemfiles/60.gemfile +0 -7
- data/spec/redis_job_spec.rb +0 -148
data/lib/delayed_job.rb
CHANGED
@@ -14,43 +14,43 @@ module Delayed
   end
 end
 
-require
-require
-require
-require
-
-
-
-
-require
-
-
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-
-require
-require
-require
-
-require
-require
-
-require
+require "rails"
+require "active_support/core_ext/module/attribute_accessors"
+require "active_record"
+require "after_transaction_commit"
+require "debug_inspector"
+
+require "delayed/core_ext/kernel"
+
+require "delayed/settings"
+require "delayed/yaml_extensions"
+
+require "delayed/backend/base"
+require "delayed/backend/active_record"
+require "delayed/batch"
+require "delayed/cli"
+require "delayed/daemon"
+require "delayed/job_tracking"
+require "delayed/lifecycle"
+require "delayed/log_tailer"
+require "delayed/logging"
+require "delayed/message_sending"
+require "delayed/performable_method"
+require "delayed/periodic"
+require "delayed/plugin"
+require "delayed/pool"
+require "delayed/worker"
+
+require "delayed/worker/health_check"
+require "delayed/worker/consul_health_check"
+require "delayed/worker/null_health_check"
+
+require "delayed/work_queue/in_process"
+require "delayed/work_queue/parent_process"
+
+require "delayed/engine"
 
 Delayed.select_backend(Delayed::Backend::ActiveRecord::Job)
 
-Object.
-Module.
+Object.include Delayed::MessageSending
+Module.include Delayed::MessageSending::ClassMethods
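The entry point now wires in the messaging API with `Object.include Delayed::MessageSending` and `Module.include Delayed::MessageSending::ClassMethods`; the `delay` call itself is unchanged and is exercised heavily in the spec diff below. A minimal usage sketch, assuming the ActiveRecord backend selected above and a `SimpleJob`-style payload class like the one used in the gem's spec suite:

```ruby
# Sketch only: assumes `require "delayed_job"` has run (as in this file) and a
# SimpleJob-like payload class such as the one defined in the gem's specs.

# Any object responds to #delay, which enqueues the receiver and method call
# as a background job instead of running it inline.
"some text".delay(ignore_transaction: true).reverse

# Jobs can also be enqueued explicitly; n_strand limits concurrency per strand.
Delayed::Job.enqueue(SimpleJob.new, n_strand: "njobs")
```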
data/spec/active_record_job_spec.rb
CHANGED
@@ -1,8 +1,6 @@
 # frozen_string_literal: true
 
-
-
-describe 'Delayed::Backed::ActiveRecord::Job' do
+describe "Delayed::Backed::ActiveRecord::Job" do
   before :all do
     Delayed.select_backend(Delayed::Backend::ActiveRecord::Job)
   end
@@ -11,210 +9,214 @@ describe 'Delayed::Backed::ActiveRecord::Job' do
     Delayed::Testing.clear_all!
   end
 
-  include_examples
+  include_examples "a delayed_jobs implementation"
 
-  it "
+  it "recovers as well as possible from a failure failing a job" do
     allow(Delayed::Job::Failed).to receive(:create).and_raise(RuntimeError)
     job = "test".delay(ignore_transaction: true).reverse
     job_id = job.id
-
-
-    Delayed::Job.count.
+    expect { job.fail! }.to raise_error(RuntimeError)
+    expect { Delayed::Job.find(job_id) }.to raise_error(ActiveRecord::RecordNotFound)
+    expect(Delayed::Job.count).to eq(0)
   end
 
   context "when another worker has worked on a task since the job was found to be available, it" do
-    before
-    @job = Delayed::Job.create :
-    @
+    before do
+      @job = Delayed::Job.create payload_object: SimpleJob.new
+      @job_copy_for_worker2 = Delayed::Job.find(@job.id)
     end
 
-    it "
+    it "does not allow a second worker to get exclusive access if already successfully processed by worker1" do
       @job.destroy
-      @
+      expect(@job_copy_for_worker2.send(:lock_exclusively!, "worker2")).to be(false)
    end
 
-    it "
-
-      @
+    it "doesn't allow a second worker to get exclusive access if failed to be " \
+       "processed by worker1 and run_at time is now in future (due to backing off behaviour)" do
+      @job.update(attempts: 1, run_at: 1.day.from_now)
+      expect(@job_copy_for_worker2.send(:lock_exclusively!, "worker2")).to be(false)
    end
 
-    it "
-
-
-
-
-
-
-
-
-        job.save!
-      end
-      founds.uniq.size.should > 1
-    ensure
-      Delayed::Settings.select_random_from_batch = false
+    it "selects the next job at random if enabled" do
+      Delayed::Settings.select_random_from_batch = true
+      15.times { "test".delay.length }
+      founds = []
+      15.times do
+        job = Delayed::Job.get_and_lock_next_available("tester")
+        founds << job
+        job.unlock
+        job.save!
      end
+      expect(founds.uniq.size).to be > 1
+    ensure
+      Delayed::Settings.select_random_from_batch = false
    end
  end
 
-  it "
-    job = Delayed::Job.create :
-    job.send(:lock_exclusively!,
+  it "unlocks a successfully locked job and persist the job's unlocked state" do
+    job = Delayed::Job.create payload_object: SimpleJob.new
+    expect(job.send(:lock_exclusively!, "worker1")).to be(true)
    job.reload
    job.unlock
    job.save!
    job.reload
-    job.locked_by.
-    job.locked_at.
+    expect(job.locked_by).to be_nil
+    expect(job.locked_at).to be_nil
  end
 
  describe "bulk_update failed jobs" do
    context "holding/unholding failed jobs" do
-      before
-      @job = Delayed::Job.create :
-      Delayed::Job.get_and_lock_next_available(
+      before do
+        @job = Delayed::Job.create payload_object: SimpleJob.new
+        expect(Delayed::Job.get_and_lock_next_available("worker1")).to eq(@job)
        @job.fail!
      end
 
-      it "
-        expect { Delayed::Job.bulk_update(
+      it "raises error when holding failed jobs" do
+        expect { Delayed::Job.bulk_update("hold", flavor: "failed", query: @query) }.to raise_error(RuntimeError)
      end
 
-      it "
-        expect { Delayed::Job.bulk_update(
+      it "raises error unholding failed jobs" do
+        expect { Delayed::Job.bulk_update("unhold", flavor: "failed", query: @query) }.to raise_error(RuntimeError)
      end
    end
 
    context "deleting failed jobs" do
-      before
-      2.times
-        j = Delayed::Job.create(:
-        j.send(:lock_exclusively!,
+      before do
+        2.times do
+          j = Delayed::Job.create(payload_object: SimpleJob.new)
+          expect(j.send(:lock_exclusively!, "worker1")).to be(true)
          j.fail!
-
+        end
      end
 
-      it "
-        target_ids = Delayed::Job::Failed.all[0..2].map
-        Delayed::Job.bulk_update(
+      it "deletes failed jobs by id" do
+        target_ids = Delayed::Job::Failed.all[0..2].map(&:id)
+        expect(Delayed::Job.bulk_update("destroy", ids: target_ids, flavor: "failed",
+                                        query: @query)).to eq(target_ids.length)
      end
 
-      it "
+      it "deletes all failed jobs" do
        failed_count = Delayed::Job::Failed.count
-        Delayed::Job.bulk_update(
+        expect(Delayed::Job.bulk_update("destroy", flavor: "failed", query: @query)).to eq(failed_count)
+      end
+
+      it "deletes all failed jobs before a given date" do
+        Delayed::Job::Failed.first.update!(failed_at: 3.hours.ago)
+        Delayed::Job::Failed.last.update!(failed_at: 1.hour.ago)
+
+        expect(Delayed::Job::Failed.count).to eq 2
+        Delayed::Job::Failed.cleanup_old_jobs(2.hours.ago)
+        expect(Delayed::Job::Failed.count).to eq 1
      end
    end
  end
 
-  context
-    it "
-      expect(Delayed::Job).
-      job = Delayed::Job.enqueue(SimpleJob.new, :
-      job.strand.
+  context "n_strand" do
+    it "defaults to 1" do
+      expect(Delayed::Job).not_to receive(:rand)
+      job = Delayed::Job.enqueue(SimpleJob.new, n_strand: "njobs")
+      expect(job.strand).to eq("njobs")
    end
 
-    it "
-      change_setting(Delayed::Settings, :num_strands,
-
-
-
+    it "sets max_concurrent based on num_strands" do
+      change_setting(Delayed::Settings, :num_strands, lambda { |strand_name|
+        expect(strand_name).to eql "njobs"
+        "3"
+      }) do
+        job = Delayed::Job.enqueue(SimpleJob.new, n_strand: "njobs")
+        expect(job.strand).to eq("njobs")
+        expect(job.max_concurrent).to eq(3)
      end
    end
 
    context "with two parameters" do
-      it "
-        job = Delayed::Job.enqueue(SimpleJob.new, n_strand: [
-        job.strand.
-        change_setting(Delayed::Settings, :num_strands,
+      it "uses the first param as the setting to read" do
+        job = Delayed::Job.enqueue(SimpleJob.new, n_strand: %w[njobs 123])
+        expect(job.strand).to eq("njobs/123")
+        change_setting(Delayed::Settings, :num_strands, lambda { |strand_name|
          case strand_name
-          when "njobs"
-          else nil
+          when "njobs" then 3
          end
        }) do
-          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: [
-          job.strand.
-          job.max_concurrent.
+          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: %w[njobs 123])
+          expect(job.strand).to eq("njobs/123")
+          expect(job.max_concurrent).to eq(3)
        end
      end
 
-      it "
-        change_setting(Delayed::Settings, :num_strands,
+      it "allows overridding the setting based on the second param" do
+        change_setting(Delayed::Settings, :num_strands, lambda { |strand_name|
          case strand_name
-          when "njobs/123"
-          else nil
+          when "njobs/123" then 5
          end
        }) do
-          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: [
-          job.strand.
-          job.max_concurrent.
-          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: [
-          job.strand.
-          job.max_concurrent.
+          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: %w[njobs 123])
+          expect(job.strand).to eq("njobs/123")
+          expect(job.max_concurrent).to eq(5)
+          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: %w[njobs 456])
+          expect(job.strand).to eq("njobs/456")
+          expect(job.max_concurrent).to eq(1)
        end
 
-        change_setting(Delayed::Settings, :num_strands,
+        change_setting(Delayed::Settings, :num_strands, lambda { |strand_name|
          case strand_name
-          when "njobs/123"
-          when "njobs"
-          else nil
+          when "njobs/123" then 5
+          when "njobs" then 3
          end
        }) do
-          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: [
-          job.strand.
-          job.max_concurrent.
-          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: [
-          job.strand.
-          job.max_concurrent.
+          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: %w[njobs 123])
+          expect(job.strand).to eq("njobs/123")
+          expect(job.max_concurrent).to eq(5)
+          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: %w[njobs 456])
+          expect(job.strand).to eq("njobs/456")
+          expect(job.max_concurrent).to eq(3)
        end
      end
    end
 
    context "max_concurrent triggers" do
-
-        skip("postgres specific") unless ActiveRecord::Base.connection.adapter_name == 'PostgreSQL'
-      end
-
-      it "should set one job as next_in_strand at a time with max_concurrent of 1" do
+      it "sets one job as next_in_strand at a time with max_concurrent of 1" do
        job1 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
        job1.reload
-        job1.next_in_strand.
+        expect(job1.next_in_strand).to be(true)
        job2 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
        job2.reload
-        job2.next_in_strand.
+        expect(job2.next_in_strand).to be(false)
        run_job(job1)
        job2.reload
-        job2.next_in_strand.
+        expect(job2.next_in_strand).to be(true)
      end
 
-      it "
-        change_setting(Delayed::Settings, :num_strands,
+      it "sets multiple jobs as next_in_strand at a time based on max_concurrent" do
+        change_setting(Delayed::Settings, :num_strands, lambda { |strand_name|
          case strand_name
-          when "njobs"
-          else nil
+          when "njobs" then 2
          end
        }) do
          job1 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
          job1.reload
-          job1.next_in_strand.
+          expect(job1.next_in_strand).to be(true)
          job2 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
          job2.reload
-          job2.next_in_strand.
+          expect(job2.next_in_strand).to be(true)
          job3 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
          job3.reload
-          job3.next_in_strand.
+          expect(job3.next_in_strand).to be(false)
          run_job(job1)
          job3.reload
-          job3.next_in_strand.
+          expect(job3.next_in_strand).to be(true)
        end
      end
    end
  end
 
  it "unlocks orphaned prefetched_jobs" do
-    job1 = Delayed::Job.new(:
-    job2 = Delayed::Job.new(:
+    job1 = Delayed::Job.new(tag: "tag")
+    job2 = Delayed::Job.new(tag: "tag")
 
    job1.create_and_lock!("prefetch:a")
-    job1.locked_at = Delayed::Job.db_time_now - 15 * 60
+    job1.locked_at = Delayed::Job.db_time_now - (15 * 60)
    job1.save!
    job2.create_and_lock!("prefetch:a")
 
@@ -222,69 +224,80 @@ describe 'Delayed::Backed::ActiveRecord::Job' do
    expect(Delayed::Job.unlock_orphaned_prefetched_jobs).to eq 0
 
    expect(Delayed::Job.find(job1.id).locked_by).to be_nil
-    expect(Delayed::Job.find(job2.id).locked_by).to eq
+    expect(Delayed::Job.find(job2.id).locked_by).to eq "prefetch:a"
  end
 
  it "gets process ids from locked_by" do
-    3
-
-    expect(Delayed::Job.processes_locked_locally(name:
-    expect(Delayed::Job.processes_locked_locally(name:
+    Array.new(3) { Delayed::Job.create payload_object: SimpleJob.new }
+    Delayed::Job.get_and_lock_next_available(["job42:2", "job42:9001"])
+    expect(Delayed::Job.processes_locked_locally(name: "job42").sort).to eq [2, 9001]
+    expect(Delayed::Job.processes_locked_locally(name: "jobnotme")).to be_empty
  end
 
  it "allows fetching multiple jobs at once" do
-    jobs = 3
-    locked_jobs = Delayed::Job.get_and_lock_next_available([
-    locked_jobs.length.
-    locked_jobs.keys.
-    locked_jobs.values.
-    jobs.map(&:reload).map(&:locked_by).
+    jobs = Array.new(3) { Delayed::Job.create payload_object: SimpleJob.new }
+    locked_jobs = Delayed::Job.get_and_lock_next_available(%w[worker1 worker2])
+    expect(locked_jobs.length).to eq(2)
+    expect(locked_jobs.keys).to eq(%w[worker1 worker2])
+    expect(locked_jobs.values).to eq(jobs[0..1])
+    expect(jobs.map(&:reload).map(&:locked_by)).to eq(["worker1", "worker2", nil])
  end
 
  it "allows fetching extra jobs" do
-    jobs = 5
-    locked_jobs = Delayed::Job.get_and_lock_next_available([
+    jobs = Array.new(5) { Delayed::Job.create payload_object: SimpleJob.new }
+    locked_jobs = Delayed::Job.get_and_lock_next_available(["worker1"],
                                                           prefetch: 2,
-                                                           prefetch_owner:
+                                                           prefetch_owner: "work_queue")
    expect(locked_jobs.length).to eq 2
-    expect(locked_jobs.keys).to eq [
-    expect(locked_jobs[
-    expect(locked_jobs[
-    jobs.map(&:reload).map(&:locked_by).
+    expect(locked_jobs.keys).to eq %w[worker1 work_queue]
+    expect(locked_jobs["worker1"]).to eq jobs[0]
+    expect(locked_jobs["work_queue"]).to eq jobs[1..2]
+    expect(jobs.map(&:reload).map(&:locked_by)).to eq(["worker1", "work_queue", "work_queue", nil, nil])
  end
 
-
-  it "should not find jobs scheduled for now when we have forced latency" do
+  it "does not find jobs scheduled for now when we have forced latency" do
    job = create_job
-    Delayed::Job.get_and_lock_next_available(
-    Delayed::Job.get_and_lock_next_available(
+    expect(Delayed::Job.get_and_lock_next_available("worker", forced_latency: 60.0)).to be_nil
+    expect(Delayed::Job.get_and_lock_next_available("worker")).to eq job
  end
 
  context "non-transactional", non_transactional: true do
    it "creates a stranded job in a single statement" do
-      skip "Requires Rails 5.2 or greater" unless Rails.version >= '5.2'
-
      allow(Delayed::Job.connection).to receive(:prepared_statements).and_return(false)
-      allow(Delayed::Job.connection).to receive(:execute).with(be_include("pg_advisory_xact_lock"),
-
+      allow(Delayed::Job.connection).to receive(:execute).with(be_include("pg_advisory_xact_lock"),
+                                                                anything).and_call_original.once
+      expect(Delayed::Job.connection).not_to receive(:insert)
      j = create_job(strand: "test1")
      allow(Delayed::Job.connection).to receive(:execute).and_call_original
      expect(Delayed::Job.find(j.id)).to eq j
    end
 
    it "creates a non-stranded job in a single statement" do
-      skip "Requires Rails 5.2 or greater" unless Rails.version >= '5.2'
-
      allow(Delayed::Job.connection).to receive(:prepared_statements).and_return(false)
      call_count = 0
      allow(Delayed::Job.connection).to receive(:execute).and_wrap_original do |m, (arg1, arg2)|
        call_count += 1
        m.call(arg1, arg2)
      end
-
+      expect(Delayed::Job.connection).not_to receive(:insert)
      j = create_job(strand: "test1")
      expect(call_count).to eq 1
      expect(Delayed::Job.find(j.id)).to eq j
    end
+
+    it "does not lock a stranded failed job creation" do
+      j = create_job(strand: "test1")
+      # query for metadata to ensure it's loaded before we start mucking with the connection
+      Delayed::Backend::ActiveRecord::Job::Failed.new
+
+      allow(Delayed::Job.connection).to receive(:prepared_statements).and_return(false)
+      allow(Delayed::Job.connection).to receive(:execute).and_wrap_original do |original, *args|
+        expect(args.first).not_to include("pg_advisory_xact_lock")
+        original.call(*args)
+      end
+      expect(Delayed::Job.connection).not_to receive(:insert)
+      j.fail!
+      allow(Delayed::Job.connection).to receive(:execute).and_call_original
+    end
  end
 end
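The n_strand examples above drive `Delayed::Settings.num_strands`, a callable that maps a strand name to a per-strand concurrency limit. A sketch of configuring it, based only on the behaviour these specs assert (unknown names fall back to a `max_concurrent` of 1); the setter shape is an assumption drawn from the `change_setting(Delayed::Settings, :num_strands, ...)` helper used above:

```ruby
# Assumption: Delayed::Settings.num_strands is a writable callable, as implied
# by change_setting(Delayed::Settings, :num_strands, ...) in the specs above.
Delayed::Settings.num_strands = lambda do |strand_name|
  case strand_name
  when "njobs/123" then 5 # a per-strand override beats the family default
  when "njobs" then 3     # default for every "njobs/..." strand
  end                     # nil => max_concurrent of 1
end

job = Delayed::Job.enqueue(SimpleJob.new, n_strand: %w[njobs 123])
job.strand         # => "njobs/123"
job.max_concurrent # => 5
```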
data/spec/delayed/cli_spec.rb
CHANGED
@@ -1,22 +1,22 @@
 # frozen_string_literal: true
 
-require
+require "spec_helper"
 
 RSpec.describe Delayed::CLI do
-  describe
-    it
-      cli = described_class.new(%w
+  describe "#parse_cli_options!" do
+    it "correctly parses the --config option" do
+      cli = described_class.new(%w[run --config /path/to/some/file.yml])
      options = cli.parse_cli_options!
-      expect(options).to include config_file:
+      expect(options).to include config_file: "/path/to/some/file.yml"
    end
  end
 
-  describe
+  describe "#run" do
    before do
      expect(Delayed::Settings).to receive(:worker_config).and_return({})
    end
 
-    it
+    it "prints help when no command is given" do
      cli = described_class.new([])
      expect(cli).to receive(:puts).with(/Usage/)
      cli.run
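The CLI spec constructs `Delayed::CLI` with an argv-style array, which is presumably what the updated `exe/inst_jobs` binstub does with `ARGV`. A small sketch mirroring the spec, option parsing only:

```ruby
# Mirrors the spec above: parses options without starting a worker pool.
cli = Delayed::CLI.new(%w[run --config /path/to/some/file.yml])
options = cli.parse_cli_options!
options[:config_file] # => "/path/to/some/file.yml"
```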
data/spec/delayed/daemon_spec.rb
CHANGED
@@ -1,35 +1,36 @@
 # frozen_string_literal: true
 
-require
+require "spec_helper"
 
 RSpec.describe Delayed::Daemon do
+  subject { described_class.new(pid_folder) }
+
  let(:pid_folder) { "/test/pid/folder" }
  let(:pid) { 9999 }
-  let(:subject) { described_class.new(pid_folder) }
 
  before do
    allow(subject).to receive(:pid).and_return(pid)
  end
 
-  describe
-    it
+  describe "#stop" do
+    it "prints status if not running" do
      expect(subject).to receive(:status).with(print: false, pid: pid).and_return(false)
      expect(subject).to receive(:status).with(no_args)
-      expect(Process).
+      expect(Process).not_to receive(:kill)
      subject.stop
    end
 
-    it
+    it "prints status if draining" do
      expect(subject).to receive(:status).with(print: false, pid: pid).and_return(:draining)
      expect(subject).to receive(:status).with(no_args)
-      expect(Process).
+      expect(Process).not_to receive(:kill)
      subject.stop
    end
 
-    it
+    it "sends QUIT by default" do
      expect(subject).to receive(:status).with(print: false, pid: pid).and_return(:running)
      expect(subject).to receive(:puts).with(/Stopping pool/)
-      expect(Process).to receive(:kill).with(
+      expect(Process).to receive(:kill).with("QUIT", pid)
      expect(subject).to receive(:wait).with(false)
      subject.stop
    end
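The daemon spec now defines `subject` as `described_class.new(pid_folder)` instead of overriding `let(:subject)`. Restricted to the calls the spec exercises, a hedged sketch of driving the daemon wrapper directly (the pid folder is the spec's placeholder, not a recommendation):

```ruby
# Uses only methods exercised in the spec above: #status and #stop.
daemon = Delayed::Daemon.new("/test/pid/folder")
daemon.status # reports whether a worker pool is running for this pid file
daemon.stop   # sends QUIT to a running pool; otherwise just prints status
```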