inst-jobs 2.0.0 → 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
- data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
- data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
- data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
- data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
- data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
- data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
- data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
- data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
- data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
- data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
- data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
- data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
- data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
- data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
- data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
- data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
- data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
- data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
- data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
- data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
- data/db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb +56 -0
- data/db/migrate/20210929204903_update_conflicting_singleton_function_to_use_index.rb +27 -0
- data/exe/inst_jobs +3 -2
- data/lib/delayed/backend/active_record.rb +211 -168
- data/lib/delayed/backend/base.rb +110 -72
- data/lib/delayed/batch.rb +11 -9
- data/lib/delayed/cli.rb +98 -84
- data/lib/delayed/core_ext/kernel.rb +4 -2
- data/lib/delayed/daemon.rb +70 -74
- data/lib/delayed/job_tracking.rb +26 -25
- data/lib/delayed/lifecycle.rb +27 -23
- data/lib/delayed/log_tailer.rb +17 -17
- data/lib/delayed/logging.rb +13 -16
- data/lib/delayed/message_sending.rb +43 -52
- data/lib/delayed/performable_method.rb +6 -8
- data/lib/delayed/periodic.rb +72 -68
- data/lib/delayed/plugin.rb +2 -4
- data/lib/delayed/pool.rb +205 -168
- data/lib/delayed/server/helpers.rb +6 -6
- data/lib/delayed/server.rb +51 -54
- data/lib/delayed/settings.rb +94 -81
- data/lib/delayed/testing.rb +21 -22
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/in_process.rb +21 -17
- data/lib/delayed/work_queue/parent_process/client.rb +55 -53
- data/lib/delayed/work_queue/parent_process/server.rb +245 -207
- data/lib/delayed/work_queue/parent_process.rb +52 -53
- data/lib/delayed/worker/consul_health_check.rb +32 -33
- data/lib/delayed/worker/health_check.rb +34 -26
- data/lib/delayed/worker/null_health_check.rb +3 -1
- data/lib/delayed/worker/process_helper.rb +8 -9
- data/lib/delayed/worker.rb +272 -241
- data/lib/delayed/yaml_extensions.rb +12 -10
- data/lib/delayed_job.rb +37 -37
- data/lib/inst-jobs.rb +1 -1
- data/spec/active_record_job_spec.rb +143 -139
- data/spec/delayed/cli_spec.rb +7 -7
- data/spec/delayed/daemon_spec.rb +10 -9
- data/spec/delayed/message_sending_spec.rb +16 -9
- data/spec/delayed/periodic_spec.rb +14 -21
- data/spec/delayed/server_spec.rb +38 -38
- data/spec/delayed/settings_spec.rb +26 -25
- data/spec/delayed/work_queue/in_process_spec.rb +7 -8
- data/spec/delayed/work_queue/parent_process/client_spec.rb +17 -12
- data/spec/delayed/work_queue/parent_process/server_spec.rb +117 -41
- data/spec/delayed/work_queue/parent_process_spec.rb +21 -23
- data/spec/delayed/worker/consul_health_check_spec.rb +37 -50
- data/spec/delayed/worker/health_check_spec.rb +60 -52
- data/spec/delayed/worker_spec.rb +44 -21
- data/spec/sample_jobs.rb +45 -15
- data/spec/shared/delayed_batch.rb +74 -67
- data/spec/shared/delayed_method.rb +143 -102
- data/spec/shared/performable_method.rb +39 -38
- data/spec/shared/shared_backend.rb +550 -437
- data/spec/shared/testing.rb +14 -14
- data/spec/shared/worker.rb +156 -148
- data/spec/shared_jobs_specs.rb +13 -13
- data/spec/spec_helper.rb +53 -55
- metadata +148 -82
- data/lib/delayed/backend/redis/bulk_update.lua +0 -50
- data/lib/delayed/backend/redis/destroy_job.lua +0 -2
- data/lib/delayed/backend/redis/enqueue.lua +0 -29
- data/lib/delayed/backend/redis/fail_job.lua +0 -5
- data/lib/delayed/backend/redis/find_available.lua +0 -3
- data/lib/delayed/backend/redis/functions.rb +0 -59
- data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
- data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
- data/lib/delayed/backend/redis/job.rb +0 -535
- data/lib/delayed/backend/redis/set_running.lua +0 -5
- data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
- data/spec/gemfiles/42.gemfile +0 -7
- data/spec/gemfiles/50.gemfile +0 -7
- data/spec/gemfiles/51.gemfile +0 -7
- data/spec/gemfiles/52.gemfile +0 -7
- data/spec/gemfiles/60.gemfile +0 -7
- data/spec/redis_job_spec.rb +0 -148
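Taken together, the file list shows the shape of the 3.0.0 release: the Redis backend (lib/delayed/backend/redis/*) and the Rails 4.2-6.0 test gemfiles are removed, and new migrations add an n_strand index plus singleton-job columns and triggers. A minimal upgrade sketch follows; the version constraint is inferred from the header above, and the migration step is an assumption based on the new files under db/migrate:

```ruby
# Gemfile: inst-jobs 3.0 drops the Redis backend, so only the ActiveRecord
# backend remains. After bundling, bring in and run the new db/migrate files
# (n_strand index, singleton column, singleton triggers).
gem "inst-jobs", "~> 3.0"
```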
data/lib/delayed_job.rb
CHANGED
@@ -14,43 +14,43 @@ module Delayed
   end
 end
 
-require …
-require …
-require …
-require …
-
-
-
-
-require …
-
-
-require …
-require …
-require …
-require …
-require …
-require …
-require …
-require …
-require …
-require …
-require …
-require …
-require …
-require …
-require …
-
-require …
-require …
-require …
-
-require …
-require …
-
-require …
+require "rails"
+require "active_support/core_ext/module/attribute_accessors"
+require "active_record"
+require "after_transaction_commit"
+require "debug_inspector"
+
+require "delayed/core_ext/kernel"
+
+require "delayed/settings"
+require "delayed/yaml_extensions"
+
+require "delayed/backend/base"
+require "delayed/backend/active_record"
+require "delayed/batch"
+require "delayed/cli"
+require "delayed/daemon"
+require "delayed/job_tracking"
+require "delayed/lifecycle"
+require "delayed/log_tailer"
+require "delayed/logging"
+require "delayed/message_sending"
+require "delayed/performable_method"
+require "delayed/periodic"
+require "delayed/plugin"
+require "delayed/pool"
+require "delayed/worker"
+
+require "delayed/worker/health_check"
+require "delayed/worker/consul_health_check"
+require "delayed/worker/null_health_check"
+
+require "delayed/work_queue/in_process"
+require "delayed/work_queue/parent_process"
+
+require "delayed/engine"
 
 Delayed.select_backend(Delayed::Backend::ActiveRecord::Job)
 
-Object. …
-Module. …
+Object.include Delayed::MessageSending
+Module.include Delayed::MessageSending::ClassMethods
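The last two added lines are what give application code its enqueueing sugar: Object picks up Delayed::MessageSending, so any receiver gains a #delay proxy whose captured method call becomes a job. A minimal usage sketch, assuming a Rails app with the delayed_jobs tables migrated and the ActiveRecord backend selected as above; the Greeter class is purely illustrative:

```ruby
require "delayed_job" # the file shown in this diff

class Greeter
  def hello(name)
    "Hello, #{name}!"
  end
end

# Object includes Delayed::MessageSending, so #delay is available on any
# receiver; the method called on the proxy is enqueued as a Delayed::Job
# instead of running inline.
Greeter.new.delay.hello("world")

# The specs in this diff use the same pattern on plain strings:
"some string".delay.reverse
```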
data/lib/inst-jobs.rb
CHANGED
(content not shown in the source; the file list above records it as +1 -1)

data/spec/active_record_job_spec.rb
CHANGED
@@ -1,8 +1,6 @@
 # frozen_string_literal: true
 
-
-
-describe 'Delayed::Backed::ActiveRecord::Job' do
+describe "Delayed::Backed::ActiveRecord::Job" do
   before :all do
     Delayed.select_backend(Delayed::Backend::ActiveRecord::Job)
   end
@@ -11,210 +9,205 @@ describe 'Delayed::Backed::ActiveRecord::Job' do
     Delayed::Testing.clear_all!
   end
 
-  include_examples …
+  include_examples "a delayed_jobs implementation"
 
-  it "…
+  it "recovers as well as possible from a failure failing a job" do
     allow(Delayed::Job::Failed).to receive(:create).and_raise(RuntimeError)
     job = "test".delay(ignore_transaction: true).reverse
     job_id = job.id
-
-
-    Delayed::Job.count. …
+    expect { job.fail! }.to raise_error(RuntimeError)
+    expect { Delayed::Job.find(job_id) }.to raise_error(ActiveRecord::RecordNotFound)
+    expect(Delayed::Job.count).to eq(0)
   end
 
   context "when another worker has worked on a task since the job was found to be available, it" do
-    before …
-      @job = Delayed::Job.create : …
-      @ …
+    before do
+      @job = Delayed::Job.create payload_object: SimpleJob.new
+      @job_copy_for_worker2 = Delayed::Job.find(@job.id)
     end
 
-    it "…
+    it "does not allow a second worker to get exclusive access if already successfully processed by worker1" do
       @job.destroy
-      @ …
+      expect(@job_copy_for_worker2.send(:lock_exclusively!, "worker2")).to eq(false)
     end
 
-    it "…
-
-      @ …
+    it "doesn't allow a second worker to get exclusive access if failed to be " \
+       "processed by worker1 and run_at time is now in future (due to backing off behaviour)" do
+      @job.update(attempts: 1, run_at: 1.day.from_now)
+      expect(@job_copy_for_worker2.send(:lock_exclusively!, "worker2")).to eq(false)
    end
 
-    it "…
-
-
-
-
-
-
-
-
-        job.save!
-      end
-      founds.uniq.size.should > 1
-    ensure
-      Delayed::Settings.select_random_from_batch = false
+    it "selects the next job at random if enabled" do
+      Delayed::Settings.select_random_from_batch = true
+      15.times { "test".delay.length }
+      founds = []
+      15.times do
+        job = Delayed::Job.get_and_lock_next_available("tester")
+        founds << job
+        job.unlock
+        job.save!
       end
+      expect(founds.uniq.size).to be > 1
+    ensure
+      Delayed::Settings.select_random_from_batch = false
    end
  end
 
-  it "…
-    job = Delayed::Job.create : …
-    job.send(:lock_exclusively!, …
+  it "unlocks a successfully locked job and persist the job's unlocked state" do
+    job = Delayed::Job.create payload_object: SimpleJob.new
+    expect(job.send(:lock_exclusively!, "worker1")).to eq(true)
    job.reload
    job.unlock
    job.save!
    job.reload
-    job.locked_by. …
-    job.locked_at. …
+    expect(job.locked_by).to eq(nil)
+    expect(job.locked_at).to eq(nil)
  end
 
  describe "bulk_update failed jobs" do
    context "holding/unholding failed jobs" do
-      before …
-        @job = Delayed::Job.create : …
-        Delayed::Job.get_and_lock_next_available( …
+      before do
+        @job = Delayed::Job.create payload_object: SimpleJob.new
+        expect(Delayed::Job.get_and_lock_next_available("worker1")).to eq(@job)
        @job.fail!
      end
 
-      it "…
-        expect { Delayed::Job.bulk_update( …
+      it "raises error when holding failed jobs" do
+        expect { Delayed::Job.bulk_update("hold", flavor: "failed", query: @query) }.to raise_error(RuntimeError)
      end
 
-      it "…
-        expect { Delayed::Job.bulk_update( …
+      it "raises error unholding failed jobs" do
+        expect { Delayed::Job.bulk_update("unhold", flavor: "failed", query: @query) }.to raise_error(RuntimeError)
      end
    end
 
    context "deleting failed jobs" do
-      before …
-        2.times …
-          j = Delayed::Job.create(: …
-          j.send(:lock_exclusively!, …
+      before do
+        2.times do
+          j = Delayed::Job.create(payload_object: SimpleJob.new)
+          expect(j.send(:lock_exclusively!, "worker1")).to eq(true)
          j.fail!
-
+        end
      end
 
-      it "…
-        target_ids = Delayed::Job::Failed.all[0..2].map …
-        Delayed::Job.bulk_update( …
+      it "deletes failed jobs by id" do
+        target_ids = Delayed::Job::Failed.all[0..2].map(&:id)
+        expect(Delayed::Job.bulk_update("destroy", ids: target_ids, flavor: "failed",
+                                        query: @query)).to eq(target_ids.length)
      end
 
-      it "…
+      it "deletes all failed jobs" do
        failed_count = Delayed::Job::Failed.count
-        Delayed::Job.bulk_update( …
+        expect(Delayed::Job.bulk_update("destroy", flavor: "failed", query: @query)).to eq(failed_count)
      end
    end
  end
 
-  context …
-    it "…
-      expect(Delayed::Job). …
-      job = Delayed::Job.enqueue(SimpleJob.new, : …
-      job.strand. …
+  context "n_strand" do
+    it "defaults to 1" do
+      expect(Delayed::Job).not_to receive(:rand)
+      job = Delayed::Job.enqueue(SimpleJob.new, n_strand: "njobs")
+      expect(job.strand).to eq("njobs")
    end
 
-    it "…
-      change_setting(Delayed::Settings, :num_strands, …
-
-
-
+    it "sets max_concurrent based on num_strands" do
+      change_setting(Delayed::Settings, :num_strands, lambda { |strand_name|
+        expect(strand_name).to eql "njobs"
+        "3"
+      }) do
+        job = Delayed::Job.enqueue(SimpleJob.new, n_strand: "njobs")
+        expect(job.strand).to eq("njobs")
+        expect(job.max_concurrent).to eq(3)
      end
    end
 
    context "with two parameters" do
-      it "…
-        job = Delayed::Job.enqueue(SimpleJob.new, n_strand: [ …
-        job.strand. …
-        change_setting(Delayed::Settings, :num_strands, …
+      it "uses the first param as the setting to read" do
+        job = Delayed::Job.enqueue(SimpleJob.new, n_strand: %w[njobs 123])
+        expect(job.strand).to eq("njobs/123")
+        change_setting(Delayed::Settings, :num_strands, lambda { |strand_name|
          case strand_name
-          when "njobs" …
-          else nil
+          when "njobs" then 3
          end
        }) do
-          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: [ …
-          job.strand. …
-          job.max_concurrent. …
+          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: %w[njobs 123])
+          expect(job.strand).to eq("njobs/123")
+          expect(job.max_concurrent).to eq(3)
        end
      end
 
-      it "…
-        change_setting(Delayed::Settings, :num_strands, …
+      it "allows overridding the setting based on the second param" do
+        change_setting(Delayed::Settings, :num_strands, lambda { |strand_name|
          case strand_name
-          when "njobs/123" …
-          else nil
+          when "njobs/123" then 5
          end
        }) do
-          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: [ …
-          job.strand. …
-          job.max_concurrent. …
-          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: [ …
-          job.strand. …
-          job.max_concurrent. …
+          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: %w[njobs 123])
+          expect(job.strand).to eq("njobs/123")
+          expect(job.max_concurrent).to eq(5)
+          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: %w[njobs 456])
+          expect(job.strand).to eq("njobs/456")
+          expect(job.max_concurrent).to eq(1)
        end
 
-        change_setting(Delayed::Settings, :num_strands, …
+        change_setting(Delayed::Settings, :num_strands, lambda { |strand_name|
          case strand_name
-          when "njobs/123" …
-          when "njobs" …
-          else nil
+          when "njobs/123" then 5
+          when "njobs" then 3
          end
        }) do
-          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: [ …
-          job.strand. …
-          job.max_concurrent. …
-          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: [ …
-          job.strand. …
-          job.max_concurrent. …
+          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: %w[njobs 123])
+          expect(job.strand).to eq("njobs/123")
+          expect(job.max_concurrent).to eq(5)
+          job = Delayed::Job.enqueue(SimpleJob.new, n_strand: %w[njobs 456])
+          expect(job.strand).to eq("njobs/456")
+          expect(job.max_concurrent).to eq(3)
        end
      end
    end
 
    context "max_concurrent triggers" do
-
-        skip("postgres specific") unless ActiveRecord::Base.connection.adapter_name == 'PostgreSQL'
-      end
-
-      it "should set one job as next_in_strand at a time with max_concurrent of 1" do
+      it "sets one job as next_in_strand at a time with max_concurrent of 1" do
        job1 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
        job1.reload
-        job1.next_in_strand. …
+        expect(job1.next_in_strand).to eq(true)
        job2 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
        job2.reload
-        job2.next_in_strand. …
+        expect(job2.next_in_strand).to eq(false)
        run_job(job1)
        job2.reload
-        job2.next_in_strand. …
+        expect(job2.next_in_strand).to eq(true)
      end
 
-      it "…
-        change_setting(Delayed::Settings, :num_strands, …
+      it "sets multiple jobs as next_in_strand at a time based on max_concurrent" do
+        change_setting(Delayed::Settings, :num_strands, lambda { |strand_name|
          case strand_name
-          when "njobs" …
-          else nil
+          when "njobs" then 2
          end
        }) do
          job1 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
          job1.reload
-          job1.next_in_strand. …
+          expect(job1.next_in_strand).to eq(true)
          job2 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
          job2.reload
-          job2.next_in_strand. …
+          expect(job2.next_in_strand).to eq(true)
          job3 = Delayed::Job.enqueue(SimpleJob.new, n_strand: ["njobs"])
          job3.reload
-          job3.next_in_strand. …
+          expect(job3.next_in_strand).to eq(false)
          run_job(job1)
          job3.reload
-          job3.next_in_strand. …
+          expect(job3.next_in_strand).to eq(true)
        end
      end
    end
  end
 
  it "unlocks orphaned prefetched_jobs" do
-    job1 = Delayed::Job.new(: …
-    job2 = Delayed::Job.new(: …
+    job1 = Delayed::Job.new(tag: "tag")
+    job2 = Delayed::Job.new(tag: "tag")
 
    job1.create_and_lock!("prefetch:a")
-    job1.locked_at = Delayed::Job.db_time_now - 15 * 60
+    job1.locked_at = Delayed::Job.db_time_now - (15 * 60)
    job1.save!
    job2.create_and_lock!("prefetch:a")
 
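The n_strand specs above document the 3.0 concurrency model: the first element of n_strand names a num_strands setting, the optional second element subdivides the strand, and the resolved value becomes the job's max_concurrent (with 1 as the fallback). A hedged sketch of the same behaviour outside the test suite; it assumes Delayed::Settings.num_strands is directly writable (the change_setting helper in these specs wraps exactly that), and it reuses "njobs" and SimpleJob from the specs as illustrative names:

```ruby
# Allow up to 3 concurrent jobs per "njobs/<id>" strand; any other strand name
# falls through to nil and keeps the default max_concurrent of 1.
Delayed::Settings.num_strands = lambda do |strand_name|
  3 if strand_name == "njobs"
end

job = Delayed::Job.enqueue(SimpleJob.new, n_strand: %w[njobs 123])
job.strand         # => "njobs/123"
job.max_concurrent # => 3
```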
@@ -222,69 +215,80 @@ describe 'Delayed::Backed::ActiveRecord::Job' do
     expect(Delayed::Job.unlock_orphaned_prefetched_jobs).to eq 0
 
     expect(Delayed::Job.find(job1.id).locked_by).to be_nil
-    expect(Delayed::Job.find(job2.id).locked_by).to eq …
+    expect(Delayed::Job.find(job2.id).locked_by).to eq "prefetch:a"
   end
 
   it "gets process ids from locked_by" do
-    3.times.map { Delayed::Job.create : …
-
-    expect(Delayed::Job.processes_locked_locally(name: …
-    expect(Delayed::Job.processes_locked_locally(name: …
+    3.times.map { Delayed::Job.create payload_object: SimpleJob.new }
+    Delayed::Job.get_and_lock_next_available(["job42:2", "job42:9001"])
+    expect(Delayed::Job.processes_locked_locally(name: "job42").sort).to eq [2, 9001]
+    expect(Delayed::Job.processes_locked_locally(name: "jobnotme")).to be_empty
   end
 
   it "allows fetching multiple jobs at once" do
-    jobs = 3.times.map { Delayed::Job.create : …
-    locked_jobs = Delayed::Job.get_and_lock_next_available([ …
-    locked_jobs.length. …
-    locked_jobs.keys. …
-    locked_jobs.values. …
-    jobs.map(&:reload).map(&:locked_by). …
+    jobs = 3.times.map { Delayed::Job.create payload_object: SimpleJob.new }
+    locked_jobs = Delayed::Job.get_and_lock_next_available(%w[worker1 worker2])
+    expect(locked_jobs.length).to eq(2)
+    expect(locked_jobs.keys).to eq(%w[worker1 worker2])
+    expect(locked_jobs.values).to eq(jobs[0..1])
+    expect(jobs.map(&:reload).map(&:locked_by)).to eq(["worker1", "worker2", nil])
  end
 
  it "allows fetching extra jobs" do
-    jobs = 5.times.map { Delayed::Job.create : …
-    locked_jobs = Delayed::Job.get_and_lock_next_available([ …
+    jobs = 5.times.map { Delayed::Job.create payload_object: SimpleJob.new }
+    locked_jobs = Delayed::Job.get_and_lock_next_available(["worker1"],
                                                            prefetch: 2,
-                                                           prefetch_owner: …
+                                                           prefetch_owner: "work_queue")
    expect(locked_jobs.length).to eq 2
-    expect(locked_jobs.keys).to eq [ …
-    expect(locked_jobs[ …
-    expect(locked_jobs[ …
-    jobs.map(&:reload).map(&:locked_by). …
+    expect(locked_jobs.keys).to eq %w[worker1 work_queue]
+    expect(locked_jobs["worker1"]).to eq jobs[0]
+    expect(locked_jobs["work_queue"]).to eq jobs[1..2]
+    expect(jobs.map(&:reload).map(&:locked_by)).to eq(["worker1", "work_queue", "work_queue", nil, nil])
  end
 
-
-  it "should not find jobs scheduled for now when we have forced latency" do
+  it "does not find jobs scheduled for now when we have forced latency" do
    job = create_job
-    Delayed::Job.get_and_lock_next_available( …
-    Delayed::Job.get_and_lock_next_available( …
+    expect(Delayed::Job.get_and_lock_next_available("worker", forced_latency: 60.0)).to be_nil
+    expect(Delayed::Job.get_and_lock_next_available("worker")).to eq job
  end
 
  context "non-transactional", non_transactional: true do
    it "creates a stranded job in a single statement" do
-      skip "Requires Rails 5.2 or greater" unless Rails.version >= '5.2'
-
      allow(Delayed::Job.connection).to receive(:prepared_statements).and_return(false)
-      allow(Delayed::Job.connection).to receive(:execute).with(be_include("pg_advisory_xact_lock"),
-
+      allow(Delayed::Job.connection).to receive(:execute).with(be_include("pg_advisory_xact_lock"),
+                                                               anything).and_call_original.once
+      expect(Delayed::Job.connection).not_to receive(:insert)
      j = create_job(strand: "test1")
      allow(Delayed::Job.connection).to receive(:execute).and_call_original
      expect(Delayed::Job.find(j.id)).to eq j
    end
 
    it "creates a non-stranded job in a single statement" do
-      skip "Requires Rails 5.2 or greater" unless Rails.version >= '5.2'
-
      allow(Delayed::Job.connection).to receive(:prepared_statements).and_return(false)
      call_count = 0
      allow(Delayed::Job.connection).to receive(:execute).and_wrap_original do |m, (arg1, arg2)|
        call_count += 1
        m.call(arg1, arg2)
      end
-
+      expect(Delayed::Job.connection).not_to receive(:insert)
      j = create_job(strand: "test1")
      expect(call_count).to eq 1
      expect(Delayed::Job.find(j.id)).to eq j
    end
+
+    it "does not lock a stranded failed job creation" do
+      j = create_job(strand: "test1")
+      # query for metadata to ensure it's loaded before we start mucking with the connection
+      Delayed::Backend::ActiveRecord::Job::Failed.new
+
+      allow(Delayed::Job.connection).to receive(:prepared_statements).and_return(false)
+      allow(Delayed::Job.connection).to receive(:execute).and_wrap_original do |original, *args|
+        expect(args.first).not_to include("pg_advisory_xact_lock")
+        original.call(*args)
+      end
+      expect(Delayed::Job.connection).not_to receive(:insert)
+      j.fail!
+      allow(Delayed::Job.connection).to receive(:execute).and_call_original
+    end
  end
 end
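These specs also pin down the worker-facing fetch API: get_and_lock_next_available accepts one or many worker names, an optional prefetch count with a prefetch_owner, and a forced_latency guard. A hedged sketch of how a work queue might use it; the worker and owner names are purely illustrative, and a populated delayed_jobs table is assumed:

```ruby
# Lock one job for "worker1" and hold up to 2 more under a shared prefetch
# owner so they can be handed out later without another query.
locked = Delayed::Job.get_and_lock_next_available(
  ["worker1"],
  prefetch: 2,
  prefetch_owner: "work_queue"
)
locked["worker1"]    # the job locked for worker1
locked["work_queue"] # an array of up to 2 prefetched jobs

# Prefetched jobs whose owner has since died are reclaimed by:
Delayed::Job.unlock_orphaned_prefetched_jobs
```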
data/spec/delayed/cli_spec.rb
CHANGED
@@ -1,22 +1,22 @@
 # frozen_string_literal: true
 
-require …
+require "spec_helper"
 
 RSpec.describe Delayed::CLI do
-  describe …
-    it …
-      cli = described_class.new(%w …
+  describe "#parse_cli_options!" do
+    it "correctly parses the --config option" do
+      cli = described_class.new(%w[run --config /path/to/some/file.yml])
       options = cli.parse_cli_options!
-      expect(options).to include config_file: …
+      expect(options).to include config_file: "/path/to/some/file.yml"
     end
   end
 
-  describe …
+  describe "#run" do
     before do
       expect(Delayed::Settings).to receive(:worker_config).and_return({})
     end
 
-    it …
+    it "prints help when no command is given" do
      cli = described_class.new([])
      expect(cli).to receive(:puts).with(/Usage/)
      cli.run
data/spec/delayed/daemon_spec.rb
CHANGED
@@ -1,35 +1,36 @@
 # frozen_string_literal: true
 
-require …
+require "spec_helper"
 
 RSpec.describe Delayed::Daemon do
+  subject { described_class.new(pid_folder) }
+
   let(:pid_folder) { "/test/pid/folder" }
   let(:pid) { 9999 }
-  let(:subject) { described_class.new(pid_folder) }
 
   before do
     allow(subject).to receive(:pid).and_return(pid)
   end
 
-  describe …
-    it …
+  describe "#stop" do
+    it "prints status if not running" do
      expect(subject).to receive(:status).with(print: false, pid: pid).and_return(false)
      expect(subject).to receive(:status).with(no_args)
-      expect(Process). …
+      expect(Process).not_to receive(:kill)
      subject.stop
    end
 
-    it …
+    it "prints status if draining" do
      expect(subject).to receive(:status).with(print: false, pid: pid).and_return(:draining)
      expect(subject).to receive(:status).with(no_args)
-      expect(Process). …
+      expect(Process).not_to receive(:kill)
      subject.stop
    end
 
-    it …
+    it "sends QUIT by default" do
      expect(subject).to receive(:status).with(print: false, pid: pid).and_return(:running)
      expect(subject).to receive(:puts).with(/Stopping pool/)
-      expect(Process).to receive(:kill).with( …
+      expect(Process).to receive(:kill).with("QUIT", pid)
      expect(subject).to receive(:wait).with(false)
      subject.stop
    end