delayed 1.2.1 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +19 -6
- data/Rakefile +5 -1
- data/app/models/delayed/job.rb +42 -29
- data/db/migrate/1_create_delayed_jobs.rb +0 -2
- data/db/migrate/3_add_index_to_delayed_jobs_name.rb +14 -6
- data/db/migrate/4_index_live_jobs.rb +33 -0
- data/db/migrate/5_index_failed_jobs.rb +24 -0
- data/db/migrate/6_set_postgres_fillfactor.rb +31 -0
- data/db/migrate/7_remove_legacy_index.rb +12 -0
- data/lib/delayed/backend/job_preparer.rb +19 -0
- data/lib/delayed/exceptions.rb +4 -1
- data/lib/delayed/helpers/migration.rb +116 -0
- data/lib/delayed/monitor.rb +21 -11
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/worker.rb +1 -1
- data/lib/delayed.rb +1 -0
- data/spec/delayed/__snapshots__/job_spec.rb.snap +271 -0
- data/spec/delayed/__snapshots__/monitor_spec.rb.snap +969 -0
- data/spec/delayed/job_spec.rb +189 -13
- data/spec/delayed/monitor_spec.rb +61 -18
- data/spec/helper.rb +129 -11
- data/spec/sample_jobs.rb +10 -0
- data/spec/worker_spec.rb +18 -0
- metadata +15 -3
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: a42f2f7d06ec85c051b3a72e7e8644a750e703e6994cd89e75b151e098810fde
|
|
4
|
+
data.tar.gz: 122a0e93f5790543eca0484c27f52eff1472f693d1c8eee5c0aef27e1cace575
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: 2238ccb8c25ef5e483faf3a6bfa43f1c72a2b81e2cd5312071d8dbc72ef96397e3b6dceaac61db962028524e9a6b06769005ce64e9a5a1b7b818a81a40554c95
|
|
7
|
+
data.tar.gz: b255dee5f83ffe62155edaf65e552a3f6042656b60515b7721a0707ff78c2ed8f284d5e298b9448c2ee34cb8d574447619f9e78ec92da5f1dee461f4ebbe4b70
|
data/README.md
CHANGED
|
@@ -72,8 +72,10 @@ gem 'delayed'
|
|
|
72
72
|
|
|
73
73
|
Then run `bundle install`.
|
|
74
74
|
|
|
75
|
-
|
|
76
|
-
|
|
75
|
+
#### Database Setup
|
|
76
|
+
|
|
77
|
+
Before you can enqueue and run jobs, you will need a `delayed_jobs` table. You
|
|
78
|
+
can create this table by running the following command:
|
|
77
79
|
|
|
78
80
|
```bash
|
|
79
81
|
rake delayed:install:migrations
|
|
@@ -81,11 +83,21 @@ rails db:migrate
|
|
|
81
83
|
```
|
|
82
84
|
|
|
83
85
|
This will produce a series of migrations ready to be run in sequence.
|
|
84
|
-
(Re-running the command
|
|
85
|
-
|
|
86
|
-
|
|
86
|
+
(Re-running the command will **not** duplicate previous migrations, so you
|
|
87
|
+
should aim to re-run it after each new release of the gem to pick up any new
|
|
88
|
+
schema changes.)
|
|
89
|
+
|
|
90
|
+
⚠️ **Important: If you already have an existing `delayed_jobs` table...** ⚠️
|
|
91
|
+
- ...you may need to adjust the generated migrations to avoid conflicts.
|
|
92
|
+
- ...you should inspect the migrations closely, as they are not guaranteed to be
|
|
93
|
+
"safe" to run with zero downtime. (We recommend using the
|
|
94
|
+
[strong_migrations](https://github.com/ankane/strong_migrations) gem to help
|
|
95
|
+
identify any unsafe operations, and at a certain scale all bets are off
|
|
96
|
+
without careful testing!)
|
|
97
|
+
|
|
98
|
+
#### ActiveJob Setup
|
|
87
99
|
|
|
88
|
-
|
|
100
|
+
To use this background job processor with ActiveJob, add the following to your application config:
|
|
89
101
|
|
|
90
102
|
```ruby
|
|
91
103
|
config.active_job.queue_adapter = :delayed
|
|
@@ -591,4 +603,5 @@ creating a new issue to get early feedback on your proposed change.
|
|
|
591
603
|
* Fork the project and create a new branch for your contribution.
|
|
592
604
|
* Write your contribution (and any applicable test coverage).
|
|
593
605
|
* Make sure all tests pass (`bundle exec rake`).
|
|
606
|
+
* If you are changing SQL queries, re-record snapshots with `RECORD_SNAPSHOTS=1`
|
|
594
607
|
* Submit a pull request.
|
data/Rakefile
CHANGED
|
@@ -3,7 +3,7 @@ Bundler::GemHelper.install_tasks
|
|
|
3
3
|
|
|
4
4
|
require 'rspec/core/rake_task'
|
|
5
5
|
|
|
6
|
-
ADAPTERS = %w(
|
|
6
|
+
ADAPTERS = %w(postgresql sqlite3 mysql2).freeze
|
|
7
7
|
|
|
8
8
|
ADAPTERS.each do |adapter|
|
|
9
9
|
desc "Run RSpec code examples for #{adapter} adapter"
|
|
@@ -20,6 +20,10 @@ task :adapter do
|
|
|
20
20
|
ENV['ADAPTER'] = nil
|
|
21
21
|
end
|
|
22
22
|
|
|
23
|
+
if ENV['RECORD_SNAPSHOTS']
|
|
24
|
+
`rm -rf spec/**/__snapshots__`
|
|
25
|
+
end
|
|
26
|
+
|
|
23
27
|
require 'rubocop/rake_task'
|
|
24
28
|
RuboCop::RakeTask.new
|
|
25
29
|
|
data/app/models/delayed/job.rb
CHANGED
|
@@ -2,18 +2,43 @@ module Delayed
|
|
|
2
2
|
class Job < ::ActiveRecord::Base
|
|
3
3
|
include Delayed::Backend::Base
|
|
4
4
|
|
|
5
|
-
|
|
6
|
-
scope :
|
|
7
|
-
scope :
|
|
8
|
-
scope :
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
5
|
+
# worker config scopes
|
|
6
|
+
scope :by_priority, -> { order(:priority, :run_at) }
|
|
7
|
+
scope :min_priority, ->(priority) { where(arel_table[:priority].gteq(priority)) if priority }
|
|
8
|
+
scope :max_priority, ->(priority) { where(arel_table[:priority].lteq(priority)) if priority }
|
|
9
|
+
scope :for_queues, ->(queues) { where(queue: queues) if queues.any? }
|
|
10
|
+
|
|
11
|
+
# high-level queue states (live => erroring => failed)
|
|
12
|
+
scope :live, -> { where(failed_at: nil) }
|
|
13
|
+
scope :erroring, -> { where(arel_table[:attempts].gt(0)).merge(unscoped.live) }
|
|
12
14
|
scope :failed, -> { where.not(failed_at: nil) }
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
scope :
|
|
16
|
-
scope :
|
|
15
|
+
|
|
16
|
+
# live queue states (future vs pending)
|
|
17
|
+
scope :future, ->(as_of = db_time_now) { merge(unscoped.live).where(arel_table[:run_at].gt(as_of)) }
|
|
18
|
+
scope :pending, ->(as_of = db_time_now) { merge(unscoped.live).where(arel_table[:run_at].lteq(as_of)) }
|
|
19
|
+
|
|
20
|
+
# pending queue states (claimed vs claimable)
|
|
21
|
+
scope :claimed, ->(as_of = db_time_now) {
|
|
22
|
+
where(arel_table[:locked_at].gteq(db_time_now - lock_timeout))
|
|
23
|
+
.merge(unscoped.pending(as_of))
|
|
24
|
+
}
|
|
25
|
+
scope :claimed_by, ->(worker, as_of = db_time_now) {
|
|
26
|
+
where(locked_by: worker.name)
|
|
27
|
+
.claimed(as_of)
|
|
28
|
+
}
|
|
29
|
+
scope :claimable, ->(as_of = db_time_now) {
|
|
30
|
+
where(locked_at: nil)
|
|
31
|
+
.or(where(arel_table[:locked_at].lt(db_time_now - lock_timeout)))
|
|
32
|
+
.merge(unscoped.pending(as_of))
|
|
33
|
+
}
|
|
34
|
+
scope :claimable_by, ->(worker, as_of = db_time_now) {
|
|
35
|
+
claimable(as_of)
|
|
36
|
+
.or(claimed_by(worker, as_of))
|
|
37
|
+
.min_priority(worker.min_priority)
|
|
38
|
+
.max_priority(worker.max_priority)
|
|
39
|
+
.for_queues(worker.queues)
|
|
40
|
+
.by_priority
|
|
41
|
+
}
|
|
17
42
|
|
|
18
43
|
before_save :set_default_run_at, :set_name
|
|
19
44
|
|
|
@@ -26,30 +51,18 @@ module Delayed
|
|
|
26
51
|
|
|
27
52
|
set_delayed_job_table_name
|
|
28
53
|
|
|
29
|
-
def self.
|
|
30
|
-
|
|
31
|
-
"((run_at <= ? AND (locked_at IS NULL OR locked_at < ?)) OR locked_by = ?) AND failed_at IS NULL",
|
|
32
|
-
db_time_now,
|
|
33
|
-
db_time_now - (max_run_time + REENQUEUE_BUFFER),
|
|
34
|
-
worker_name,
|
|
35
|
-
)
|
|
54
|
+
def self.lock_timeout
|
|
55
|
+
Worker.max_run_time + REENQUEUE_BUFFER
|
|
36
56
|
end
|
|
37
57
|
|
|
38
58
|
# When a worker is exiting, make sure we don't have any locked jobs.
|
|
39
|
-
def self.clear_locks!(
|
|
40
|
-
|
|
59
|
+
def self.clear_locks!(worker)
|
|
60
|
+
claimed_by(worker).update_all(locked_by: nil, locked_at: nil)
|
|
41
61
|
end
|
|
42
62
|
|
|
43
|
-
def self.reserve(worker,
|
|
44
|
-
ready_scope =
|
|
45
|
-
ready_to_run(worker.name, max_run_time)
|
|
46
|
-
.min_priority(worker.min_priority)
|
|
47
|
-
.max_priority(worker.max_priority)
|
|
48
|
-
.for_queues(worker.queues)
|
|
49
|
-
.by_priority
|
|
50
|
-
|
|
63
|
+
def self.reserve(worker, as_of = db_time_now)
|
|
51
64
|
ActiveSupport::Notifications.instrument('delayed.worker.reserve_jobs', worker_tags(worker)) do
|
|
52
|
-
reserve_with_scope(
|
|
65
|
+
reserve_with_scope(claimable_by(worker, as_of), worker, as_of)
|
|
53
66
|
end
|
|
54
67
|
end
|
|
55
68
|
|
|
@@ -12,8 +12,6 @@ class CreateDelayedJobs < ActiveRecord::Migration[6.0]
|
|
|
12
12
|
table.string :queue # The name of the queue this job is in
|
|
13
13
|
table.timestamps null: true
|
|
14
14
|
end
|
|
15
|
-
|
|
16
|
-
add_index :delayed_jobs, [:priority, :run_at], name: "delayed_jobs_priority"
|
|
17
15
|
end
|
|
18
16
|
|
|
19
17
|
def self.down
|
|
@@ -1,11 +1,19 @@
|
|
|
1
1
|
class AddIndexToDelayedJobsName < ActiveRecord::Migration[6.0]
|
|
2
|
-
|
|
2
|
+
include Delayed::Helpers::Migration
|
|
3
|
+
|
|
4
|
+
# Set to the maximum amount of time you want this migration to run:
|
|
5
|
+
WAIT_TIMEOUT = 5.minutes
|
|
6
|
+
|
|
7
|
+
# Concurrent index creation cannot be run inside a transaction:
|
|
8
|
+
disable_ddl_transaction! if concurrent_index_creation_supported?
|
|
3
9
|
|
|
4
10
|
def change
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
11
|
+
opts = {}
|
|
12
|
+
|
|
13
|
+
# Postgres supports creating indexes concurrently, which avoids locking the table
|
|
14
|
+
# while the index is building:
|
|
15
|
+
opts[:algorithm] = :concurrently if concurrent_index_creation_supported?
|
|
16
|
+
|
|
17
|
+
upsert_index :delayed_jobs, :name, wait_timeout: WAIT_TIMEOUT, **opts
|
|
10
18
|
end
|
|
11
19
|
end
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
class IndexLiveJobs < ActiveRecord::Migration[6.0]
|
|
2
|
+
include Delayed::Helpers::Migration
|
|
3
|
+
|
|
4
|
+
# Set to the maximum amount of time you want this migration to run:
|
|
5
|
+
WAIT_TIMEOUT = 5.minutes
|
|
6
|
+
|
|
7
|
+
# Concurrent index creation cannot be run inside a transaction:
|
|
8
|
+
disable_ddl_transaction! if concurrent_index_creation_supported?
|
|
9
|
+
|
|
10
|
+
def change
|
|
11
|
+
opts = { name: 'idx_delayed_jobs_live' }
|
|
12
|
+
columns = %i(priority run_at locked_at queue attempts)
|
|
13
|
+
|
|
14
|
+
# Postgres supports creating indexes concurrently,
|
|
15
|
+
# which avoids locking the table while the index is building:
|
|
16
|
+
opts[:algorithm] = :concurrently if concurrent_index_creation_supported?
|
|
17
|
+
|
|
18
|
+
if connection.supports_partial_index?
|
|
19
|
+
# Postgres and SQLite both support partial indexes, allowing us to pre-filter out failed jobs:
|
|
20
|
+
opts[:where] = '(failed_at IS NULL)'
|
|
21
|
+
else
|
|
22
|
+
# If partial indexes aren't supported, failed_at will be included in the primary index:
|
|
23
|
+
columns = %i(failed_at) + columns
|
|
24
|
+
end
|
|
25
|
+
|
|
26
|
+
# On PostgreSQL, we do not index `locked_at` or `locked_by` to optimize for HOT updates during pickup.
|
|
27
|
+
# See: https://www.postgresql.org/docs/current/storage-hot.html
|
|
28
|
+
# (On other databases, we can include `locked_at` to allow for more "covering" index lookups)
|
|
29
|
+
columns -= %i(locked_at) if connection.adapter_name == 'PostgreSQL'
|
|
30
|
+
|
|
31
|
+
upsert_index :delayed_jobs, columns, wait_timeout: WAIT_TIMEOUT, **opts
|
|
32
|
+
end
|
|
33
|
+
end
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
class IndexFailedJobs < ActiveRecord::Migration[6.0]
|
|
2
|
+
include Delayed::Helpers::Migration
|
|
3
|
+
|
|
4
|
+
# Set to the maximum amount of time you want this migration to run:
|
|
5
|
+
WAIT_TIMEOUT = 5.minutes
|
|
6
|
+
|
|
7
|
+
# Concurrent index creation cannot be run inside a transaction:
|
|
8
|
+
disable_ddl_transaction! if concurrent_index_creation_supported?
|
|
9
|
+
|
|
10
|
+
def change
|
|
11
|
+
# You can delete this migration if your database does not support partial indexes.
|
|
12
|
+
return unless connection.supports_partial_index?
|
|
13
|
+
|
|
14
|
+
# If partial indexes are supported, then the "live" index does not cover failed jobs.
|
|
15
|
+
# To aid in monitoring, this adds a separate (smaller) index for failed jobs:
|
|
16
|
+
opts = { name: 'idx_delayed_jobs_failed', where: '(failed_at IS NOT NULL)' }
|
|
17
|
+
|
|
18
|
+
# Postgres supports creating indexes concurrently, which avoids locking the table
|
|
19
|
+
# while the index is building:
|
|
20
|
+
opts[:algorithm] = :concurrently if concurrent_index_creation_supported?
|
|
21
|
+
|
|
22
|
+
upsert_index :delayed_jobs, %i(priority queue), wait_timeout: WAIT_TIMEOUT, **opts
|
|
23
|
+
end
|
|
24
|
+
end
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
class SetPostgresFillfactor < ActiveRecord::Migration[6.0]
|
|
2
|
+
# On PostgreSQL, we do not index `locked_at` or `locked_by` to optimize for "HOT updates" during pickup.
|
|
3
|
+
# See: https://www.postgresql.org/docs/current/storage-hot.html
|
|
4
|
+
#
|
|
5
|
+
# To increase the odds that a given page has room for a HOT update, we reduce the
|
|
6
|
+
# "fillfactor" (percentage filled by default), and set a more aggressive autovacuum target:
|
|
7
|
+
def up
|
|
8
|
+
return unless connection.adapter_name == 'PostgreSQL'
|
|
9
|
+
|
|
10
|
+
execute <<~SQL
|
|
11
|
+
ALTER TABLE delayed_jobs SET (
|
|
12
|
+
autovacuum_vacuum_scale_factor = 0.02,
|
|
13
|
+
fillfactor = 33
|
|
14
|
+
);
|
|
15
|
+
ALTER INDEX idx_delayed_jobs_live SET (
|
|
16
|
+
fillfactor = 33
|
|
17
|
+
);
|
|
18
|
+
SQL
|
|
19
|
+
end
|
|
20
|
+
|
|
21
|
+
def down
|
|
22
|
+
return unless connection.adapter_name == 'PostgreSQL'
|
|
23
|
+
|
|
24
|
+
execute <<~SQL
|
|
25
|
+
ALTER TABLE delayed_jobs
|
|
26
|
+
RESET (autovacuum_vacuum_scale_factor, fillfactor);
|
|
27
|
+
ALTER INDEX idx_delayed_jobs_live
|
|
28
|
+
RESET (fillfactor);
|
|
29
|
+
SQL
|
|
30
|
+
end
|
|
31
|
+
end
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
class RemoveLegacyIndex < ActiveRecord::Migration[6.0]
|
|
2
|
+
include Delayed::Helpers::Migration
|
|
3
|
+
|
|
4
|
+
disable_ddl_transaction! if concurrent_index_creation_supported?
|
|
5
|
+
|
|
6
|
+
def change
|
|
7
|
+
opts = { name: 'delayed_jobs_priority' }
|
|
8
|
+
opts[:algorithm] = :concurrently if concurrent_index_creation_supported?
|
|
9
|
+
|
|
10
|
+
remove_index_if_exists :delayed_jobs, %i(priority run_at), **opts, wait_timeout: 5.minutes
|
|
11
|
+
end
|
|
12
|
+
end
|
|
@@ -12,6 +12,7 @@ module Delayed
|
|
|
12
12
|
set_payload
|
|
13
13
|
set_queue_name
|
|
14
14
|
set_priority
|
|
15
|
+
handle_dst
|
|
15
16
|
handle_deprecation
|
|
16
17
|
options
|
|
17
18
|
end
|
|
@@ -32,6 +33,24 @@ module Delayed
|
|
|
32
33
|
options[:priority] ||= Delayed::Worker.default_priority
|
|
33
34
|
end
|
|
34
35
|
|
|
36
|
+
def scheduled_into_fall_back_hour?
|
|
37
|
+
options[:run_at] &&
|
|
38
|
+
!options[:run_at].in_time_zone.dst? &&
|
|
39
|
+
(options[:run_at] - 1.hour).dst?
|
|
40
|
+
end
|
|
41
|
+
|
|
42
|
+
def handle_dst
|
|
43
|
+
# The DB column does not retain timezone information. As a result, if we
|
|
44
|
+
# are running with `:local` timezone, then any future-scheduled jobs
|
|
45
|
+
# that fall into the "fall back" DST transition need to be rounded up to
|
|
46
|
+
# the later hour or they will cause a "spinloop" of immediate retries.
|
|
47
|
+
if Job.default_timezone == :local && scheduled_into_fall_back_hour?
|
|
48
|
+
run_at_was = options[:run_at]
|
|
49
|
+
options[:run_at] = (run_at_was + 1.hour).beginning_of_hour
|
|
50
|
+
Delayed.say("Adjusted run_at from #{run_at_was} to #{options[:run_at]} to account for fall back DST transition", :warn)
|
|
51
|
+
end
|
|
52
|
+
end
|
|
53
|
+
|
|
35
54
|
def handle_deprecation
|
|
36
55
|
unless options[:payload_object].respond_to?(:perform)
|
|
37
56
|
raise ArgumentError,
|
data/lib/delayed/exceptions.rb
CHANGED
|
@@ -1,7 +1,10 @@
|
|
|
1
1
|
require 'timeout'
|
|
2
2
|
|
|
3
3
|
module Delayed
|
|
4
|
-
|
|
4
|
+
# We inherit from Exception because we want timeouts to bubble up to the
|
|
5
|
+
# worker thread where they can be handled appropriately. (If we inherited from
|
|
6
|
+
# StandardError, jobs are more likely to inadvertently `rescue` it directly.)
|
|
7
|
+
class WorkerTimeout < Exception # rubocop:disable Lint/InheritException
|
|
5
8
|
def message
|
|
6
9
|
seconds = Delayed::Worker.max_run_time.to_i
|
|
7
10
|
"#{super} (Delayed::Worker.max_run_time is only #{seconds} second#{seconds == 1 ? '' : 's'})"
|
|
@@ -0,0 +1,116 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Delayed
|
|
4
|
+
module Helpers
|
|
5
|
+
module Migration
|
|
6
|
+
def self.included(base)
|
|
7
|
+
base.extend(ClassMethods)
|
|
8
|
+
delegate :concurrent_index_creation_supported?, to: :class
|
|
9
|
+
end
|
|
10
|
+
|
|
11
|
+
module ClassMethods
|
|
12
|
+
def concurrent_index_creation_supported?
|
|
13
|
+
connection.index_algorithms.key?(:concurrently)
|
|
14
|
+
end
|
|
15
|
+
end
|
|
16
|
+
|
|
17
|
+
def upsert_index(*args, **opts)
|
|
18
|
+
dir(:up) { _add_or_replace_index(*args, **opts) }
|
|
19
|
+
dir(:down) { _drop_index_if_exists(*args, **opts) }
|
|
20
|
+
end
|
|
21
|
+
|
|
22
|
+
def remove_index_if_exists(*args, **opts)
|
|
23
|
+
dir(:up) { _drop_index_if_exists(*args, **opts) }
|
|
24
|
+
dir(:down) { _add_or_replace_index(*args, **opts) }
|
|
25
|
+
end
|
|
26
|
+
|
|
27
|
+
RETRY_EXCEPTIONS = [
|
|
28
|
+
ActiveRecord::LockWaitTimeout,
|
|
29
|
+
ActiveRecord::StatementTimeout,
|
|
30
|
+
(PG::LockNotAvailable if defined?(PG::LockNotAvailable)),
|
|
31
|
+
].compact.freeze
|
|
32
|
+
|
|
33
|
+
def with_retry_loop(wait_timeout: 5.minutes, **opts)
|
|
34
|
+
with_timeouts(**opts) do
|
|
35
|
+
loop do
|
|
36
|
+
yield
|
|
37
|
+
break
|
|
38
|
+
rescue *RETRY_EXCEPTIONS => e
|
|
39
|
+
raise if Delayed::Job.db_time_now - @migration_start > wait_timeout
|
|
40
|
+
|
|
41
|
+
Delayed.logger.warn("Index creation failed for #{opts[:name]}: #{e.message}. Retrying...")
|
|
42
|
+
end
|
|
43
|
+
end
|
|
44
|
+
end
|
|
45
|
+
|
|
46
|
+
def with_timeouts(statement_timeout: 1.minute, lock_timeout: 5.seconds)
|
|
47
|
+
dir(:both) { set_timeouts!(statement_timeout: statement_timeout, lock_timeout: lock_timeout) }
|
|
48
|
+
yield
|
|
49
|
+
ensure
|
|
50
|
+
dir(:both) { set_timeouts!(statement_timeout: nil, lock_timeout: nil) }
|
|
51
|
+
end
|
|
52
|
+
|
|
53
|
+
private
|
|
54
|
+
|
|
55
|
+
def _add_or_replace_index(table, columns, **opts)
|
|
56
|
+
index = _lookup_index(table, columns, **opts)
|
|
57
|
+
if index && !_index_matches?(index, **opts)
|
|
58
|
+
Delayed.logger.warn("Recreating index #{index.name} (is invalid or does not match desired options)")
|
|
59
|
+
_drop_index(table, name: index.name, **opts)
|
|
60
|
+
end
|
|
61
|
+
_add_index(table, columns, **opts) if !index || !_index_matches?(index, **opts)
|
|
62
|
+
end
|
|
63
|
+
|
|
64
|
+
def _drop_index_if_exists(table, columns, **opts)
|
|
65
|
+
index = _lookup_index(table, columns, **opts)
|
|
66
|
+
_drop_index(table, name: index.name, **opts) if index
|
|
67
|
+
end
|
|
68
|
+
|
|
69
|
+
def _add_index(*args, **opts)
|
|
70
|
+
index_opts = opts.slice!(:wait_timeout, :statement_timeout, :lock_timeout)
|
|
71
|
+
with_retry_loop(**opts) { add_index(*args, **index_opts) }
|
|
72
|
+
end
|
|
73
|
+
|
|
74
|
+
def _drop_index(table, name:, **opts)
|
|
75
|
+
opts.slice!(:wait_timeout, :statement_timeout, :lock_timeout)
|
|
76
|
+
with_retry_loop(**opts) { remove_index(table, name: name) }
|
|
77
|
+
end
|
|
78
|
+
|
|
79
|
+
def _lookup_index(table, columns, **opts)
|
|
80
|
+
connection.indexes(table).find { |idx| idx.name == opts[:name] || idx.columns == Array(columns).map(&:to_s) }
|
|
81
|
+
end
|
|
82
|
+
|
|
83
|
+
def _index_matches?(index, **opts)
|
|
84
|
+
using_default = :btree unless connection.adapter_name == 'SQLite'
|
|
85
|
+
|
|
86
|
+
{ unique: false, where: nil, using: using_default, include: nil, valid?: true }.all? do |key, default|
|
|
87
|
+
!index.respond_to?(key) || opts.fetch(key, default) == index.public_send(key)
|
|
88
|
+
end
|
|
89
|
+
end
|
|
90
|
+
|
|
91
|
+
def dir(direction, &block)
|
|
92
|
+
reversible do |dir|
|
|
93
|
+
dir.up(&block) if %i(up both).include?(direction)
|
|
94
|
+
dir.down(&block) if %i(down both).include?(direction)
|
|
95
|
+
end
|
|
96
|
+
end
|
|
97
|
+
|
|
98
|
+
def set_timeouts!(statement_timeout:, lock_timeout:)
|
|
99
|
+
case connection.adapter_name
|
|
100
|
+
when 'PostgreSQL'
|
|
101
|
+
execute("SET statement_timeout TO #{pg_seconds(statement_timeout) || 'DEFAULT'};")
|
|
102
|
+
execute("SET lock_timeout TO #{pg_seconds(lock_timeout) || 'DEFAULT'};")
|
|
103
|
+
when "MySQL", "Mysql2"
|
|
104
|
+
execute("SET SESSION wait_timeout = #{statement_timeout&.seconds || 'DEFAULT'};")
|
|
105
|
+
execute("SET SESSION lock_wait_timeout = #{lock_timeout&.seconds || 'DEFAULT'};")
|
|
106
|
+
else
|
|
107
|
+
Delayed.logger.info("[delayed] #{connection.adapter_name} does not support setting statement or lock timeouts (skipping).")
|
|
108
|
+
end
|
|
109
|
+
end
|
|
110
|
+
|
|
111
|
+
def pg_seconds(duration)
|
|
112
|
+
"'#{duration.seconds}s'" if duration
|
|
113
|
+
end
|
|
114
|
+
end
|
|
115
|
+
end
|
|
116
|
+
end
|
data/lib/delayed/monitor.rb
CHANGED
|
@@ -20,21 +20,27 @@ module Delayed
|
|
|
20
20
|
def initialize
|
|
21
21
|
@jobs = Job.group(priority_case_statement).group(:queue)
|
|
22
22
|
@jobs = @jobs.where(queue: Worker.queues) if Worker.queues.any?
|
|
23
|
+
@memo = {}
|
|
23
24
|
end
|
|
24
25
|
|
|
25
26
|
def run!
|
|
27
|
+
@memo = {}
|
|
26
28
|
ActiveSupport::Notifications.instrument('delayed.monitor.run', default_tags) do
|
|
27
29
|
METRICS.each { |metric| emit_metric!(metric) }
|
|
28
30
|
end
|
|
29
31
|
interruptable_sleep(sleep_delay)
|
|
30
32
|
end
|
|
31
33
|
|
|
34
|
+
def query_for(metric)
|
|
35
|
+
send(:"#{metric}_grouped")
|
|
36
|
+
end
|
|
37
|
+
|
|
32
38
|
private
|
|
33
39
|
|
|
34
40
|
attr_reader :jobs
|
|
35
41
|
|
|
36
42
|
def emit_metric!(metric)
|
|
37
|
-
|
|
43
|
+
query_for(metric).reverse_merge(default_results).each do |(priority, queue), value|
|
|
38
44
|
ActiveSupport::Notifications.instrument(
|
|
39
45
|
"delayed.job.#{metric}",
|
|
40
46
|
default_tags.merge(priority: Priority.new(priority).to_s, queue: queue, value: value),
|
|
@@ -63,15 +69,19 @@ module Delayed
|
|
|
63
69
|
end
|
|
64
70
|
|
|
65
71
|
def count_grouped
|
|
66
|
-
|
|
72
|
+
if Job.connection.supports_partial_index?
|
|
73
|
+
failed_count_grouped.merge(jobs.live.count) { |_, l, f| l + f }
|
|
74
|
+
else
|
|
75
|
+
jobs.count
|
|
76
|
+
end
|
|
67
77
|
end
|
|
68
78
|
|
|
69
79
|
def future_count_grouped
|
|
70
|
-
jobs.
|
|
80
|
+
jobs.future.count
|
|
71
81
|
end
|
|
72
82
|
|
|
73
83
|
def locked_count_grouped
|
|
74
|
-
jobs.
|
|
84
|
+
@memo[:locked_count_grouped] ||= jobs.claimed.count
|
|
75
85
|
end
|
|
76
86
|
|
|
77
87
|
def erroring_count_grouped
|
|
@@ -79,7 +89,7 @@ module Delayed
|
|
|
79
89
|
end
|
|
80
90
|
|
|
81
91
|
def failed_count_grouped
|
|
82
|
-
jobs.failed.count
|
|
92
|
+
@memo[:failed_count_grouped] ||= jobs.failed.count
|
|
83
93
|
end
|
|
84
94
|
|
|
85
95
|
def max_lock_age_grouped
|
|
@@ -102,19 +112,19 @@ module Delayed
|
|
|
102
112
|
end
|
|
103
113
|
|
|
104
114
|
def workable_count_grouped
|
|
105
|
-
jobs.
|
|
115
|
+
jobs.claimable.count
|
|
106
116
|
end
|
|
107
117
|
|
|
108
|
-
|
|
109
|
-
jobs.working.count
|
|
110
|
-
end
|
|
118
|
+
alias working_count_grouped locked_count_grouped
|
|
111
119
|
|
|
112
120
|
def oldest_locked_job_grouped
|
|
113
|
-
jobs.
|
|
121
|
+
jobs.claimed
|
|
122
|
+
.select("#{priority_case_statement} AS priority, queue, MIN(locked_at) AS locked_at")
|
|
114
123
|
end
|
|
115
124
|
|
|
116
125
|
def oldest_workable_job_grouped
|
|
117
|
-
jobs.
|
|
126
|
+
@memo[:oldest_workable_job_grouped] ||= jobs.claimable
|
|
127
|
+
.select("(#{priority_case_statement}) AS priority, queue, MIN(run_at) AS run_at")
|
|
118
128
|
end
|
|
119
129
|
|
|
120
130
|
def priority_case_statement
|
data/lib/delayed/version.rb
CHANGED
data/lib/delayed/worker.rb
CHANGED
data/lib/delayed.rb
CHANGED