solid_queue_mongoid 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.claude/settings.local.json +38 -0
- data/.idea/copilot.data.migration.ask2agent.xml +6 -0
- data/.idea/inspectionProfiles/Project_Default.xml +5 -0
- data/.idea/jsLibraryMappings.xml +6 -0
- data/.idea/misc.xml +17 -0
- data/.idea/modules/bigdecimal-4.0.iml +18 -0
- data/.idea/modules/builder-3.3.iml +18 -0
- data/.idea/modules/concurrent-ruby-1.3.iml +21 -0
- data/.idea/modules/connection_pool-3.0.iml +18 -0
- data/.idea/modules/crass-1.0.iml +19 -0
- data/.idea/modules/docile-1.4.iml +20 -0
- data/.idea/modules/drb-2.2.iml +18 -0
- data/.idea/modules/erb-6.0.iml +23 -0
- data/.idea/modules/et-orbi-1.4.iml +20 -0
- data/.idea/modules/fugit-1.12.iml +18 -0
- data/.idea/modules/irb-1.17.iml +26 -0
- data/.idea/modules/json-2.18.iml +18 -0
- data/.idea/modules/lint_roller-1.1.iml +18 -0
- data/.idea/modules/mongo-2.23.iml +19 -0
- data/.idea/modules/nokogiri-1.19.iml +19 -0
- data/.idea/modules/parser-3.3.10.iml +19 -0
- data/.idea/modules/pp-0.6.iml +18 -0
- data/.idea/modules/prettyprint-0.2.iml +22 -0
- data/.idea/modules/prism-1.9.iml +20 -0
- data/.idea/modules/raabro-1.4.iml +18 -0
- data/.idea/modules/rake-13.3.iml +22 -0
- data/.idea/modules/rdoc-7.2.iml +22 -0
- data/.idea/modules/regexp_parser-2.11.iml +20 -0
- data/.idea/modules/specifications.iml +18 -0
- data/.idea/modules/thor-1.5.iml +20 -0
- data/.idea/modules/timeout-0.6.iml +22 -0
- data/.idea/modules/tsort-0.2.iml +22 -0
- data/.idea/modules/unicode-emoji-4.2.iml +19 -0
- data/.idea/modules.xml +36 -0
- data/.idea/solid_queue_mongoid.iml +3297 -0
- data/.idea/vcs.xml +6 -0
- data/.idea/workspace.xml +353 -0
- data/.rspec +3 -0
- data/.rubocop.yml +47 -0
- data/ARCHITECTURE.md +91 -0
- data/CHANGELOG.md +27 -0
- data/CODE_OF_CONDUCT.md +132 -0
- data/LICENSE.txt +21 -0
- data/README.md +249 -0
- data/Rakefile +12 -0
- data/lib/solid_queue_mongoid/models/blocked_execution.rb +125 -0
- data/lib/solid_queue_mongoid/models/claimed_execution.rb +134 -0
- data/lib/solid_queue_mongoid/models/classes.rb +32 -0
- data/lib/solid_queue_mongoid/models/execution/dispatching.rb +23 -0
- data/lib/solid_queue_mongoid/models/execution/job_attributes.rb +54 -0
- data/lib/solid_queue_mongoid/models/execution.rb +65 -0
- data/lib/solid_queue_mongoid/models/failed_execution.rb +74 -0
- data/lib/solid_queue_mongoid/models/job/clearable.rb +28 -0
- data/lib/solid_queue_mongoid/models/job/concurrency_controls.rb +93 -0
- data/lib/solid_queue_mongoid/models/job/executable.rb +142 -0
- data/lib/solid_queue_mongoid/models/job/recurrable.rb +14 -0
- data/lib/solid_queue_mongoid/models/job/retryable.rb +51 -0
- data/lib/solid_queue_mongoid/models/job/schedulable.rb +55 -0
- data/lib/solid_queue_mongoid/models/job.rb +103 -0
- data/lib/solid_queue_mongoid/models/pause.rb +25 -0
- data/lib/solid_queue_mongoid/models/process/executor.rb +30 -0
- data/lib/solid_queue_mongoid/models/process/prunable.rb +49 -0
- data/lib/solid_queue_mongoid/models/process.rb +73 -0
- data/lib/solid_queue_mongoid/models/queue.rb +65 -0
- data/lib/solid_queue_mongoid/models/queue_selector.rb +101 -0
- data/lib/solid_queue_mongoid/models/ready_execution.rb +70 -0
- data/lib/solid_queue_mongoid/models/record.rb +147 -0
- data/lib/solid_queue_mongoid/models/recurring_execution.rb +62 -0
- data/lib/solid_queue_mongoid/models/recurring_task/arguments.rb +29 -0
- data/lib/solid_queue_mongoid/models/recurring_task.rb +194 -0
- data/lib/solid_queue_mongoid/models/scheduled_execution.rb +43 -0
- data/lib/solid_queue_mongoid/models/semaphore.rb +179 -0
- data/lib/solid_queue_mongoid/railtie.rb +29 -0
- data/lib/solid_queue_mongoid/version.rb +5 -0
- data/lib/solid_queue_mongoid.rb +136 -0
- data/lib/tasks/solid_queue_mongoid.rake +51 -0
- data/release.sh +13 -0
- data/sig/solid_queue_mongoid.rbs +4 -0
- metadata +173 -0
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  # Base class for the per-state execution documents (ready, claimed,
  # scheduled, failed, ...). Each execution references its Job via job_id.
  class Execution < Record
    include JobAttributes

    class UndiscardableError < StandardError; end

    # Dispatch order: lowest priority value first, job_id as tiebreaker.
    scope :ordered, -> { order_by(priority: :asc, job_id: :asc) }

    class << self
      # Execution type symbol derived from the class name,
      # e.g. ReadyExecution => :ready.
      def type
        model_name.element.sub("_execution", "").to_sym
      end

      # Create one execution per job. The exists? probe short-circuits on
      # job_id alone; create_or_find_by! then absorbs duplicate-key races.
      def create_all_from_jobs(jobs)
        jobs.each do |job|
          attrs = attributes_from_job(job).merge(job_id: job.id)
          next if where(job_id: job.id).exists?

          create_or_find_by!(attrs)
        end
      end

      # Delete executions and their jobs in batches of +batch_size+.
      #
      # Fixed: the original decremented a pending *execution* count by the
      # number of *jobs* deleted (a unit mismatch) and reused one variable
      # for both totals. Termination now depends only on whether the current
      # batch found and deleted anything.
      def discard_all_in_batches(batch_size: 500)
        loop do
          job_ids = limit(batch_size).order_by(job_id: :asc).pluck(:job_id)
          break if job_ids.empty?

          Job.where(:id.in => job_ids).delete_all
          deleted = where(:job_id.in => job_ids).delete_all

          # Guard against spinning if another process raced us to the delete.
          break if deleted.zero?
        end
      end

      # Delete the given jobs and their executions of this type.
      def discard_all_from_jobs(jobs)
        job_ids = jobs.map(&:id)
        discard_jobs(job_ids)
        where(:job_id.in => job_ids).delete_all
      end

      private

      def discard_jobs(job_ids)
        Job.where(:id.in => job_ids).delete_all
      end
    end

    def type
      self.class.type
    end

    # Destroy both the job and this execution, instrumented for observers.
    def discard
      SolidQueue.instrument(:discard, job_id: job_id, status: type) do
        job.destroy
        destroy
      end
    end
  end
end
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  # Execution recorded when a job raises. Stores the exception details in a
  # single +error+ hash and knows how to re-dispatch the job on retry.
  class FailedExecution < Execution
    include Dispatching

    field :error, type: Hash # stores exception_class, message, backtrace

    # Transient holder: set the raising exception here and the error hash is
    # expanded from it on save.
    attr_accessor :exception

    before_save :expand_error_details_from_exception, if: :exception

    index({ created_at: 1 })

    class << self
      # Re-dispatch every failed execution belonging to +jobs+.
      def retry_all(jobs)
        SolidQueue.instrument(:retry_all, jobs_size: jobs.size) do |payload|
          job_ids = jobs.map(&:id)
          payload[:size] = dispatch_jobs(lock_all_from_jobs_ids(job_ids))
        end
      end

      private

      def lock_all_from_jobs_ids(job_ids)
        where(:job_id.in => job_ids).pluck(:job_id)
      end
    end

    # Reset the job's counters, re-enqueue it, and remove this record —
    # atomically, inside a transaction.
    def retry
      SolidQueue.instrument(:retry, job_id: job.id) do
        Mongoid.transaction do
          job.reset_execution_counters
          job.prepare_for_execution
          destroy!
        end
      end
    end

    # Error attribute accessors matching SolidQueue API
    %i[exception_class message backtrace].each do |attribute|
      define_method(attribute) { error&.with_indifferent_access&.[](attribute.to_s) }
    end

    private

    # BSON documents are limited to 16 MB. Reserve a generous budget for the
    # backtrace so a deep stack can never blow the limit.
    BACKTRACE_SIZE_LIMIT = 50_000 # bytes of JSON

    def expand_error_details_from_exception
      return unless exception

      self.error = {
        "exception_class" => exception.class.name,
        "message" => exception.message,
        "backtrace" => truncate_backtrace(exception.backtrace)
      }
    end

    # Keep only as many backtrace lines as fit under BACKTRACE_SIZE_LIMIT
    # bytes of JSON.
    #
    # Fixed: the original called +truncated.to_json+ on every iteration,
    # re-serializing the whole accumulated array each time (O(n^2) in the
    # backtrace length). We now track the encoded size incrementally:
    # a JSON array of strings is 2 bracket bytes plus each encoded line plus
    # one separator byte, which this accounting bounds conservatively.
    def truncate_backtrace(lines)
      return lines if lines.nil?

      truncated = []
      encoded_size = 2 # "[" and "]"
      lines.each do |line|
        line_size = line.to_json.bytesize + 1 # + "," separator
        break if encoded_size + line_size > BACKTRACE_SIZE_LIMIT

        truncated << line
        encoded_size += line_size
      end
      truncated
    end
  end
end
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  class Job
    # Purging of finished jobs past their retention window.
    module Clearable
      extend ActiveSupport::Concern

      included do
        # Jobs finished at or before +finished_before+, optionally narrowed
        # to a single job class.
        scope :clearable, lambda { |finished_before: SolidQueue.clear_finished_jobs_after.ago, class_name: nil|
          scope = where(:finished_at.ne => nil).where(:finished_at.lte => finished_before)
          scope = scope.where(class_name: class_name) if class_name.present?
          scope
        }
      end

      class_methods do
        # Delete clearable jobs in batches of +batch_size+, optionally
        # pausing +sleep_between_batches+ seconds between batches to limit
        # database pressure.
        #
        # Fixed: the original slept *before* checking whether the batch was
        # empty, so every invocation paid one pointless sleep after the final
        # batch. We now break first and only sleep when more work remains.
        def clear_finished_in_batches(batch_size: 500, finished_before: SolidQueue.clear_finished_jobs_after.ago,
                                      class_name: nil, sleep_between_batches: 0)
          loop do
            deleted = clearable(finished_before: finished_before, class_name: class_name).limit(batch_size).delete_all
            break if deleted.zero?

            sleep(sleep_between_batches) if sleep_between_batches.positive?
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  class Job
    # Concurrency limiting: jobs sharing a concurrency_key compete for
    # Semaphore slots. A job that cannot acquire a slot is either blocked or
    # discarded, according to the job class's concurrency_on_conflict.
    module ConcurrencyControls
      extend ActiveSupport::Concern

      included do
        has_one :blocked_execution, class_name: "SolidQueue::BlockedExecution", dependent: :destroy

        # When a ready, limited job goes away, hand its semaphore slot to the
        # next blocked job with the same key.
        before_destroy :unblock_next_blocked_job, if: -> { concurrency_limited? && ready? }
      end

      class_methods do
        def release_all_concurrency_locks(jobs)
          Semaphore.signal_all(jobs.select(&:concurrency_limited?))
        end
      end

      def unblock_next_blocked_job
        release_next_blocked_job if release_concurrency_lock
      end

      def concurrency_limited?
        concurrency_key.present?
      end

      def blocked?
        blocked_execution.present?
      end

      # Per-job stored limit wins; otherwise delegate to the job class
      # (AR-compatible behaviour).
      def concurrency_limit
        stored = read_attribute(:concurrency_limit)
        return stored if stored.present?

        job_class.respond_to?(:concurrency_limit) ? job_class.concurrency_limit : nil
      end

      # Duration is never stored; it always comes from the job class, with a
      # 5-minute default.
      def concurrency_duration
        job_class.respond_to?(:concurrency_duration) ? job_class.concurrency_duration : 5.minutes
      end

      private

      # Conflict strategy as a StringInquirer; defaults to "block".
      def concurrency_on_conflict
        return "block".inquiry unless job_class.respond_to?(:concurrency_on_conflict)

        job_class.concurrency_on_conflict.to_s.inquiry
      end

      def acquire_concurrency_lock
        return true unless concurrency_limited?

        Semaphore.wait(self)
      end

      def release_concurrency_lock
        return false unless concurrency_limited?

        Semaphore.signal(self)
      end

      def handle_concurrency_conflict
        concurrency_on_conflict.discard? ? destroy : block
      end

      def block
        BlockedExecution.create_or_find_by!(job_id: id)
      end

      def release_next_blocked_job
        BlockedExecution.release_one(concurrency_key)
      end

      def job_class
        @job_class ||= class_name.safe_constantize
      end

      def execution
        super || blocked_execution
      end
    end
  end
end
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  class Job
    # Lifecycle of a job through its execution documents: dispatch (ready),
    # schedule (future), claim, finish, discard.
    module Executable
      extend ActiveSupport::Concern

      included do
        include ConcurrencyControls, Schedulable, Retryable

        has_one :ready_execution, class_name: "SolidQueue::ReadyExecution", dependent: :destroy
        has_one :claimed_execution, class_name: "SolidQueue::ClaimedExecution", dependent: :destroy

        after_create :prepare_for_execution

        scope :finished, -> { where(:finished_at.ne => nil) }
        scope :failed, -> { where(:id.in => SolidQueue::FailedExecution.all.pluck(:job_id)) }
        scope :pending, -> { where(finished_at: nil) }
      end

      class_methods do # rubocop:disable Metrics/BlockLength
        # Dispatch a collection of jobs, partitioned by schedule and concurrency.
        def prepare_all_for_execution(jobs)
          due, not_yet_due = jobs.partition(&:due?)
          dispatch_all(due) + schedule_all(not_yet_due)
        end

        # Jobs without concurrency limits go out in bulk; limited jobs go one
        # by one so each can contend for its semaphore.
        def dispatch_all(jobs)
          with_concurrency_limits, without_concurrency_limits = jobs.partition(&:concurrency_limited?)

          dispatch_all_at_once(without_concurrency_limits)
          dispatch_all_one_by_one(with_concurrency_limits)

          successfully_dispatched(jobs)
        end

        private

        def dispatch_all_at_once(jobs)
          ReadyExecution.create_all_from_jobs(jobs)
        end

        def dispatch_all_one_by_one(jobs)
          jobs.each(&:dispatch)
        end

        # Jobs that ended up either ready or blocked after dispatch.
        # Fixed: removed the original's dead `jobs.map(&:id)` statement,
        # whose result was discarded.
        def successfully_dispatched(jobs)
          dispatched_and_ready(jobs) + dispatched_and_blocked(jobs)
        end

        def dispatched_and_ready(jobs)
          jobs_with_execution_in(ReadyExecution, jobs)
        end

        def dispatched_and_blocked(jobs)
          jobs_with_execution_in(BlockedExecution, jobs)
        end

        # Subset of +jobs+ that now have a document in +execution_class+.
        def jobs_with_execution_in(execution_class, jobs)
          job_ids = jobs.map(&:id)
          where(:id.in => execution_class.where(:job_id.in => job_ids).pluck(:job_id))
        end
      end

      # status helpers matching SolidQueue runtime expectations
      %w[ready claimed failed].each do |status|
        define_method("#{status}?") { public_send("#{status}_execution").present? }
      end

      def prepare_for_execution
        if due?
          dispatch
        else
          schedule
        end
      end

      def dispatch
        if due?
          if acquire_concurrency_lock
            ready
          else
            handle_concurrency_conflict
          end
        else
          schedule
        end
      end

      # Called by ClaimedExecution#release — bypasses the semaphore check.
      def dispatch_bypassing_concurrency_limits
        ready
      end

      # Mark the job finished: stamp finished_at when finished jobs are
      # preserved, otherwise destroy the whole job.
      def finished!
        if SolidQueue.preserve_finished_jobs?
          update(finished_at: Time.current)
          # Clean up the claimed execution if still present (e.g. called directly
          # outside of ClaimedExecution#finished which does its own destroy!).
          claimed_execution&.destroy
        else
          destroy!
        end
      end

      alias finish finished!

      def finished?
        finished_at.present?
      end

      # :finished, or the type of the current execution (:ready, :claimed,
      # :failed, ...), or nil when neither applies.
      def status
        if finished?
          :finished
        elsif (exec = execution)
          exec.type
        end
      end

      def discard
        execution&.discard
      end

      # Idempotently create the ReadyExecution; concurrent creators resolve
      # to whichever document won the unique-index race.
      def ready
        existing = ReadyExecution.where(job_id: id).first
        return existing if existing

        re = ReadyExecution.new(job_id: id)
        re.queue_name = queue_name
        re.priority = priority
        re.save!
        re
      rescue Mongoid::Errors::Validations, Mongo::Error::OperationFailure
        ReadyExecution.where(job_id: id).first
      end

      # First present execution, checked in ready -> claimed -> failed order.
      def execution
        %w[ready claimed failed].reduce(nil) do |acc, status|
          acc || public_send("#{status}_execution")
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  class Job
    # Link between a job and the recurring-task execution that spawned it.
    module Recurrable
      extend ActiveSupport::Concern

      included do
        has_one :recurring_execution,
                class_name: "SolidQueue::RecurringExecution",
                foreign_key: :job_id,
                dependent: :destroy
      end
    end
  end
end
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  class Job
    # Failure recording and retry bookkeeping for jobs.
    module Retryable
      extend ActiveSupport::Concern

      included do
        has_one :failed_execution, class_name: "SolidQueue::FailedExecution", dependent: :destroy
      end

      # Re-enqueue via the failed execution, if one exists.
      def retry
        failed_execution&.retry
      end

      def can_retry?
        (read_attribute(:retry_count) || 0) < (read_attribute(:max_retries) || 0)
      end

      # Record +exception+ as this job's FailedExecution. A duplicate-key
      # race with a concurrent writer is resolved by updating the existing
      # document; if that document vanished in between, the method-level
      # `retry` re-runs the whole attempt.
      # NOTE(review): this retry is unbounded — confirm the race cannot
      # persist indefinitely under sustained contention.
      def failed_with(exception)
        FailedExecution.create!(job_id: id, exception: exception)
      rescue Mongoid::Errors::Validations, Mongo::Error::OperationFailure => e
        raise unless duplicate_key_error?(e)

        existing = FailedExecution.find_by(job_id: id)
        if existing
          existing.exception = exception
          existing.save!
        else
          retry
        end
      end

      # Zero out ActiveJob's serialized execution counters before a retry.
      def reset_execution_counters
        return unless arguments.is_a?(Hash)

        arguments["executions"] = 0
        arguments["exception_executions"] = {}
        save!
      end

      private

      # MongoDB reports unique-index violations as E11000 "duplicate key".
      def duplicate_key_error?(err)
        message = err.message.to_s
        message.include?("E11000") || message.include?("duplicate key")
      end
    end
  end
end
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  class Job
    # Future scheduling: jobs with a scheduled_at in the future get a
    # ScheduledExecution instead of being dispatched immediately.
    module Schedulable
      extend ActiveSupport::Concern

      included do
        field :scheduled_at, type: Time

        index({ scheduled_at: 1 }, { sparse: true })

        has_one :scheduled_execution, class_name: "SolidQueue::ScheduledExecution", dependent: :destroy

        # NOTE(review): this scope matches every unfinished job, not just
        # those with a ScheduledExecution — confirm that is intentional.
        scope :scheduled, -> { where(finished_at: nil) }
      end

      class_methods do
        def schedule_all(jobs)
          schedule_all_at_once(jobs)
          successfully_scheduled(jobs)
        end

        private

        def schedule_all_at_once(jobs)
          ScheduledExecution.create_all_from_jobs(jobs)
        end

        # Subset of +jobs+ for which a ScheduledExecution now exists.
        def successfully_scheduled(jobs)
          job_ids = jobs.map(&:id)
          where(:id.in => ScheduledExecution.where(:job_id.in => job_ids).pluck(:job_id))
        end
      end

      # A job is due if it has no scheduled_at or it's in the past/present.
      def due?
        scheduled_at.nil? || scheduled_at <= Time.current
      end

      # True when a ScheduledExecution document exists.
      def scheduled?
        scheduled_execution.present?
      end

      def schedule
        ScheduledExecution.create_or_find_by!(job_id: id)
      end

      def execution
        super || scheduled_execution
      end
    end
  end
end
|
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  # The central job document: created from an ActiveJob, then driven through
  # execution documents by the Executable/Schedulable/Retryable concerns.
  class Job < Record
    class EnqueueError < StandardError; end

    include Clearable
    include Recurrable
    include Executable # includes ConcurrencyControls, Schedulable, Retryable

    field :queue_name, type: String
    field :class_name, type: String
    field :arguments, type: Hash, default: {}
    field :priority, type: Integer, default: 0
    field :active_job_id, type: String
    field :concurrency_key, type: String
    field :concurrency_limit, type: Integer # stored per-job; overrides job_class.concurrency_limit
    field :finished_at, type: Time
    field :max_retries, type: Integer, default: 0
    field :retry_count, type: Integer, default: 0

    index({ queue_name: 1 })
    index({ class_name: 1 })
    index({ priority: 1 })
    index({ active_job_id: 1 }, { sparse: true })
    index({ finished_at: 1 }, { sparse: true })
    index({ concurrency_key: 1 }, { sparse: true })

    validates :queue_name, :class_name, presence: true

    DEFAULT_PRIORITY = 0
    DEFAULT_QUEUE_NAME = "default"

    class << self
      # Primary enqueue entry point — called by the ActiveJob adapter.
      def enqueue(active_job, scheduled_at: Time.current)
        active_job.scheduled_at = scheduled_at

        create_from_active_job(active_job).tap do |enqueued_job|
          next unless enqueued_job.persisted?

          active_job.provider_job_id = enqueued_job.id.to_s
          active_job.successfully_enqueued = true
        end
      end

      # Bulk enqueue — called by the ActiveJob adapter for perform_all_later.
      # Returns the number of jobs successfully enqueued.
      def enqueue_all(active_jobs)
        active_jobs.each { |active_job| active_job.scheduled_at = Time.current }
        by_job_id = active_jobs.index_by(&:job_id)

        created = create_all_from_active_jobs(active_jobs)

        prepare_all_for_execution(created).tap do |enqueued_jobs|
          enqueued_jobs.each do |enqueued_job|
            active_job = by_job_id[enqueued_job.active_job_id]
            next unless active_job

            active_job.provider_job_id = enqueued_job.id.to_s
            active_job.successfully_enqueued = true
          end
        end

        active_jobs.count(&:successfully_enqueued?)
      end

      private

      # Persist one job; wraps any failure in EnqueueError, preserving the
      # original backtrace.
      def create_from_active_job(active_job)
        create!(**attributes_from_active_job(active_job))
      rescue StandardError => e
        enqueue_error = EnqueueError.new("#{e.class.name}: #{e.message}").tap do |err|
          err.set_backtrace(e.backtrace)
        end
        raise enqueue_error
      end

      # Persist many jobs, silently dropping the ones that fail to enqueue.
      def create_all_from_active_jobs(active_jobs)
        active_jobs.filter_map do |active_job|
          create_from_active_job(active_job)
        rescue EnqueueError
          nil
        end
      end

      def attributes_from_active_job(active_job)
        {
          queue_name: active_job.queue_name || DEFAULT_QUEUE_NAME,
          active_job_id: active_job.job_id,
          priority: active_job.priority || DEFAULT_PRIORITY,
          scheduled_at: active_job.scheduled_at,
          class_name: active_job.class.name,
          arguments: active_job.serialize,
          concurrency_key: active_job.try(:concurrency_key)
        }.compact
      end
    end

    # Rebuild the ActiveJob instance from the serialized arguments.
    def deserialize_for_active_job
      ActiveJob::Base.deserialize(arguments)
    end
  end
end
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  # Presence of a Pause document for a queue_name means that queue is paused.
  class Pause < Record
    field :queue_name, type: String

    index({ queue_name: 1 }, { unique: true })

    validates :queue_name, presence: true, uniqueness: true

    class << self
      # Idempotently pause a queue.
      def pause_queue(queue_name)
        create_or_find_by!(queue_name: queue_name)
      end

      # Resume a queue by removing its pause marker (no-op when absent).
      def resume_queue(queue_name)
        where(queue_name: queue_name).delete_all
      end

      def paused?(queue_name)
        where(queue_name: queue_name).exists?
      end
    end
  end
end
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  class Process
    # Behaviour for processes that claim executions (workers): releases or
    # fails their claimed work when the process goes away.
    module Executor
      extend ActiveSupport::Concern

      included do
        has_many :claimed_executions,
                 class_name: "SolidQueue::ClaimedExecution",
                 foreign_key: :process_id

        after_destroy :release_all_claimed_executions
      end

      def fail_all_claimed_executions_with(error)
        claimed_executions.fail_all_with(error) if claims_executions?
      end

      def release_all_claimed_executions
        claimed_executions.release_all if claims_executions?
      end

      private

      # Only workers (or processes of unknown kind) hold claims.
      def claims_executions?
        kind.nil? || kind == "Worker"
      end
    end
  end
end
|