solid_queue_mongoid 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.claude/settings.local.json +38 -0
- data/.idea/copilot.data.migration.ask2agent.xml +6 -0
- data/.idea/inspectionProfiles/Project_Default.xml +5 -0
- data/.idea/jsLibraryMappings.xml +6 -0
- data/.idea/misc.xml +17 -0
- data/.idea/modules/bigdecimal-4.0.iml +18 -0
- data/.idea/modules/builder-3.3.iml +18 -0
- data/.idea/modules/concurrent-ruby-1.3.iml +21 -0
- data/.idea/modules/connection_pool-3.0.iml +18 -0
- data/.idea/modules/crass-1.0.iml +19 -0
- data/.idea/modules/docile-1.4.iml +20 -0
- data/.idea/modules/drb-2.2.iml +18 -0
- data/.idea/modules/erb-6.0.iml +23 -0
- data/.idea/modules/et-orbi-1.4.iml +20 -0
- data/.idea/modules/fugit-1.12.iml +18 -0
- data/.idea/modules/irb-1.17.iml +26 -0
- data/.idea/modules/json-2.18.iml +18 -0
- data/.idea/modules/lint_roller-1.1.iml +18 -0
- data/.idea/modules/mongo-2.23.iml +19 -0
- data/.idea/modules/nokogiri-1.19.iml +19 -0
- data/.idea/modules/parser-3.3.10.iml +19 -0
- data/.idea/modules/pp-0.6.iml +18 -0
- data/.idea/modules/prettyprint-0.2.iml +22 -0
- data/.idea/modules/prism-1.9.iml +20 -0
- data/.idea/modules/raabro-1.4.iml +18 -0
- data/.idea/modules/rake-13.3.iml +22 -0
- data/.idea/modules/rdoc-7.2.iml +22 -0
- data/.idea/modules/regexp_parser-2.11.iml +20 -0
- data/.idea/modules/specifications.iml +18 -0
- data/.idea/modules/thor-1.5.iml +20 -0
- data/.idea/modules/timeout-0.6.iml +22 -0
- data/.idea/modules/tsort-0.2.iml +22 -0
- data/.idea/modules/unicode-emoji-4.2.iml +19 -0
- data/.idea/modules.xml +36 -0
- data/.idea/solid_queue_mongoid.iml +3297 -0
- data/.idea/vcs.xml +6 -0
- data/.idea/workspace.xml +353 -0
- data/.rspec +3 -0
- data/.rubocop.yml +47 -0
- data/ARCHITECTURE.md +91 -0
- data/CHANGELOG.md +27 -0
- data/CODE_OF_CONDUCT.md +132 -0
- data/LICENSE.txt +21 -0
- data/README.md +249 -0
- data/Rakefile +12 -0
- data/lib/solid_queue_mongoid/models/blocked_execution.rb +125 -0
- data/lib/solid_queue_mongoid/models/claimed_execution.rb +134 -0
- data/lib/solid_queue_mongoid/models/classes.rb +32 -0
- data/lib/solid_queue_mongoid/models/execution/dispatching.rb +23 -0
- data/lib/solid_queue_mongoid/models/execution/job_attributes.rb +54 -0
- data/lib/solid_queue_mongoid/models/execution.rb +65 -0
- data/lib/solid_queue_mongoid/models/failed_execution.rb +74 -0
- data/lib/solid_queue_mongoid/models/job/clearable.rb +28 -0
- data/lib/solid_queue_mongoid/models/job/concurrency_controls.rb +93 -0
- data/lib/solid_queue_mongoid/models/job/executable.rb +142 -0
- data/lib/solid_queue_mongoid/models/job/recurrable.rb +14 -0
- data/lib/solid_queue_mongoid/models/job/retryable.rb +51 -0
- data/lib/solid_queue_mongoid/models/job/schedulable.rb +55 -0
- data/lib/solid_queue_mongoid/models/job.rb +103 -0
- data/lib/solid_queue_mongoid/models/pause.rb +25 -0
- data/lib/solid_queue_mongoid/models/process/executor.rb +30 -0
- data/lib/solid_queue_mongoid/models/process/prunable.rb +49 -0
- data/lib/solid_queue_mongoid/models/process.rb +73 -0
- data/lib/solid_queue_mongoid/models/queue.rb +65 -0
- data/lib/solid_queue_mongoid/models/queue_selector.rb +101 -0
- data/lib/solid_queue_mongoid/models/ready_execution.rb +70 -0
- data/lib/solid_queue_mongoid/models/record.rb +147 -0
- data/lib/solid_queue_mongoid/models/recurring_execution.rb +62 -0
- data/lib/solid_queue_mongoid/models/recurring_task/arguments.rb +29 -0
- data/lib/solid_queue_mongoid/models/recurring_task.rb +194 -0
- data/lib/solid_queue_mongoid/models/scheduled_execution.rb +43 -0
- data/lib/solid_queue_mongoid/models/semaphore.rb +179 -0
- data/lib/solid_queue_mongoid/railtie.rb +29 -0
- data/lib/solid_queue_mongoid/version.rb +5 -0
- data/lib/solid_queue_mongoid.rb +136 -0
- data/lib/tasks/solid_queue_mongoid.rake +51 -0
- data/release.sh +13 -0
- data/sig/solid_queue_mongoid.rbs +4 -0
- metadata +173 -0
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  class Process
    # Mixin that removes processes whose heartbeat has gone stale.
    module Prunable
      extend ActiveSupport::Concern

      included do
        # Processes whose heartbeat is older than the configured alive threshold.
        scope :prunable, -> { where(:last_heartbeat_at.lte => SolidQueue.process_alive_threshold.ago) }
      end

      class_methods do
        # Called by the supervisor maintenance loop.
        # +excluding+ is the current supervisor Process record (or nil); it is
        # never pruned itself.
        def prune(excluding: nil)
          SolidQueue.instrument(:prune_processes, size: 0) do |payload|
            candidates = prunable
            candidates = candidates.not.where(id: excluding.id) if excluding.present?

            candidates.each do |process|
              payload[:size] += 1
              process.prune
            end
          end
        end

        # Integration-spec compatible helper: prune processes whose heartbeat
        # is older than +timeout+ ago.
        def prune_stale_processes(timeout: SolidQueue.process_alive_threshold)
          where(:last_heartbeat_at.lte => timeout.ago).each(&:prune)
        end
      end

      # Fail all executions claimed by this process, then deregister it.
      def prune
        fail_all_claimed_executions_with(pruned_error)
        deregister(pruned: true)
      end

      private

      # Prefer the upstream solid_queue error class when it is loaded;
      # otherwise fall back to a RuntimeError carrying the same information.
      def pruned_error
        ::SolidQueue::Processes::ProcessPrunedError.new(last_heartbeat_at)
      rescue NameError
        RuntimeError.new("Process pruned: last heartbeat at #{last_heartbeat_at}")
      end
    end
  end
end
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  # A registered supervisor/worker/dispatcher process, identified by
  # hostname + pid and kept alive via periodic heartbeats.
  class Process < Record
    include Process::Executor
    include Process::Prunable

    field :kind, type: String
    field :name, type: String
    field :hostname, type: String
    field :pid, type: Integer
    field :supervisor_id, type: BSON::ObjectId
    field :metadata, type: Hash, default: {}
    field :last_heartbeat_at, type: Time

    belongs_to :supervisor, class_name: "SolidQueue::Process", optional: true,
               inverse_of: :supervisees
    has_many :supervisees, class_name: "SolidQueue::Process",
             inverse_of: :supervisor, foreign_key: :supervisor_id

    index({ hostname: 1, pid: 1 })
    index({ last_heartbeat_at: 1 })
    index({ kind: 1 })
    index({ supervisor_id: 1 }, { sparse: true })

    validates :hostname, :pid, presence: true

    # Called by SolidQueue::Processes::Registrable#register.
    # Must accept: kind:, name:, pid:, hostname:, and optional metadata:/supervisor:.
    # Any extra keyword arguments are folded into metadata.
    def self.register(kind:, name:, pid:, hostname:, supervisor: nil, metadata: {}, **rest)
      attrs = {
        kind: kind,
        name: name,
        pid: pid,
        hostname: hostname,
        supervisor: supervisor,
        metadata: (metadata || {}).merge(rest),
        last_heartbeat_at: Time.current
      }
      SolidQueue.instrument(:register_process, kind: kind, name: name, pid: pid, hostname: hostname) do |payload|
        create!(attrs).tap { |process| payload[:process_id] = process.id }
      rescue StandardError => e
        payload[:error] = e
        raise
      end
    end

    # Refresh this process' heartbeat timestamp.
    def heartbeat
      begin
        # Reload to clear any stale in-memory state first; a failed reload is
        # ignored and update! below surfaces any real problem.
        reload
      rescue StandardError
        nil
      end
      update!(last_heartbeat_at: Time.current)
    end

    # Destroy this process record. When this is an unsupervised root being
    # shut down normally (not pruned), its supervisees are deregistered too.
    def deregister(pruned: false)
      SolidQueue.instrument(:deregister_process, process: self, pruned: pruned) do |payload|
        destroy!

        supervisees.each(&:deregister) unless supervised? || pruned
      rescue StandardError => e
        payload[:error] = e
        raise
      end
    end

    private

    # A process is supervised when it points at a parent supervisor record.
    def supervised?
      supervisor_id.present?
    end
  end
end
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  # Plain Ruby class — mirrors upstream SolidQueue::Queue (1.3.x).
  # State is derived from actual Job/Pause/ReadyExecution documents;
  # no separate Queue collection is maintained.
  class Queue
    attr_accessor :name

    # One Queue instance per distinct queue_name present among jobs.
    def self.all
      Job.distinct(:queue_name).map { |queue_name| new(queue_name) }
    end

    # Queues are virtual, so lookup by name always succeeds.
    def self.find_by_name(name)
      new(name)
    end

    def initialize(name)
      @name = name
    end

    def paused?
      Pause.where(queue_name: name).exists?
    end

    def pause
      Pause.pause_queue(name)
    end

    def resume
      Pause.resume_queue(name)
    end

    # Drop every ready execution queued under this name.
    def clear
      ReadyExecution.queued_as(name).discard_all_in_batches
    end

    # Number of ready executions (memoized per instance).
    def size
      @size ||= ReadyExecution.queued_as(name).count
    end

    # Whole seconds the oldest ready execution has been waiting (memoized).
    def latency
      @latency ||= begin
        reference = Time.current
        oldest_enqueued_at = ReadyExecution.queued_as(name).min(:created_at) || reference
        (reference - oldest_enqueued_at).to_i
      end
    end

    def human_latency
      ActiveSupport::Duration.build(latency).inspect
    end

    # Queues compare equal to other queues with the same name, and to the
    # bare name itself.
    def ==(other)
      if other.respond_to?(:name)
        name == other.name
      else
        name == other
      end
    end
    alias eql? ==

    def hash
      name.hash
    end
  end
end
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  # Mirrors SolidQueue::QueueSelector but uses Mongo queries for wildcard resolution.
  class QueueSelector
    attr_reader :raw_queues, :relation

    # +queue_list+ may be a name, an array of names, "*", or "prefix*"
    # wildcards; +relation+ is the criteria being scoped (e.g. ReadyExecution).
    def initialize(queue_list, relation)
      @raw_queues = Array(queue_list).map { |q| q.to_s.strip }.presence || ["*"]
      @relation = relation
    end

    # Returns an array of Mongoid criteria scoped to individual queue names,
    # or a single all/none criteria when appropriate.
    def scoped_relations
      if all?
        [relation.all]
      elsif none?
        []
      else
        queue_names.map { |queue_name| relation.queued_as(queue_name) }
      end
    end

    private

    def all?
      include_all_queues? && paused_queues.empty?
    end

    def none?
      queue_names.empty?
    end

    def queue_names
      @queue_names ||= eligible_queues - paused_queues
    end

    def eligible_queues
      if include_all_queues?
        all_queues
      else
        in_raw_order(exact_names + prefixed_names)
      end
    end

    def include_all_queues?
      raw_queues.include?("*")
    end

    # Pull all distinct queue names currently present in this relation.
    def all_queues
      relation.distinct(:queue_name)
    end

    def exact_names
      raw_queues.select { |q| exact_name?(q) }
    end

    # Resolve "prefix*" entries against the actual queue names in Mongo.
    def prefixed_names
      return [] if prefixes.empty?

      prefixes.flat_map do |prefix|
        # Use anchored regex for mongo prefix match
        relation.where(queue_name: /\A#{Regexp.escape(prefix)}/).distinct(:queue_name)
      end.uniq
    end

    def prefixes
      @prefixes ||= raw_queues.select { |q| prefixed_name?(q) }.map { |q| q.chomp("*") }
    end

    def exact_name?(queue)
      !queue.include?("*")
    end

    def prefixed_name?(queue)
      queue.end_with?("*")
    end

    def paused_queues
      @paused_queues ||= Pause.all.pluck(:queue_name)
    end

    # Re-orders +queues+ to follow the order given in raw_queues, expanding
    # wildcards in place. Each queue name is consumed at most once.
    def in_raw_order(queues)
      return queues if queues.size <= 1 || prefixes.empty?

      remaining = queues.dup
      raw_queues.flat_map { |raw| delete_in_order(raw, remaining) }.compact
    end

    # Removes and returns the entries of +queues+ matching +raw_queue+,
    # mutating +queues+ so a name matched by an earlier raw entry is not
    # emitted again by a later one.
    #
    # BUG FIX: the original used `queues -= matches` inside a tap block, which
    # rebinds the local variable instead of mutating the shared working array —
    # names matched by a wildcard were never consumed, so a queue covered by
    # both "prefix*" and an exact entry appeared twice in the result.
    def delete_in_order(raw_queue, queues)
      if exact_name?(raw_queue)
        queues.delete(raw_queue)
      elsif prefixed_name?(raw_queue)
        prefix = raw_queue.chomp("*")
        matches = queues.select { |q| q.start_with?(prefix) }
        queues.reject! { |q| q.start_with?(prefix) }
        matches
      end
    end
  end
end
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  # Executions waiting to be picked up by a worker.
  class ReadyExecution < Execution
    assumes_attributes_from_job # inherits queue_name and priority

    scope :queued_as, ->(queue_name) { where(queue_name: queue_name) }

    index({ queue_name: 1, priority: 1, created_at: 1 })

    class << self
      # Primary entry point called by SolidQueue::Worker.
      # Atomically claims up to +limit+ executions from +queue_list+ for +process_id+.
      def claim(queue_list, limit, process_id)
        relations = QueueSelector.new(queue_list, self).scoped_relations

        relations.flat_map do |queue_relation|
          locked = select_and_lock(queue_relation, process_id, limit)
          limit -= locked.size
          locked
        end
      end

      # Integration spec compatible wrapper:
      # claim_batch(limit, process:, queues:)
      def claim_batch(limit, process:, queues: "*")
        claim(queues, limit, process.id)
      end

      # Called by Worker#all_work_completed?.
      def aggregated_count_across(queue_list)
        QueueSelector.new(queue_list, self).scoped_relations.sum(&:count)
      end

      private

      # Atomically remove a ReadyExecution and create a ClaimedExecution.
      # Uses findOneAndDelete for each slot to guarantee no double-claim.
      def select_and_lock(queue_relation, process_id, limit)
        return [] if limit <= 0

        claimed = []
        candidates = select_candidates(queue_relation, limit)
        ClaimedExecution.claiming(candidates, process_id) do |claimed_set|
          claimed = claimed_set
        end
        claimed
      end

      # Pop up to +limit+ documents off the relation via findOneAndDelete,
      # returning their job ids.
      # NOTE(review): sorts priority descending (-1); upstream solid_queue
      # treats LOWER priority values as more urgent — confirm this ordering
      # is intentional for this port.
      def select_candidates(queue_relation, limit)
        [].tap do |job_ids|
          limit.times do
            document = queue_relation.collection.find_one_and_delete(
              queue_relation.selector,
              sort: { "priority" => -1, "created_at" => 1 }
            )
            break if document.nil?

            job_ids << document["job_id"]
          end
        end
      end

      # Remove the given jobs along with their concurrency locks.
      def discard_jobs(job_ids)
        jobs = Job.where(:id.in => job_ids).to_a
        Job.release_all_concurrency_locks(jobs)
        Job.where(:id.in => job_ids).delete_all
      end
    end
  end
end
|
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  # Base class for all solid_queue_mongoid models: per-class index
  # bookkeeping, prefixed collection naming, and ActiveRecord-compatibility
  # shims (transaction, create_or_find_by!, non_blocking_lock, ...).
  class Record
    include Mongoid::Document
    include Mongoid::Timestamps

    # Subclasses declare their MySQL index name → MongoDB field spec mappings here.
    INDEX_HINTS = {}.freeze

    # Override Mongoid's index_specifications to use per-class instance variables
    # instead of the shared cattr_accessor class variable.
    # This prevents index cross-contamination between models.
    class << self
      def index_specifications
        @index_specifications ||= []
      end

      # BUG FIX: the original writer assigned @_sq_index_specs while the reader
      # above consulted @index_specifications, so assignments (including the
      # reset in .inherited) were silently ignored. Both now use the same ivar.
      def index_specifications=(val)
        @index_specifications = val
      end

      # Override Mongoid's index() to use our per-class storage.
      def index(spec, options = nil)
        specification = Mongoid::Indexable::Specification.new(self, spec, options)
        return if index_specifications.include?(specification)

        index_specifications.push(specification)
      end
    end

    # Dynamic collection naming with prefix
    def self.inherited(subclass)
      super

      collection_name = subclass.name.demodulize.tableize
      prefixed_name = "#{SolidQueue.collection_prefix}#{collection_name}"

      subclass.store_in collection: prefixed_name, client: -> { SolidQueue.client.to_s }

      # Each subclass gets its own empty index_specifications array.
      # Indexes must be explicitly defined in each subclass (not inherited from parent)
      # because Mongoid::Indexable::Specification stores a reference to the klass,
      # and copying parent specs would create indexes on the parent's collection.
      subclass.index_specifications = []
    end

    class << self
      # MongoDB has no row-level locking; this is a no-op stub so SolidQueue
      # code that chains .non_blocking_lock still works.
      def non_blocking_lock
        all
      end

      # Translate solid_queue MySQL index names to MongoDB hint specs and apply
      # via Mongoid's .hint(). Unknown names are ignored (no hint applied).
      # Subclasses may override INDEX_HINTS to register their own mappings.
      def use_index(*indexes)
        specs = indexes.filter_map do |name|
          name.is_a?(Hash) ? name : self::INDEX_HINTS[name.to_sym]
        end
        specs.any? ? hint(specs.first) : all
      end

      # MongoDB supports unique indexes which serve the same purpose.
      def supports_insert_conflict_target?
        true
      end

      # Run +block+ inside a MongoDB session transaction when available, falling
      # back to a plain yield for environments without session/transaction
      # support (e.g. a standalone mongod in tests).
      #
      # BUG FIX: the original had its fallback condition inverted (it raised
      # exactly when the message indicated a standalone server) and ended with
      # `rescue StandardError; yield`, which re-executed the block on ANY
      # failure — swallowing block errors and duplicating side effects. Now
      # block errors abort the transaction and propagate; only the
      # no-session/standalone cases re-run the block without a transaction.
      def transaction(_requires_new: false, &block)
        # NOTE(review): Mongoid::QueryCache is deprecated in Mongoid 9 in favor
        # of Mongo::QueryCache — confirm against the targeted Mongoid version.
        Mongoid::QueryCache.clear_cache
        Mongoid.default_client.with_session do |session|
          session.start_transaction
          begin
            result = block.call
          rescue StandardError => block_error
            begin
              session.abort_transaction
            rescue StandardError
              # Best effort — the server aborts the transaction on session end.
            end
            raise block_error
          end
          session.commit_transaction
          result
        end
      rescue Mongo::Error::InvalidSession
        # Sessions unsupported — execute without a transaction.
        block.call
      rescue Mongo::Error::OperationFailure => e
        # Standalone servers reject transactions with this message; re-run the
        # block outside a transaction. Any other operation failure propagates.
        raise unless e.message.to_s.include?("Transaction numbers are only allowed")

        block.call
      end

      # Mongoid equivalent of ActiveRecord's create_or_find_by!.
      # Tries to create; on duplicate-key error or uniqueness validation error
      # finds the existing record. Falls back to finding by job_id alone when
      # the full attrs lookup would miss the existing record (e.g. queue_name
      # is not in attrs but was set via a before_create callback).
      def create_or_find_by!(attrs, &block)
        record = new(attrs)
        block.call(record) if block_given?
        record.save!
        record
      rescue Mongoid::Errors::Validations => e
        # If the only errors are uniqueness-related, fall back to find the existing record
        raise unless uniqueness_only_error?(e.document)

        find_by_unique_key(attrs) || where(attrs).first || record
      rescue Mongo::Error::OperationFailure => e
        raise unless duplicate_key_error?(e)

        find_by_unique_key(attrs) || where(attrs).first || raise(e)
      end

      # find_by that raises Mongoid::Errors::DocumentNotFound when missing.
      def find_by!(attrs)
        find_by(attrs) || raise(Mongoid::Errors::DocumentNotFound.new(self, attrs))
      end

      private

      def duplicate_key_error?(err)
        msg = err.respond_to?(:message) ? err.message.to_s : err.to_s
        msg.include?("E11000") || msg.include?("duplicate key")
      end

      # Try to find an existing record using just the unique key field(s).
      # Used as fallback when find_by(full_attrs) misses because some fields
      # (e.g. queue_name) are only set by callbacks, not passed in attrs.
      # Uses where().first to avoid DocumentNotFound exceptions.
      def find_by_unique_key(attrs)
        return where(job_id: attrs[:job_id]).first if attrs[:job_id]
        return where(key: attrs[:key]).first if attrs[:key]

        nil
      end

      # True when every validation error on +document+ is a uniqueness error.
      def uniqueness_only_error?(document)
        return false unless document.respond_to?(:errors)

        document.errors.all? do |error|
          error.type == :taken || error.message.to_s.include?("already been taken") ||
            (error.attribute.to_s != "base" &&
              document.class.validators
                      .select { |v| v.is_a?(Mongoid::Validatable::UniquenessValidator) }
                      .any? { |v| v.attributes.include?(error.attribute.to_sym) })
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
# frozen_string_literal: true

module SolidQueue
  # Records that a recurring task was enqueued for a given run time, enforcing
  # at-most-once enqueueing per (task_key, run_at) via a unique index.
  class RecurringExecution < Record
    class AlreadyRecorded < StandardError; end

    field :task_key, type: String
    field :run_at, type: Time

    # optional: job may have been purged already
    belongs_to :job, class_name: "SolidQueue::Job", optional: true

    index({ task_key: 1, run_at: 1 }, { unique: true })
    index({ job_id: 1 })

    # Clearable when the associated job no longer exists.
    scope :clearable, lambda {
      existing_job_ids = SolidQueue::Job.all.pluck(:id)
      where(:job_id.nin => existing_job_ids).or(job_id: nil)
    }

    class << self
      # Called by RecurringTask#enqueue_and_record.
      # Wraps the block; records the execution only if the job was successfully enqueued.
      #
      # CONSISTENCY FIX: uses the inherited Record.transaction (which degrades
      # gracefully on standalone MongoDB) instead of Mongoid.transaction, so
      # transaction handling matches the rest of the models.
      def record(task_key, run_at, &block)
        transaction do
          block.call.tap do |active_job|
            if active_job&.successfully_enqueued?
              create_or_insert!(
                task_key: task_key,
                run_at: run_at,
                job_id: active_job.provider_job_id
              )
            end
          end
        end
      end

      # Atomic insert — raises AlreadyRecorded on duplicate (same task_key + run_at).
      def create_or_insert!(task_key:, run_at:, job_id: nil)
        create!(task_key: task_key, run_at: run_at, job_id: job_id)
      rescue Mongoid::Errors::Validations, Mongo::Error::OperationFailure => e
        raise AlreadyRecorded if duplicate_key_error?(e)

        raise
      end

      # Delete clearable executions in real batches.
      #
      # BUG FIX: the original chained limit(batch_size).delete_all, but
      # Mongoid's delete_all ignores limit (it issues delete_many with only
      # the selector), so everything was removed in a single unbounded pass.
      # Batching by id makes each round delete at most batch_size documents.
      def clear_in_batches(batch_size: 500)
        loop do
          batch_ids = clearable.limit(batch_size).pluck(:id)
          break if batch_ids.empty?

          where(:id.in => batch_ids).delete_all
        end
      end

      private

      def duplicate_key_error?(err)
        message = err.message.to_s
        message.include?("E11000") || message.include?("duplicate key")
      end
    end
  end
end
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
# frozen_string_literal: true

require "active_job/serializers"
require "active_job/arguments"

module SolidQueue
  class RecurringTask
    # Serializer for recurring task arguments — stores as an ActiveJob-serialized
    # JSON array in MongoDB. Matches solid_queue 1.3.2 RecurringTask::Arguments exactly.
    module Arguments
      class << self
        # Called when writing to MongoDB: serialize ActiveJob arguments to an Array.
        def mongoize(data)
          return [] if data.nil?

          ActiveJob::Arguments.serialize(Array(data))
        end

        # Called when reading from MongoDB: deserialize back to Ruby objects.
        def demongoize(data)
          return [] if data.nil?

          ActiveJob::Arguments.deserialize(Array(data))
        end

        # Called for query conditions — same representation as stored data.
        def evolve(data)
          mongoize(data)
        end
      end
    end
  end
end
|