que 1.0.0.beta → 1.0.0.beta5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/.github/workflows/tests.yml +43 -0
- data/CHANGELOG.1.0.beta.md +137 -0
- data/CHANGELOG.md +34 -12
- data/README.md +67 -7
- data/bin/command_line_interface.rb +61 -49
- data/docs/README.md +785 -32
- data/lib/que/active_record/connection.rb +4 -4
- data/lib/que/active_record/model.rb +4 -4
- data/lib/que/connection.rb +35 -16
- data/lib/que/connection_pool.rb +2 -2
- data/lib/que/job.rb +1 -1
- data/lib/que/{job_cache.rb → job_buffer.rb} +96 -72
- data/lib/que/job_methods.rb +4 -0
- data/lib/que/locker.rb +170 -139
- data/lib/que/poller.rb +1 -1
- data/lib/que/rails/railtie.rb +2 -4
- data/lib/que/result_queue.rb +2 -2
- data/lib/que/sequel/model.rb +14 -16
- data/lib/que/utils/constantization.rb +1 -1
- data/lib/que/utils/logging.rb +2 -1
- data/lib/que/utils/middleware.rb +26 -13
- data/lib/que/version.rb +1 -1
- data/lib/que/worker.rb +43 -21
- data/lib/que.rb +9 -4
- data/que.gemspec +2 -2
- metadata +11 -25
- data/docs/active_job.md +0 -6
- data/docs/advanced_setup.md +0 -49
- data/docs/command_line_interface.md +0 -45
- data/docs/error_handling.md +0 -94
- data/docs/inspecting_the_queue.md +0 -64
- data/docs/job_helper_methods.md +0 -27
- data/docs/logging.md +0 -31
- data/docs/managing_workers.md +0 -25
- data/docs/middleware.md +0 -15
- data/docs/migrating.md +0 -27
- data/docs/multiple_queues.md +0 -31
- data/docs/shutting_down_safely.md +0 -7
- data/docs/using_plain_connections.md +0 -65
- data/docs/using_sequel.md +0 -33
- data/docs/writing_reliable_jobs.md +0 -108
data/lib/que/active_record/connection.rb
CHANGED
@@ -17,17 +17,17 @@ module Que
 
       # Use Rails' executor (if present) to make sure that the connection
       # we're using isn't taken from us while the block runs. See
-      # https://github.com/
-      def wrap_in_rails_executor
+      # https://github.com/que-rb/que/issues/166#issuecomment-274218910
+      def wrap_in_rails_executor(&block)
         if defined?(::Rails.application.executor)
-          ::Rails.application.executor.wrap
+          ::Rails.application.executor.wrap(&block)
         else
           yield
         end
       end
     end
 
-    module
+    module JobMiddleware
      class << self
        def call(job)
          yield
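The executor change above passes the caller's block through explicitly with &block instead of yielding inside the wrapper. A small standalone sketch of that block-forwarding pattern (the Executor class and method names here are illustrative, not Que's or Rails' API):

    # Forwarding &block hands the caller's block directly to the wrapping
    # method, so the wrapper runs exactly the work the caller supplied.
    class Executor
      def self.wrap
        puts "setup"
        result = yield
        puts "teardown"
        result
      end
    end

    def wrap_in_executor(&block)
      if defined?(Executor)
        Executor.wrap(&block)
      else
        yield
      end
    end

    wrap_in_executor { puts "running job" }
    # prints: setup, running job, teardown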
data/lib/que/active_record/model.rb
CHANGED
@@ -3,7 +3,7 @@
 module Que
   module ActiveRecord
     class Model < ::ActiveRecord::Base
-      self.table_name =
+      self.table_name = 'public.que_jobs'
 
       t = arel_table
 
@@ -16,11 +16,11 @@ module Que
       scope :finished,     -> { where(t[:finished_at].not_eq(nil)) }
       scope :not_finished, -> { where(t[:finished_at].eq(nil)) }
 
-      scope :scheduled,     -> { where(t[:run_at].gt("now()")) }
-      scope :not_scheduled, -> { where(t[:run_at].lteq("now()")) }
+      scope :scheduled,     -> { where(t[:run_at].gt(Arel.sql("now()"))) }
+      scope :not_scheduled, -> { where(t[:run_at].lteq(Arel.sql("now()"))) }
 
       scope :ready,     -> { not_errored.not_expired.not_finished.not_scheduled }
-      scope :not_ready, -> { where(t[:error_count].gt(0).or(t[:expired_at].not_eq(nil)).or(t[:finished_at].not_eq(nil)).or(t[:run_at].gt("now()"))) }
+      scope :not_ready, -> { where(t[:error_count].gt(0).or(t[:expired_at].not_eq(nil)).or(t[:finished_at].not_eq(nil)).or(t[:run_at].gt(Arel.sql("now()")))) }
 
       class << self
         def by_job_class(job_class)
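The Arel.sql wrapping above matters on newer Rails versions, where Arel predicate methods treat a bare string as a value to be quoted rather than as raw SQL. An illustrative use of the same pattern against the model defined here (output shape approximate):

    # Marking "now()" with Arel.sql tells Arel it is a SQL fragment, so the
    # comparison runs against the database's now() function instead of a
    # quoted string value.
    t = Que::ActiveRecord::Model.arel_table
    relation = Que::ActiveRecord::Model.where(t[:run_at].gt(Arel.sql("now()")))
    relation.to_sql
    # roughly: SELECT "que_jobs".* FROM "que_jobs" WHERE "que_jobs"."run_at" > now()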
data/lib/que/connection.rb
CHANGED
@@ -30,8 +30,11 @@ module Que
       when self
         conn
       when PG::Connection
-        conn.
-
+        if conn.instance_variable_defined?(:@que_wrapper)
+          conn.instance_variable_get(:@que_wrapper)
+        else
+          conn.instance_variable_set(:@que_wrapper, new(conn))
+        end
       else
         raise Error, "Unsupported input for Connection.wrap: #{conn.class}"
       end
@@ -43,7 +46,7 @@ module Que
       @prepared_statements = Set.new
     end
 
-    def execute(command, params =
+    def execute(command, params = [])
       sql =
         case command
         when Symbol then SQL[command]
@@ -51,8 +54,17 @@ module Que
         else raise Error, "Bad command! #{command.inspect}"
         end
 
-      params = convert_params(params)
-
+      params = convert_params(params)
+
+      result =
+        Que.run_sql_middleware(sql, params) do
+          # Some versions of the PG gem dislike an empty/nil params argument.
+          if params.empty?
+            wrapped_connection.async_exec(sql)
+          else
+            wrapped_connection.async_exec(sql, params)
+          end
+        end
 
       Que.internal_log :connection_execute, self do
         {
@@ -107,6 +119,10 @@ module Que
       loop { break if next_notification.nil? }
     end
 
+    def server_version
+      wrapped_connection.server_version
+    end
+
     def in_transaction?
       wrapped_connection.transaction_status != ::PG::PQTRANS_IDLE
     end
@@ -129,21 +145,24 @@ module Que
       end
     end
 
-    def execute_sql(sql, params)
-      # Some versions of the PG gem dislike an empty/nil params argument.
-      if params && !params.empty?
-        wrapped_connection.async_exec(sql, params)
-      else
-        wrapped_connection.async_exec(sql)
-      end
-    end
-
     # Procs used to convert strings from Postgres into Ruby types.
     CAST_PROCS = {
       # Boolean
-      16
+      16 => -> (value) {
+        case value
+        when String then value == 't'.freeze
+        else !!value
+        end
+      },
+
       # Timestamp with time zone
-      1184 =>
+      1184 => -> (value) {
+        case value
+        when Time then value
+        when String then Time.parse(value)
+        else raise "Unexpected time class: #{value.class} (#{value.inspect})"
+        end
+      }
     }
 
     # JSON, JSONB
data/lib/que/connection_pool.rb
CHANGED
data/lib/que/job.rb
CHANGED
data/lib/que/{job_cache.rb → job_buffer.rb}
CHANGED
@@ -5,89 +5,88 @@
 # minimum priority, and stopping gracefully.
 
 module Que
-  class
+  class JobBuffer
     attr_reader :maximum_size, :minimum_size, :priority_queues
 
+    # Since we use a mutex, which is not reentrant, we have to be a little
+    # careful to not call a method that locks the mutex when we've already
+    # locked it. So, as a general rule, public methods handle locking the mutex
+    # when necessary, while private methods handle the actual underlying data
+    # changes. This lets us reuse those private methods without running into
+    # locking issues.
+
     def initialize(
       maximum_size:,
       minimum_size:,
       priorities:
     )
       @maximum_size = Que.assert(Integer, maximum_size)
-      Que.assert(maximum_size >= 0) { "maximum_size for a
+      Que.assert(maximum_size >= 0) { "maximum_size for a JobBuffer must be at least zero!" }
 
       @minimum_size = Que.assert(Integer, minimum_size)
-      Que.assert(minimum_size >= 0) { "minimum_size for a
+      Que.assert(minimum_size >= 0) { "minimum_size for a JobBuffer must be at least zero!" }
 
       Que.assert(minimum_size <= maximum_size) do
-        "minimum
-        "greater than the maximum
+        "minimum buffer size (#{minimum_size}) is " \
+        "greater than the maximum buffer size (#{maximum_size})!"
       end
 
-      @stop
-      @array
-      @
+      @stop  = false
+      @array = []
+      @mutex = Mutex.new
 
-      # Make sure that priority = nil sorts highest.
       @priority_queues = Hash[
-
-
+        # Make sure that priority = nil sorts highest.
+        priorities.sort_by{|p| p || MAXIMUM_PRIORITY}.map do |p|
+          [p, PriorityQueue.new(priority: p, job_buffer: self)]
         end
       ].freeze
     end
 
     def push(*metajobs)
-      Que.internal_log(:
+      Que.internal_log(:job_buffer_push, self) do
         {
           maximum_size:  maximum_size,
           ids:           metajobs.map(&:id),
-          current_queue:
+          current_queue: to_a,
         }
       end
 
       sync do
-        return metajobs if
+        return metajobs if _stopping?
 
-        @array.
+        @array.concat(metajobs).sort!
 
         # Relying on the hash's contents being sorted, here.
         priority_queues.reverse_each do |_, pq|
-          pq.
-
-          break if job.nil?
-          pq.push(job)
+          pq.populate do
+            _shift_job(pq.priority)
           end
         end
 
-        # If we passed the maximum
+        # If we passed the maximum buffer size, drop the lowest sort keys and
         # return their ids to be unlocked.
-        overage = -
+        overage = -_buffer_space
         pop(overage) if overage > 0
       end
     end
 
     def shift(priority = nil)
       queue = priority_queues.fetch(priority) { raise Error, "not a permitted priority! #{priority}" }
-      queue.pop
+      queue.pop || shift_job(priority)
     end
 
     def shift_job(priority = nil)
-      sync
-        if stopping?
-          false
-        elsif (job = @array.first) && job.priority_sufficient?(priority)
-          @array.shift
-        end
-      end
+      sync { _shift_job(priority) }
     end
 
     def accept?(metajobs)
-      return [] if stopping?
-
       metajobs.sort!
 
       sync do
-
+        return [] if _stopping?
+
+        start_index = _buffer_space
         final_index = metajobs.length - 1
 
         return metajobs if start_index > final_index
@@ -106,10 +105,6 @@ module Que
       end
     end
 
-    def jobs_needed?
-      minimum_size > size
-    end
-
     def waiting_count
       count = 0
       priority_queues.each_value do |pq|
@@ -126,7 +121,7 @@ module Que
         count = pq.waiting_count
 
         if lowest_priority
-          count +=
+          count += buffer_space
           lowest_priority = false
         end
 
@@ -136,14 +131,12 @@ module Que
       hash
     end
 
-    def
-      sync
-        maximum_size - size
-      end
+    def buffer_space
+      sync { _buffer_space }
     end
 
     def size
-      sync {
+      sync { _size }
     end
 
     def to_a
@@ -156,73 +149,98 @@ module Que
     end
 
     def clear
-      sync { pop(
+      sync { pop(_size) }
     end
 
     def stopping?
-      sync {
+      sync { _stopping? }
+    end
+
+    def job_available?(priority)
+      (job = @array.first) && job.priority_sufficient?(priority)
    end
 
     private
 
+    def _buffer_space
+      maximum_size - _size
+    end
+
     def pop(count)
       @array.pop(count)
     end
 
-    def
-
+    def _shift_job(priority)
+      if _stopping?
+        false
+      elsif (job = @array.first) && job.priority_sufficient?(priority)
+        @array.shift
+      end
+    end
+
+    def _size
+      @array.size
+    end
+
+    def _stopping?
+      !!@stop
+    end
+
+    def sync(&block)
+      @mutex.synchronize(&block)
     end
 
     # A queue object dedicated to a specific worker priority. It's basically a
     # Queue object from the standard library, but it's able to reach into the
-    #
+    # JobBuffer's buffer in order to satisfy a pop.
     class PriorityQueue
-      attr_reader :
+      attr_reader :job_buffer, :priority, :mutex
 
       def initialize(
-
+        job_buffer:,
         priority:
       )
-        @
-        @priority
-        @waiting
-        @stopping
-        @items
-        @
-        @cv
+        @job_buffer = job_buffer
+        @priority   = priority
+        @waiting    = 0
+        @stopping   = false
+        @items      = [] # Items pending distribution to waiting threads.
+        @mutex      = Mutex.new
+        @cv         = ConditionVariable.new
      end
 
       def pop
         sync do
           loop do
-
-
-
+            if @stopping
+              return false
+            elsif item = @items.pop
              return item
+            elsif job_buffer.job_available?(priority)
+              return false
            end
 
-            job = job_cache.shift_job(priority)
-            return job unless job.nil? # False means we're stopping.
-
            @waiting += 1
-            @cv.wait
+            @cv.wait(mutex)
            @waiting -= 1
          end
        end
      end
 
-      def
+      def stop
        sync do
-
-          @
-          @cv.signal
+          @stopping = true
+          @cv.broadcast
        end
      end
 
-      def
+      def populate
        sync do
-
-
+          waiting_count.times do
+            job = yield
+            break if job.nil? # False would mean we're stopping.
+            _push(job)
+          end
        end
      end
 
@@ -232,8 +250,14 @@ module Que
 
       private
 
-      def sync
-
+      def sync(&block)
+        mutex.synchronize(&block)
+      end
+
+      def _push(item)
+        Que.assert(waiting_count > 0)
+        @items << item
+        @cv.signal
       end
     end
   end
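Taken together, the buffer's public methods take the non-reentrant mutex and delegate to underscore-prefixed private helpers, while PriorityQueue is a small hand-rolled blocking queue: consumers wait on a ConditionVariable, populate hands each waiter one item via signal, and stop wakes everyone with broadcast. A standalone sketch of that wait/signal hand-off (plain Ruby, not Que's code):

    items = []
    mutex = Mutex.new
    cv    = ConditionVariable.new

    consumer = Thread.new do
      mutex.synchronize do
        cv.wait(mutex) while items.empty? # releases the lock while sleeping
        puts "popped #{items.pop}"
      end
    end

    sleep 0.1 # give the consumer time to start waiting

    mutex.synchronize do
      items << :job
      cv.signal # wake exactly one waiting consumer; broadcast would wake all
    end

    consumer.join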