que 1.0.0.beta → 1.0.0.beta5
This diff compares the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between versions exactly as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/.github/workflows/tests.yml +43 -0
- data/CHANGELOG.1.0.beta.md +137 -0
- data/CHANGELOG.md +34 -12
- data/README.md +67 -7
- data/bin/command_line_interface.rb +61 -49
- data/docs/README.md +785 -32
- data/lib/que/active_record/connection.rb +4 -4
- data/lib/que/active_record/model.rb +4 -4
- data/lib/que/connection.rb +35 -16
- data/lib/que/connection_pool.rb +2 -2
- data/lib/que/job.rb +1 -1
- data/lib/que/{job_cache.rb → job_buffer.rb} +96 -72
- data/lib/que/job_methods.rb +4 -0
- data/lib/que/locker.rb +170 -139
- data/lib/que/poller.rb +1 -1
- data/lib/que/rails/railtie.rb +2 -4
- data/lib/que/result_queue.rb +2 -2
- data/lib/que/sequel/model.rb +14 -16
- data/lib/que/utils/constantization.rb +1 -1
- data/lib/que/utils/logging.rb +2 -1
- data/lib/que/utils/middleware.rb +26 -13
- data/lib/que/version.rb +1 -1
- data/lib/que/worker.rb +43 -21
- data/lib/que.rb +9 -4
- data/que.gemspec +2 -2
- metadata +11 -25
- data/docs/active_job.md +0 -6
- data/docs/advanced_setup.md +0 -49
- data/docs/command_line_interface.md +0 -45
- data/docs/error_handling.md +0 -94
- data/docs/inspecting_the_queue.md +0 -64
- data/docs/job_helper_methods.md +0 -27
- data/docs/logging.md +0 -31
- data/docs/managing_workers.md +0 -25
- data/docs/middleware.md +0 -15
- data/docs/migrating.md +0 -27
- data/docs/multiple_queues.md +0 -31
- data/docs/shutting_down_safely.md +0 -7
- data/docs/using_plain_connections.md +0 -65
- data/docs/using_sequel.md +0 -33
- data/docs/writing_reliable_jobs.md +0 -108
data/lib/que/locker.rb
CHANGED
@@ -19,35 +19,17 @@ module Que
     %{
       DELETE FROM public.que_lockers
       WHERE pid = pg_backend_pid()
-        OR
+        OR NOT EXISTS (SELECT 1 FROM pg_stat_activity WHERE pid = public.que_lockers.pid)
     }

   SQL[:register_locker] =
     %{
-      INSERT INTO public.que_lockers
-      (
-        pid,
-        worker_count,
-        worker_priorities,
-        ruby_pid,
-        ruby_hostname,
-        listening,
-        queues
-      )
-      VALUES
-      (
-        pg_backend_pid(),
-        $1::integer,
-        $2::integer[],
-        $3::integer,
-        $4::text,
-        $5::boolean,
-        $6::text[]
-      )
+      INSERT INTO public.que_lockers (pid, worker_count, worker_priorities, ruby_pid, ruby_hostname, listening, queues)
+      VALUES (pg_backend_pid(), $1::integer, $2::integer[], $3::integer, $4::text, $5::boolean, $6::text[])
     }

   class Locker
-    attr_reader :thread, :workers, :
+    attr_reader :thread, :workers, :job_buffer, :locks

     MESSAGE_RESOLVERS = {}
     RESULT_RESOLVERS = {}
@@ -55,31 +37,29 @@ module Que

     MESSAGE_RESOLVERS[:job_available] =
       -> (messages) {
         metajobs = messages.map { |key| Metajob.new(key) }
-        push_jobs(lock_jobs(
+        push_jobs(lock_jobs(job_buffer.accept?(metajobs)))
       }

     RESULT_RESOLVERS[:job_finished] =
       -> (messages) { finish_jobs(messages.map{|m| m.fetch(:metajob)}) }

-    DEFAULT_POLL_INTERVAL
-    DEFAULT_WAIT_PERIOD
-
-
-
-    DEFAULT_WORKER_PRIORITIES = [10, 30, 50].freeze
+    DEFAULT_POLL_INTERVAL = 5.0
+    DEFAULT_WAIT_PERIOD = 50
+    DEFAULT_MINIMUM_BUFFER_SIZE = 2
+    DEFAULT_MAXIMUM_BUFFER_SIZE = 8
+    DEFAULT_WORKER_PRIORITIES = [10, 30, 50, nil, nil, nil].freeze

     def initialize(
-      queues:
-
-      listen:
-      poll:
-      poll_interval:
-      wait_period:
-
-
-
-
-      on_worker_start: nil
+      queues: [Que.default_queue],
+      connection_url: nil,
+      listen: true,
+      poll: true,
+      poll_interval: DEFAULT_POLL_INTERVAL,
+      wait_period: DEFAULT_WAIT_PERIOD,
+      maximum_buffer_size: DEFAULT_MAXIMUM_BUFFER_SIZE,
+      minimum_buffer_size: DEFAULT_MINIMUM_BUFFER_SIZE,
+      worker_priorities: DEFAULT_WORKER_PRIORITIES,
+      on_worker_start: nil
     )

       # Sanity-check all our arguments, since some users may instantiate Locker
@@ -89,34 +69,32 @@ module Que

       Que.assert Numeric, poll_interval
       Que.assert Numeric, wait_period
-      Que.assert Integer, worker_count

       Que.assert Array, worker_priorities
-      worker_priorities.each { |p| Que.assert(Integer, p) }
-
-      all_worker_priorities = worker_priorities.values_at(0...worker_count)
+      worker_priorities.each { |p| Que.assert([Integer, NilClass], p) }

-      # We use a
+      # We use a JobBuffer to track jobs and pass them to workers, and a
       # ResultQueue to receive messages from workers.
-      @
-        maximum_size:
-        minimum_size:
-        priorities:
+      @job_buffer = JobBuffer.new(
+        maximum_size: maximum_buffer_size,
+        minimum_size: minimum_buffer_size,
+        priorities: worker_priorities.uniq,
       )

       @result_queue = ResultQueue.new

+      @stop = false
+
       Que.internal_log :locker_instantiate, self do
         {
-          queues:
-          listen:
-          poll:
-          poll_interval:
-          wait_period:
-
-
-
-          worker_priorities: worker_priorities,
+          queues: queues,
+          listen: listen,
+          poll: poll,
+          poll_interval: poll_interval,
+          wait_period: wait_period,
+          maximum_buffer_size: maximum_buffer_size,
+          minimum_buffer_size: minimum_buffer_size,
+          worker_priorities: worker_priorities,
         }
       end

@@ -126,16 +104,11 @@ module Que

       @queue_names = queues.is_a?(Hash) ? queues.keys : queues
       @wait_period = wait_period.to_f / 1000 # Milliseconds to seconds.

-      # If the worker_count exceeds the array of priorities it'll result in
-      # extra workers that will work jobs of any priority. For example, the
-      # default worker_count of 6 and the default worker priorities of [10, 30,
-      # 50] will result in three workers that only work jobs that meet those
-      # priorities, and three workers that will work any job.
       @workers =
-
+        worker_priorities.map do |priority|
           Worker.new(
             priority: priority,
-
+            job_buffer: @job_buffer,
             result_queue: @result_queue,
             start_callback: on_worker_start,
           )
@@ -144,18 +117,46 @@ module Que

       # To prevent race conditions, let every worker get into a ready state
       # before starting up the locker thread.
       loop do
-        break if
+        break if job_buffer.waiting_count == workers.count
         sleep 0.001
       end

-
-
-
-
+      # If we weren't passed a specific connection_url, borrow a connection from
+      # the pool and derive the connection string from it.
+      connection_args =
+        if connection_url
+          uri = URI.parse(connection_url)
+
+          opts =
+            {
+              host: uri.host,
+              user: uri.user,
+              password: uri.password,
+              port: uri.port || 5432,
+              dbname: uri.path[1..-1],
+            }
+
+          if uri.query
+            opts.merge!(Hash[uri.query.split("&").map{|s| s.split('=')}.map{|a,b| [a.to_sym, b]}])
+          end
+
+          opts
         else
-          Que.pool
+          Que.pool.checkout do |conn|
+            c = conn.wrapped_connection
+
+            {
+              host: c.host,
+              user: c.user,
+              password: c.pass,
+              port: c.port,
+              dbname: c.db,
+            }
+          end
         end

+      @connection = Que::Connection.wrap(PG::Connection.open(connection_args))
+
       @thread =
         Thread.new do
           # An error causing this thread to exit is a bug in Que, which we want
@@ -165,47 +166,35 @@ module Que

           # Give this thread priority, so it can promptly respond to NOTIFYs.
           Thread.current.priority = 1

-
-
-          connection.
-            execute("SHOW application_name").
-            first.
-            fetch(:application_name)
-
-          begin
-            @connection = connection
-
-            connection.execute(
+          begin
+            unless connection_args.has_key?(:application_name)
+              @connection.execute(
                 "SELECT set_config('application_name', $1, false)",
-              ["Que Locker: #{connection.backend_pid}"]
+                ["Que Locker: #{@connection.backend_pid}"]
               )
+            end

-
+            Poller.setup(@connection)

+            @listener =
               if listen
-
+                Listener.new(connection: @connection)
               end

+            @pollers =
               if poll
-
-
-
-
-
-
-
-              end
+                queues.map do |queue, interval|
+                  Poller.new(
+                    connection: @connection,
+                    queue: queue,
+                    poll_interval: interval || poll_interval,
+                  )
+                end
               end

-
-
-
-            "SELECT set_config('application_name', $1, false)",
-            [original_application_name]
-            )
-
-            Poller.cleanup(connection)
-          end
+            work_loop
+          ensure
+            @connection.wrapped_connection.close
           end
         end
       end
@@ -215,7 +204,7 @@ module Que

       end

       def stop
-        @
+        @job_buffer.stop
         @stop = true
       end
@@ -249,17 +238,7 @@ module Que

         begin
           @listener.listen if @listener

-
-          # a bad locker record, so clean up before registering.
-          connection.execute :clean_lockers
-          connection.execute :register_locker, [
-            @workers.count,
-            "{#{@workers.map(&:priority).map{|p| p || 'NULL'}.join(',')}}",
-            Process.pid,
-            CURRENT_HOSTNAME,
-            !!@listener,
-            "{\"#{@queue_names.join('","')}\"}",
-          ]
+          startup

           {} while cycle

@@ -268,11 +247,7 @@ module Que

             event: :locker_stop,
           )

-
-
-          @workers.each(&:wait_until_stopped)
-
-          handle_results
+          shutdown
         ensure
           connection.execute :clean_lockers

@@ -280,6 +255,20 @@ module Que

         end
       end

+      def startup
+        # A previous locker that didn't exit cleanly may have left behind
+        # a bad locker record, so clean up before registering.
+        connection.execute :clean_lockers
+        connection.execute :register_locker, [
+          @workers.count,
+          "{#{@workers.map(&:priority).map{|p| p || 'NULL'}.join(',')}}",
+          Process.pid,
+          CURRENT_HOSTNAME,
+          !!@listener,
+          "{\"#{@queue_names.join('","')}\"}",
+        ]
+      end
+
       def cycle
         # Poll at the start of a cycle, so that when the worker starts up we can
         # load up the queue with jobs immediately.
@@ -300,31 +289,73 @@ module Que

         !@stop
       end

+      def shutdown
+        unlock_jobs(@job_buffer.clear)
+        wait_for_shutdown
+        handle_results
+      end
+
+      def wait_for_shutdown
+        @workers.each(&:wait_until_stopped)
+      end
+
       def poll
         # Only poll when there are pollers to use (that is, when polling is
-        # enabled)
-
-        return unless pollers && job_cache.jobs_needed?
+        # enabled).
+        return unless pollers

-
-
-
+        # Figure out what job priorities we have to fill.
+        priorities = job_buffer.available_priorities
+
+        # Only poll when there are workers ready for jobs.
+        return if priorities.empty?
+
+        all_metajobs = []

-
+        pollers.each do |poller|
+          Que.internal_log(:locker_polling, self) {
+            {
+              priorities: priorities,
+              held_locks: @locks.to_a,
+              queue: poller.queue,
+            }
+          }

           if metajobs = poller.poll(priorities: priorities, held_locks: @locks)
+            metajobs.sort!
+            all_metajobs.concat(metajobs)
+
+            # Update the desired priorities list to take the priorities that we
+            # just retrieved into account.
             metajobs.each do |metajob|
-
+              job_priority = metajob.job.fetch(:priority)
+
+              priorities.each do |priority, count|
+                if job_priority <= priority
+                  new_priority = count - 1
+
+                  if new_priority <= 0
+                    priorities.delete(priority)
+                  else
+                    priorities[priority] = new_priority
+                  end
+
+                  break
+                end
+              end
             end

-
+            break if priorities.empty?
           end
         end
+
+        all_metajobs.each { |metajob| mark_id_as_locked(metajob.id) }
+        push_jobs(all_metajobs)
       end

       def wait
-        if @listener
-
+        if l = @listener
+          l.wait_for_grouped_messages(@wait_period).each do |type, messages|
             if resolver = MESSAGE_RESOLVERS[type]
               instance_exec messages, &resolver
             else
@@ -353,7 +384,7 @@ module Que

         metajobs.reject! { |m| @locks.include?(m.id) }
         return metajobs if metajobs.empty?

-        ids = metajobs.map{|m| m.id.to_i}
+        ids = metajobs.map { |m| m.id.to_i }

         Que.internal_log :locker_locking, self do
           {
@@ -362,12 +393,12 @@ module Que

           }
         end

+        materalize_cte = connection.server_version >= 12_00_00
+
         jobs =
           connection.execute \
             <<-SQL
-      WITH jobs AS (
-        SELECT * FROM que_jobs WHERE id IN (#{ids.join(', ')})
-      )
+      WITH jobs AS #{materalize_cte ? 'MATERIALIZED' : ''} (SELECT * FROM que_jobs WHERE id IN (#{ids.join(', ')}))
       SELECT * FROM jobs WHERE pg_try_advisory_lock(id)
             SQL

@@ -408,12 +439,12 @@ module Que

         good, bad = metajobs.partition{|mj| verified_ids.include?(mj.id)}

-
-
-
-        if bad.any? || displaced.any?
-          unlock_jobs(bad + displaced)
+        # Need to unlock any low-importance jobs the new ones may displace.
+        if displaced = @job_buffer.push(*good)
+          bad.concat(displaced)
         end
+
+        unlock_jobs(bad)
       end

       def finish_jobs(metajobs)
data/lib/que/poller.rb
CHANGED
data/lib/que/rails/railtie.rb
CHANGED
@@ -2,12 +2,10 @@

 module Que
   module Rails
-    class Railtie < Rails::Railtie
+    class Railtie < ::Rails::Railtie
       config.que = Que

-      Que.
-
-      Que.logger = proc { Rails.logger }
+      Que.logger = proc { ::Rails.logger }
       Que.connection = ::ActiveRecord if defined? ::ActiveRecord
     end
   end
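
The ::Rails qualification matters because the railtie is defined inside a module that is itself named Que::Rails: a bare Rails constant in that scope resolves to the enclosing Que::Rails rather than the framework. A self-contained illustration of the lookup rule (module names here are invented for the example):

    module Framework
      NAME = "top-level"
    end

    module Wrapper
      module Framework
        NAME = "nested"
      end

      # The bare constant finds the nested module first; a leading ::
      # forces lookup to start from the top level.
      BARE      = Framework::NAME
      QUALIFIED = ::Framework::NAME
    end

    puts Wrapper::BARE      # => "nested"
    puts Wrapper::QUALIFIED # => "top-level"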
data/lib/que/result_queue.rb
CHANGED
data/lib/que/sequel/model.rb
CHANGED
@@ -1,5 +1,7 @@
 # frozen_string_literal: true

+::Sequel.extension :pg_json_ops
+
 module Que
   module Sequel
     QUALIFIED_TABLE = ::Sequel.qualify(:public, :que_jobs)
@@ -7,10 +9,10 @@ module Que

     class Model < ::Sequel::Model(QUALIFIED_TABLE)
       dataset_module do
         conditions = {
-          errored:
-          expired:
-          finished:
-          scheduled:
+          errored: QUALIFIED_TABLE[:error_count] > 0,
+          expired: QUALIFIED_TABLE[:expired_at] !~ nil,
+          finished: QUALIFIED_TABLE[:finished_at] !~ nil,
+          scheduled: QUALIFIED_TABLE[:run_at] > ::Sequel::CURRENT_TIMESTAMP,
         }

         conditions.each do |name, condition|
@@ -18,32 +20,28 @@ module Que

           subset :"not_#{name}", ~condition
         end

-        subset :ready,     conditions.values.map(&:~).inject
-        subset :not_ready, conditions.values.        inject
+        subset :ready,     conditions.values.map(&:~).inject(:&)
+        subset :not_ready, conditions.values.        inject(:|)

         def by_job_class(job_class)
           job_class = job_class.name if job_class.is_a?(Class)
           where(
-
-
-
-            ::Sequel.qualify(QUALIFIED_TABLE, :job_class) => "ActiveJob::QueueAdapters::QueAdapter::JobWrapper",
-            ::Sequel.lit("public.que_jobs.args->0->>'job_class'") => job_class,
-            }
-          )
+            (QUALIFIED_TABLE[:job_class] =~ job_class) |
+            (QUALIFIED_TABLE[:job_class] =~ "ActiveJob::QueueAdapters::QueAdapter::JobWrapper") &
+            (QUALIFIED_TABLE[:args].pg_jsonb[0].get_text("job_class") =~ job_class)
           )
         end

         def by_queue(queue)
-          where(
+          where(QUALIFIED_TABLE[:queue] => queue)
         end

         def by_tag(tag)
-          where(
+          where(QUALIFIED_TABLE[:data].pg_jsonb.contains(JSON.dump(tags: [tag])))
         end

         def by_args(*args)
-          where(
+          where(QUALIFIED_TABLE[:args].pg_jsonb.contains(JSON.dump(args)))
         end
       end
     end
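
Because the conditions are now Sequel expression objects (built on the pg_json_ops extension), the generated subsets and finders compose like any other dataset methods. A usage sketch; the require path and the MyJob constant are assumptions for illustration, and an established Sequel database connection is assumed:

    require "que/sequel/model" # assumed require path for the model above

    Que::Sequel::Model.errored.count                 # jobs with error_count > 0
    Que::Sequel::Model.ready.by_queue("emails").all  # subsets chain like any dataset

    # by_job_class also matches ActiveJob-wrapped jobs via the JSONB args:
    Que::Sequel::Model.by_job_class(MyJob).first
    Que::Sequel::Model.by_tag("nightly").by_args("user-123").count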
data/lib/que/utils/logging.rb
CHANGED
data/lib/que/utils/middleware.rb
CHANGED
@@ -5,27 +5,40 @@

 module Que
   module Utils
     module Middleware
-
-
-
-
-
-
-
+      TYPES = [
+        :job,
+        :sql,
+      ].freeze
+
+      TYPES.each do |type|
+        module_eval <<-CODE
+          def #{type}_middleware
+            @#{type}_middleware ||= []
+          end

-
-
+          def run_#{type}_middleware(*args)
+            m = #{type}_middleware
+
+            if m.empty?
+              yield
+            else
+              invoke_middleware(middleware: m.dup, args: args) { yield }
+            end
+          end
+        CODE
       end

       private

-      def invoke_middleware(middleware:,
+      def invoke_middleware(middleware:, args:, &block)
         if m = middleware.shift
-
-
+          r = nil
+          m.call(*args) do
+            r = invoke_middleware(middleware: middleware, args: args, &block)
           end
+          r
         else
-
+          yield
         end
       end
     end
data/lib/que/version.rb
CHANGED