que 1.0.0.beta → 1.0.0.beta2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/CHANGELOG.1.0.beta.md +127 -0
- data/CHANGELOG.md +29 -7
- data/README.md +14 -1
- data/bin/command_line_interface.rb +11 -23
- data/docs/active_job.md +1 -1
- data/docs/command_line_interface.md +3 -3
- data/docs/middleware.md +25 -4
- data/lib/que.rb +9 -4
- data/lib/que/active_record/connection.rb +3 -3
- data/lib/que/active_record/model.rb +3 -3
- data/lib/que/connection.rb +24 -15
- data/lib/que/connection_pool.rb +2 -2
- data/lib/que/job.rb +1 -1
- data/lib/que/{job_cache.rb → job_buffer.rb} +72 -53
- data/lib/que/locker.rb +156 -126
- data/lib/que/poller.rb +1 -1
- data/lib/que/rails/railtie.rb +3 -3
- data/lib/que/result_queue.rb +2 -2
- data/lib/que/utils/constantization.rb +1 -1
- data/lib/que/utils/logging.rb +2 -1
- data/lib/que/utils/middleware.rb +26 -13
- data/lib/que/version.rb +1 -1
- data/lib/que/worker.rb +6 -6
- metadata +5 -4
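Since this is a prerelease, Bundler will not pick it up from a loose version constraint; a project that wants to try 1.0.0.beta2 has to request it explicitly. A minimal sketch (the Gemfile itself is assumed, not part of this diff):

    # Gemfile (hypothetical project) -- prerelease versions must be requested explicitly.
    gem 'que', '1.0.0.beta2'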
data/lib/que/connection.rb
CHANGED
@@ -30,8 +30,11 @@ module Que
         when self
           conn
         when PG::Connection
-          conn.
-
+          if conn.instance_variable_defined?(:@que_wrapper)
+            conn.instance_variable_get(:@que_wrapper)
+          else
+            conn.instance_variable_set(:@que_wrapper, new(conn))
+          end
         else
           raise Error, "Unsupported input for Connection.wrap: #{conn.class}"
         end
@@ -43,7 +46,7 @@ module Que
       @prepared_statements = Set.new
     end

-    def execute(command, params =
+    def execute(command, params = [])
       sql =
         case command
         when Symbol then SQL[command]
@@ -51,8 +54,17 @@ module Que
         else raise Error, "Bad command! #{command.inspect}"
         end

-      params = convert_params(params)
-
+      params = convert_params(params)
+
+      result =
+        Que.run_sql_middleware(sql, params) do
+          # Some versions of the PG gem dislike an empty/nil params argument.
+          if params.empty?
+            wrapped_connection.async_exec(sql)
+          else
+            wrapped_connection.async_exec(sql, params)
+          end
+        end

       Que.internal_log :connection_execute, self do
         {
@@ -129,19 +141,16 @@ module Que
       end
     end

-    def execute_sql(sql, params)
-      # Some versions of the PG gem dislike an empty/nil params argument.
-      if params && !params.empty?
-        wrapped_connection.async_exec(sql, params)
-      else
-        wrapped_connection.async_exec(sql)
-      end
-    end
-
     # Procs used to convert strings from Postgres into Ruby types.
     CAST_PROCS = {
       # Boolean
-      16
+      16 => -> (value) {
+        case value
+        when String then value == 't'.freeze
+        else !!value
+        end
+      },
+
       # Timestamp with time zone
       1184 => Time.method(:parse),
     }
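The reworked `execute` above now routes every statement through `Que.run_sql_middleware`, replacing the old private `execute_sql` helper, so registered SQL middleware sees each query and its params and must yield to run it. A minimal sketch of a timing middleware, assuming registration by pushing a callable onto `Que.sql_middleware` as described in data/docs/middleware.md (the logging destination and rounding are illustrative):

    # Hypothetical SQL middleware: times each statement Que executes.
    # Assumes Que.sql_middleware is an array of callables invoked with (sql, params, &block).
    require 'que'

    Que.sql_middleware << -> (sql, params, &block) {
      started = Process.clock_gettime(Process::CLOCK_MONOTONIC)
      block.call # run the actual query
      elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - started
      $stdout.puts "que SQL (#{elapsed.round(4)}s): #{sql} #{params.inspect}"
      nil # the middleware's return value is ignored
    }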
data/lib/que/connection_pool.rb
CHANGED
data/lib/que/job.rb
CHANGED
data/lib/que/{job_cache.rb → job_buffer.rb}
CHANGED
@@ -5,63 +5,70 @@
   # minimum priority, and stopping gracefully.

 module Que
-  class
+  class JobBuffer
     attr_reader :maximum_size, :minimum_size, :priority_queues

+    # Since we use a mutex, which is not reentrant, we have to be a little
+    # careful to not call a method that locks the mutex when we've already
+    # locked it. So, as a general rule, public methods handle locking the mutex
+    # when necessary, while private methods handle the actual underlying data
+    # changes. This lets us reuse those private methods without running into
+    # locking issues.
+
     def initialize(
       maximum_size:,
       minimum_size:,
       priorities:
     )
       @maximum_size = Que.assert(Integer, maximum_size)
-      Que.assert(maximum_size >= 0) { "maximum_size for a
+      Que.assert(maximum_size >= 0) { "maximum_size for a JobBuffer must be at least zero!" }

       @minimum_size = Que.assert(Integer, minimum_size)
-      Que.assert(minimum_size >= 0) { "minimum_size for a
+      Que.assert(minimum_size >= 0) { "minimum_size for a JobBuffer must be at least zero!" }

       Que.assert(minimum_size <= maximum_size) do
-        "minimum
-        "greater than the maximum
+        "minimum buffer size (#{minimum_size}) is " \
+        "greater than the maximum buffer size (#{maximum_size})!"
       end

-      @stop
-      @array
-      @
+      @stop = false
+      @array = []
+      @mutex = Mutex.new

-      # Make sure that priority = nil sorts highest.
       @priority_queues = Hash[
-
-
+        # Make sure that priority = nil sorts highest.
+        priorities.sort_by{|p| p || MAXIMUM_PRIORITY}.map do |p|
+          [p, PriorityQueue.new(priority: p, job_buffer: self)]
         end
       ].freeze
     end

     def push(*metajobs)
-      Que.internal_log(:
+      Que.internal_log(:job_buffer_push, self) do
         {
           maximum_size: maximum_size,
           ids: metajobs.map(&:id),
-          current_queue:
+          current_queue: to_a,
         }
       end

       sync do
-        return metajobs if
+        return metajobs if _stopping?

-        @array.
+        @array.concat(metajobs).sort!

         # Relying on the hash's contents being sorted, here.
         priority_queues.reverse_each do |_, pq|
           pq.waiting_count.times do
-            job =
-            break if job.nil?
+            job = _shift_job(pq.priority)
+            break if job.nil? # False would mean we're stopping.
             pq.push(job)
           end
         end

-        # If we passed the maximum
+        # If we passed the maximum buffer size, drop the lowest sort keys and
         # return their ids to be unlocked.
-        overage = -
+        overage = -_buffer_space
         pop(overage) if overage > 0
       end
     end
@@ -72,22 +79,16 @@ module Que
     end

     def shift_job(priority = nil)
-      sync
-        if stopping?
-          false
-        elsif (job = @array.first) && job.priority_sufficient?(priority)
-          @array.shift
-        end
-      end
+      sync { _shift_job(priority) }
     end

     def accept?(metajobs)
-      return [] if stopping?
-
       metajobs.sort!

       sync do
-
+        return [] if _stopping?
+
+        start_index = _buffer_space
         final_index = metajobs.length - 1

         return metajobs if start_index > final_index
@@ -126,7 +127,7 @@ module Que
         count = pq.waiting_count

         if lowest_priority
-          count +=
+          count += buffer_space
           lowest_priority = false
         end

@@ -136,14 +137,12 @@ module Que
       hash
     end

-    def
-      sync
-        maximum_size - size
-      end
+    def buffer_space
+      sync { _buffer_space }
     end

     def size
-      sync {
+      sync { _size }
     end

     def to_a
@@ -156,40 +155,60 @@ module Que
     end

     def clear
-      sync { pop(
+      sync { pop(_size) }
     end

     def stopping?
-      sync {
+      sync { _stopping? }
     end

     private

+    def _buffer_space
+      maximum_size - _size
+    end
+
     def pop(count)
       @array.pop(count)
     end

-    def
-
+    def _shift_job(priority)
+      if _stopping?
+        false
+      elsif (job = @array.first) && job.priority_sufficient?(priority)
+        @array.shift
+      end
+    end
+
+    def _size
+      @array.size
+    end
+
+    def _stopping?
+      !!@stop
+    end
+
+    def sync(&block)
+      @mutex.synchronize(&block)
     end

     # A queue object dedicated to a specific worker priority. It's basically a
     # Queue object from the standard library, but it's able to reach into the
-    #
+    # JobBuffer's buffer in order to satisfy a pop.
     class PriorityQueue
-      attr_reader :
+      attr_reader :job_buffer, :priority, :mutex

       def initialize(
-
+        job_buffer:,
         priority:
       )
-        @
-        @priority
-        @waiting
-        @stopping
-        @items
-        @
-        @cv
+        @job_buffer = job_buffer
+        @priority = priority
+        @waiting = 0
+        @stopping = false
+        @items = [] # Items pending distribution to waiting threads.
+        @mutex = Mutex.new
+        @cv = ConditionVariable.new
       end

       def pop
@@ -201,11 +220,11 @@ module Que
             return item
           end

-          job =
+          job = job_buffer.shift_job(priority)
           return job unless job.nil? # False means we're stopping.

           @waiting += 1
-          @cv.wait
+          @cv.wait(mutex)
           @waiting -= 1
         end
       end
@@ -232,8 +251,8 @@ module Que

       private

-      def sync
-
+      def sync(&block)
+        mutex.synchronize(&block)
       end
     end
   end
data/lib/que/locker.rb
CHANGED
@@ -19,35 +19,17 @@ module Que
     %{
       DELETE FROM public.que_lockers
       WHERE pid = pg_backend_pid()
-      OR
+      OR NOT EXISTS (SELECT 1 FROM pg_stat_activity WHERE pid = public.que_lockers.pid)
     }

   SQL[:register_locker] =
     %{
-      INSERT INTO public.que_lockers
-      (
-        pid,
-        worker_count,
-        worker_priorities,
-        ruby_pid,
-        ruby_hostname,
-        listening,
-        queues
-      )
-      VALUES
-      (
-        pg_backend_pid(),
-        $1::integer,
-        $2::integer[],
-        $3::integer,
-        $4::text,
-        $5::boolean,
-        $6::text[]
-      )
+      INSERT INTO public.que_lockers (pid, worker_count, worker_priorities, ruby_pid, ruby_hostname, listening, queues)
+      VALUES (pg_backend_pid(), $1::integer, $2::integer[], $3::integer, $4::text, $5::boolean, $6::text[])
     }

   class Locker
-    attr_reader :thread, :workers, :
+    attr_reader :thread, :workers, :job_buffer, :locks

     MESSAGE_RESOLVERS = {}
     RESULT_RESOLVERS = {}
@@ -55,31 +37,31 @@ module Que
     MESSAGE_RESOLVERS[:job_available] =
       -> (messages) {
         metajobs = messages.map { |key| Metajob.new(key) }
-        push_jobs(lock_jobs(
+        push_jobs(lock_jobs(job_buffer.accept?(metajobs)))
       }

     RESULT_RESOLVERS[:job_finished] =
       -> (messages) { finish_jobs(messages.map{|m| m.fetch(:metajob)}) }

-    DEFAULT_POLL_INTERVAL
-    DEFAULT_WAIT_PERIOD
-
-
-    DEFAULT_WORKER_COUNT
-    DEFAULT_WORKER_PRIORITIES
+    DEFAULT_POLL_INTERVAL = 5.0
+    DEFAULT_WAIT_PERIOD = 50
+    DEFAULT_MINIMUM_BUFFER_SIZE = 2
+    DEFAULT_MAXIMUM_BUFFER_SIZE = 8
+    DEFAULT_WORKER_COUNT = 6
+    DEFAULT_WORKER_PRIORITIES = [10, 30, 50].freeze

     def initialize(
-      queues:
-
-      listen:
-      poll:
-      poll_interval:
-      wait_period:
-
-
-      worker_count:
-      worker_priorities:
-      on_worker_start:
+      queues: [Que.default_queue],
+      connection_url: nil,
+      listen: true,
+      poll: true,
+      poll_interval: DEFAULT_POLL_INTERVAL,
+      wait_period: DEFAULT_WAIT_PERIOD,
+      maximum_buffer_size: DEFAULT_MAXIMUM_BUFFER_SIZE,
+      minimum_buffer_size: DEFAULT_MINIMUM_BUFFER_SIZE,
+      worker_count: DEFAULT_WORKER_COUNT,
+      worker_priorities: DEFAULT_WORKER_PRIORITIES,
+      on_worker_start: nil
     )

       # Sanity-check all our arguments, since some users may instantiate Locker
@@ -96,27 +78,29 @@ module Que

       all_worker_priorities = worker_priorities.values_at(0...worker_count)

-      # We use a
+      # We use a JobBuffer to track jobs and pass them to workers, and a
       # ResultQueue to receive messages from workers.
-      @
-        maximum_size:
-        minimum_size:
+      @job_buffer = JobBuffer.new(
+        maximum_size: maximum_buffer_size,
+        minimum_size: minimum_buffer_size,
         priorities: all_worker_priorities.uniq,
       )

       @result_queue = ResultQueue.new

+      @stop = false
+
       Que.internal_log :locker_instantiate, self do
         {
-          queues:
-          listen:
-          poll:
-          poll_interval:
-          wait_period:
-
-
-          worker_count:
-          worker_priorities:
+          queues: queues,
+          listen: listen,
+          poll: poll,
+          poll_interval: poll_interval,
+          wait_period: wait_period,
+          maximum_buffer_size: maximum_buffer_size,
+          minimum_buffer_size: minimum_buffer_size,
+          worker_count: worker_count,
+          worker_priorities: worker_priorities,
         }
       end

@@ -135,7 +119,7 @@ module Que
         all_worker_priorities.map do |priority|
           Worker.new(
             priority: priority,
-
+            job_buffer: @job_buffer,
             result_queue: @result_queue,
             start_callback: on_worker_start,
           )
@@ -144,18 +128,39 @@ module Que
       # To prevent race conditions, let every worker get into a ready state
       # before starting up the locker thread.
       loop do
-        break if
+        break if job_buffer.waiting_count == workers.count
         sleep 0.001
       end

-
-
-
-
+      # If we weren't passed a specific connection_url, borrow a connection from
+      # the pool and derive the connection string from it.
+      connection_args =
+        if connection_url
+          uri = URI.parse(connection_url)
+
+          {
+            host: uri.host,
+            user: uri.user,
+            password: uri.password,
+            port: uri.port || 5432,
+            dbname: uri.path[1..-1],
+          }.merge(Hash[uri.query.split("&").map{|s| s.split('=')}.map{|a,b| [a.to_sym, b]}])
         else
-          Que.pool
+          Que.pool.checkout do |conn|
+            c = conn.wrapped_connection
+
+            {
+              host: c.host,
+              user: c.user,
+              password: c.pass,
+              port: c.port,
+              dbname: c.db,
+            }
+          end
         end

+      @connection = Que::Connection.wrap(PG::Connection.open(connection_args))
+
       @thread =
         Thread.new do
           # An error causing this thread to exit is a bug in Que, which we want
@@ -165,47 +170,35 @@ module Que
           # Give this thread priority, so it can promptly respond to NOTIFYs.
           Thread.current.priority = 1

-
-
-          connection.
-            execute("SHOW application_name").
-            first.
-            fetch(:application_name)
-
-          begin
-            @connection = connection
-
-            connection.execute(
+          begin
+            unless connection_args.has_key?(:application_name)
+              @connection.execute(
                 "SELECT set_config('application_name', $1, false)",
-              ["Que Locker: #{connection.backend_pid}"]
+                ["Que Locker: #{@connection.backend_pid}"]
               )
+            end

-
+            Poller.setup(@connection)

+            @listener =
               if listen
-
+                Listener.new(connection: @connection)
               end

+            @pollers =
               if poll
-
-
-
-
-
-
-
-              end
+                queues.map do |queue, interval|
+                  Poller.new(
+                    connection: @connection,
+                    queue: queue,
+                    poll_interval: interval || poll_interval,
+                  )
+                end
               end

-
-
-
-              "SELECT set_config('application_name', $1, false)",
-              [original_application_name]
-            )
-
-            Poller.cleanup(connection)
-          end
+            work_loop
+          ensure
+            @connection.wrapped_connection.close
           end
         end
       end
@@ -215,7 +208,7 @@ module Que
     end

     def stop
-      @
+      @job_buffer.stop
       @stop = true
     end

@@ -249,17 +242,7 @@ module Que
       begin
         @listener.listen if @listener

-
-        # a bad locker record, so clean up before registering.
-        connection.execute :clean_lockers
-        connection.execute :register_locker, [
-          @workers.count,
-          "{#{@workers.map(&:priority).map{|p| p || 'NULL'}.join(',')}}",
-          Process.pid,
-          CURRENT_HOSTNAME,
-          !!@listener,
-          "{\"#{@queue_names.join('","')}\"}",
-        ]
+        startup

         {} while cycle

@@ -268,11 +251,7 @@ module Que
           event: :locker_stop,
         )

-
-
-        @workers.each(&:wait_until_stopped)
-
-        handle_results
+        shutdown
       ensure
         connection.execute :clean_lockers

@@ -280,6 +259,20 @@ module Que
       end
     end

+    def startup
+      # A previous locker that didn't exit cleanly may have left behind
+      # a bad locker record, so clean up before registering.
+      connection.execute :clean_lockers
+      connection.execute :register_locker, [
+        @workers.count,
+        "{#{@workers.map(&:priority).map{|p| p || 'NULL'}.join(',')}}",
+        Process.pid,
+        CURRENT_HOSTNAME,
+        !!@listener,
+        "{\"#{@queue_names.join('","')}\"}",
+      ]
+    end
+
     def cycle
       # Poll at the start of a cycle, so that when the worker starts up we can
       # load up the queue with jobs immediately.
@@ -300,31 +293,70 @@ module Que
       !@stop
     end

+    def shutdown
+      unlock_jobs(@job_buffer.clear)
+      wait_for_shutdown
+      handle_results
+    end
+
+    def wait_for_shutdown
+      @workers.each(&:wait_until_stopped)
+    end
+
     def poll
       # Only poll when there are pollers to use (that is, when polling is
       # enabled) and when the local queue has dropped below the configured
       # minimum size.
-      return unless pollers &&
+      return unless pollers && job_buffer.jobs_needed?

-
-
-
+      # Figure out what job priorities we have to fill.
+      priorities = job_buffer.available_priorities
+      all_metajobs = []

-
+      pollers.each do |poller|
+        Que.internal_log(:locker_polling, self) {
+          {
+            priorities: priorities,
+            held_locks: @locks.to_a,
+            queue: poller.queue,
+          }
+        }

         if metajobs = poller.poll(priorities: priorities, held_locks: @locks)
+          metajobs.sort!
+          all_metajobs.concat(metajobs)
+
+          # Update the desired priorities list to take the priorities that we
+          # just retrieved into account.
           metajobs.each do |metajob|
-
+            job_priority = metajob.job.fetch(:priority)
+
+            priorities.each do |priority, count|
+              if job_priority <= priority
+                new_priority = count - 1
+
+                if new_priority <= 0
+                  priorities.delete(priority)
+                else
+                  priorities[priority] = new_priority
+                end
+
+                break
+              end
+            end
           end

-
+          break if priorities.empty?
         end
       end
+
+      all_metajobs.each { |metajob| mark_id_as_locked(metajob.id) }
+      push_jobs(all_metajobs)
     end

     def wait
-      if @listener
-
+      if l = @listener
+        l.wait_for_grouped_messages(@wait_period).each do |type, messages|
           if resolver = MESSAGE_RESOLVERS[type]
             instance_exec messages, &resolver
           else
@@ -353,7 +385,7 @@ module Que
       metajobs.reject! { |m| @locks.include?(m.id) }
       return metajobs if metajobs.empty?

-      ids = metajobs.map{|m| m.id.to_i}
+      ids = metajobs.map { |m| m.id.to_i }

       Que.internal_log :locker_locking, self do
         {
@@ -365,9 +397,7 @@ module Que
       jobs =
         connection.execute \
           <<-SQL
-            WITH jobs AS (
-              SELECT * FROM que_jobs WHERE id IN (#{ids.join(', ')})
-            )
+            WITH jobs AS (SELECT * FROM que_jobs WHERE id IN (#{ids.join(', ')}))
             SELECT * FROM jobs WHERE pg_try_advisory_lock(id)
           SQL

@@ -408,12 +438,12 @@ module Que

       good, bad = metajobs.partition{|mj| verified_ids.include?(mj.id)}

-
-
-
-      if bad.any? || displaced.any?
-        unlock_jobs(bad + displaced)
+      # Need to unlock any low-importance jobs the new ones may displace.
+      if displaced = @job_buffer.push(*good)
+        bad.concat(displaced)
       end
+
+      unlock_jobs(bad)
     end

     def finish_jobs(metajobs)