que 1.0.0.beta → 1.0.0.beta5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/que/locker.rb CHANGED
@@ -19,35 +19,17 @@ module Que
  %{
  DELETE FROM public.que_lockers
  WHERE pid = pg_backend_pid()
- OR pid NOT IN (SELECT pid FROM pg_stat_activity)
+ OR NOT EXISTS (SELECT 1 FROM pg_stat_activity WHERE pid = public.que_lockers.pid)
  }

  SQL[:register_locker] =
  %{
- INSERT INTO public.que_lockers
- (
- pid,
- worker_count,
- worker_priorities,
- ruby_pid,
- ruby_hostname,
- listening,
- queues
- )
- VALUES
- (
- pg_backend_pid(),
- $1::integer,
- $2::integer[],
- $3::integer,
- $4::text,
- $5::boolean,
- $6::text[]
- )
+ INSERT INTO public.que_lockers (pid, worker_count, worker_priorities, ruby_pid, ruby_hostname, listening, queues)
+ VALUES (pg_backend_pid(), $1::integer, $2::integer[], $3::integer, $4::text, $5::boolean, $6::text[])
  }

  class Locker
- attr_reader :thread, :workers, :job_cache, :locks
+ attr_reader :thread, :workers, :job_buffer, :locks

  MESSAGE_RESOLVERS = {}
  RESULT_RESOLVERS = {}
@@ -55,31 +37,29 @@ module Que
  MESSAGE_RESOLVERS[:job_available] =
  -> (messages) {
  metajobs = messages.map { |key| Metajob.new(key) }
- push_jobs(lock_jobs(job_cache.accept?(metajobs)))
+ push_jobs(lock_jobs(job_buffer.accept?(metajobs)))
  }

  RESULT_RESOLVERS[:job_finished] =
  -> (messages) { finish_jobs(messages.map{|m| m.fetch(:metajob)}) }

- DEFAULT_POLL_INTERVAL = 5.0
- DEFAULT_WAIT_PERIOD = 50
- DEFAULT_MINIMUM_QUEUE_SIZE = 2
- DEFAULT_MAXIMUM_QUEUE_SIZE = 8
- DEFAULT_WORKER_COUNT = 6
- DEFAULT_WORKER_PRIORITIES = [10, 30, 50].freeze
+ DEFAULT_POLL_INTERVAL = 5.0
+ DEFAULT_WAIT_PERIOD = 50
+ DEFAULT_MINIMUM_BUFFER_SIZE = 2
+ DEFAULT_MAXIMUM_BUFFER_SIZE = 8
+ DEFAULT_WORKER_PRIORITIES = [10, 30, 50, nil, nil, nil].freeze

  def initialize(
- queues: [Que.default_queue],
- connection: nil,
- listen: true,
- poll: true,
- poll_interval: DEFAULT_POLL_INTERVAL,
- wait_period: DEFAULT_WAIT_PERIOD,
- maximum_queue_size: DEFAULT_MAXIMUM_QUEUE_SIZE,
- minimum_queue_size: DEFAULT_MINIMUM_QUEUE_SIZE,
- worker_count: DEFAULT_WORKER_COUNT,
- worker_priorities: DEFAULT_WORKER_PRIORITIES,
- on_worker_start: nil
+ queues: [Que.default_queue],
+ connection_url: nil,
+ listen: true,
+ poll: true,
+ poll_interval: DEFAULT_POLL_INTERVAL,
+ wait_period: DEFAULT_WAIT_PERIOD,
+ maximum_buffer_size: DEFAULT_MAXIMUM_BUFFER_SIZE,
+ minimum_buffer_size: DEFAULT_MINIMUM_BUFFER_SIZE,
+ worker_priorities: DEFAULT_WORKER_PRIORITIES,
+ on_worker_start: nil
  )

  # Sanity-check all our arguments, since some users may instantiate Locker
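Note: beta5 drops the separate worker_count option; the number of workers is now simply the length of worker_priorities, where a nil entry denotes a worker that takes jobs of any priority, and the former "queue"/job_cache options are renamed to "buffer"/job_buffer. A minimal sketch of constructing a locker against the new signature (option values are illustrative, not taken from the diff):

    # Hypothetical beta5 invocation: six workers, three pinned to priority
    # ceilings of 10/30/50 and three (the nil entries) working any priority.
    locker = Que::Locker.new(
      queues:              [Que.default_queue],
      poll_interval:       5.0,
      wait_period:         50,
      minimum_buffer_size: 2,
      maximum_buffer_size: 8,
      worker_priorities:   [10, 30, 50, nil, nil, nil],
    )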
@@ -89,34 +69,32 @@ module Que

  Que.assert Numeric, poll_interval
  Que.assert Numeric, wait_period
- Que.assert Integer, worker_count

  Que.assert Array, worker_priorities
- worker_priorities.each { |p| Que.assert(Integer, p) }
-
- all_worker_priorities = worker_priorities.values_at(0...worker_count)
+ worker_priorities.each { |p| Que.assert([Integer, NilClass], p) }

- # We use a JobCache to track jobs and pass them to workers, and a
+ # We use a JobBuffer to track jobs and pass them to workers, and a
  # ResultQueue to receive messages from workers.
- @job_cache = JobCache.new(
- maximum_size: maximum_queue_size,
- minimum_size: minimum_queue_size,
- priorities: all_worker_priorities.uniq,
+ @job_buffer = JobBuffer.new(
+ maximum_size: maximum_buffer_size,
+ minimum_size: minimum_buffer_size,
+ priorities: worker_priorities.uniq,
  )

  @result_queue = ResultQueue.new

+ @stop = false
+
  Que.internal_log :locker_instantiate, self do
  {
- queues: queues,
- listen: listen,
- poll: poll,
- poll_interval: poll_interval,
- wait_period: wait_period,
- maximum_queue_size: maximum_queue_size,
- minimum_queue_size: minimum_queue_size,
- worker_count: worker_count,
- worker_priorities: worker_priorities,
+ queues: queues,
+ listen: listen,
+ poll: poll,
+ poll_interval: poll_interval,
+ wait_period: wait_period,
+ maximum_buffer_size: maximum_buffer_size,
+ minimum_buffer_size: minimum_buffer_size,
+ worker_priorities: worker_priorities,
  }
  end

@@ -126,16 +104,11 @@ module Que
  @queue_names = queues.is_a?(Hash) ? queues.keys : queues
  @wait_period = wait_period.to_f / 1000 # Milliseconds to seconds.

- # If the worker_count exceeds the array of priorities it'll result in
- # extra workers that will work jobs of any priority. For example, the
- # default worker_count of 6 and the default worker priorities of [10, 30,
- # 50] will result in three workers that only work jobs that meet those
- # priorities, and three workers that will work any job.
  @workers =
- all_worker_priorities.map do |priority|
+ worker_priorities.map do |priority|
  Worker.new(
  priority: priority,
- job_cache: @job_cache,
+ job_buffer: @job_buffer,
  result_queue: @result_queue,
  start_callback: on_worker_start,
  )
@@ -144,18 +117,46 @@ module Que
  # To prevent race conditions, let every worker get into a ready state
  # before starting up the locker thread.
  loop do
- break if job_cache.waiting_count == workers.count
+ break if job_buffer.waiting_count == workers.count
  sleep 0.001
  end

- pool =
- if connection
- # Wrap the given connection in a dummy connection pool.
- ConnectionPool.new { |&block| block.call(connection) }
+ # If we weren't passed a specific connection_url, borrow a connection from
+ # the pool and derive the connection string from it.
+ connection_args =
+ if connection_url
+ uri = URI.parse(connection_url)
+
+ opts =
+ {
+ host: uri.host,
+ user: uri.user,
+ password: uri.password,
+ port: uri.port || 5432,
+ dbname: uri.path[1..-1],
+ }
+
+ if uri.query
+ opts.merge!(Hash[uri.query.split("&").map{|s| s.split('=')}.map{|a,b| [a.to_sym, b]}])
+ end
+
+ opts
  else
- Que.pool
+ Que.pool.checkout do |conn|
+ c = conn.wrapped_connection
+
+ {
+ host: c.host,
+ user: c.user,
+ password: c.pass,
+ port: c.port,
+ dbname: c.db,
+ }
+ end
  end

+ @connection = Que::Connection.wrap(PG::Connection.open(connection_args))
+
  @thread =
  Thread.new do
  # An error causing this thread to exit is a bug in Que, which we want
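The connection_url branch above leans on Ruby's stdlib URI parsing; a sketch of what it yields for a typical URL (values are illustrative):

    require 'uri'

    uri = URI.parse("postgres://que_user:secret@db.example.com:5433/que_db?application_name=my_locker")
    uri.host     # => "db.example.com"
    uri.user     # => "que_user"
    uri.password # => "secret"
    uri.port     # => 5433 (nil when omitted, hence the `|| 5432` fallback)
    uri.path     # => "/que_db", so path[1..-1] gives the dbname "que_db"
    uri.query    # => "application_name=my_locker", merged in as {application_name: "my_locker"}

Supplying application_name in the query string also causes the locker to skip the set_config call in the next hunk, since connection_args then already contains that key.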
@@ -165,47 +166,35 @@ module Que
  # Give this thread priority, so it can promptly respond to NOTIFYs.
  Thread.current.priority = 1

- pool.checkout do |connection|
- original_application_name =
- connection.
- execute("SHOW application_name").
- first.
- fetch(:application_name)
-
- begin
- @connection = connection
-
- connection.execute(
+ begin
+ unless connection_args.has_key?(:application_name)
+ @connection.execute(
  "SELECT set_config('application_name', $1, false)",
- ["Que Locker: #{connection.backend_pid}"]
+ ["Que Locker: #{@connection.backend_pid}"]
  )
+ end

- Poller.setup(connection)
+ Poller.setup(@connection)

+ @listener =
  if listen
- @listener = Listener.new(connection: connection)
+ Listener.new(connection: @connection)
  end

+ @pollers =
  if poll
- @pollers =
- queues.map do |queue, interval|
- Poller.new(
- connection: connection,
- queue: queue,
- poll_interval: interval || poll_interval,
- )
- end
+ queues.map do |queue, interval|
+ Poller.new(
+ connection: @connection,
+ queue: queue,
+ poll_interval: interval || poll_interval,
+ )
+ end
  end

- work_loop
- ensure
- connection.execute(
- "SELECT set_config('application_name', $1, false)",
- [original_application_name]
- )
-
- Poller.cleanup(connection)
- end
+ work_loop
+ ensure
+ @connection.wrapped_connection.close
  end
  end
  end
@@ -215,7 +204,7 @@ module Que
  end

  def stop
- @job_cache.stop
+ @job_buffer.stop
  @stop = true
  end

@@ -249,17 +238,7 @@ module Que
  begin
  @listener.listen if @listener

- # A previous locker that didn't exit cleanly may have left behind
- # a bad locker record, so clean up before registering.
- connection.execute :clean_lockers
- connection.execute :register_locker, [
- @workers.count,
- "{#{@workers.map(&:priority).map{|p| p || 'NULL'}.join(',')}}",
- Process.pid,
- CURRENT_HOSTNAME,
- !!@listener,
- "{\"#{@queue_names.join('","')}\"}",
- ]
+ startup

  {} while cycle

@@ -268,11 +247,7 @@ module Que
  event: :locker_stop,
  )

- unlock_jobs(@job_cache.clear)
-
- @workers.each(&:wait_until_stopped)
-
- handle_results
+ shutdown
  ensure
  connection.execute :clean_lockers

@@ -280,6 +255,20 @@ module Que
  end
  end

+ def startup
+ # A previous locker that didn't exit cleanly may have left behind
+ # a bad locker record, so clean up before registering.
+ connection.execute :clean_lockers
+ connection.execute :register_locker, [
+ @workers.count,
+ "{#{@workers.map(&:priority).map{|p| p || 'NULL'}.join(',')}}",
+ Process.pid,
+ CURRENT_HOSTNAME,
+ !!@listener,
+ "{\"#{@queue_names.join('","')}\"}",
+ ]
+ end
+
  def cycle
  # Poll at the start of a cycle, so that when the worker starts up we can
  # load up the queue with jobs immediately.
@@ -300,31 +289,73 @@ module Que
  !@stop
  end

+ def shutdown
+ unlock_jobs(@job_buffer.clear)
+ wait_for_shutdown
+ handle_results
+ end
+
+ def wait_for_shutdown
+ @workers.each(&:wait_until_stopped)
+ end
+
  def poll
  # Only poll when there are pollers to use (that is, when polling is
- # enabled) and when the local queue has dropped below the configured
- # minimum size.
- return unless pollers && job_cache.jobs_needed?
+ # enabled).
+ return unless pollers

- pollers.each do |poller|
- priorities = job_cache.available_priorities
- break if priorities.empty?
+ # Figure out what job priorities we have to fill.
+ priorities = job_buffer.available_priorities
+
+ # Only poll when there are workers ready for jobs.
+ return if priorities.empty?
+
+ all_metajobs = []

- Que.internal_log(:locker_polling, self) { {priorities: priorities, held_locks: @locks.to_a, queue: poller.queue} }
+ pollers.each do |poller|
+ Que.internal_log(:locker_polling, self) {
+ {
+ priorities: priorities,
+ held_locks: @locks.to_a,
+ queue: poller.queue,
+ }
+ }

  if metajobs = poller.poll(priorities: priorities, held_locks: @locks)
+ metajobs.sort!
+ all_metajobs.concat(metajobs)
+
+ # Update the desired priorities list to take the priorities that we
+ # just retrieved into account.
  metajobs.each do |metajob|
- mark_id_as_locked(metajob.id)
+ job_priority = metajob.job.fetch(:priority)
+
+ priorities.each do |priority, count|
+ if job_priority <= priority
+ new_priority = count - 1
+
+ if new_priority <= 0
+ priorities.delete(priority)
+ else
+ priorities[priority] = new_priority
+ end
+
+ break
+ end
+ end
  end

- push_jobs(metajobs)
+ break if priorities.empty?
  end
  end
+
+ all_metajobs.each { |metajob| mark_id_as_locked(metajob.id) }
+ push_jobs(all_metajobs)
  end

  def wait
- if @listener
- @listener.wait_for_grouped_messages(@wait_period).each do |type, messages|
+ if l = @listener
+ l.wait_for_grouped_messages(@wait_period).each do |type, messages|
  if resolver = MESSAGE_RESOLVERS[type]
  instance_exec messages, &resolver
  else
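A rough worked example of the priority bookkeeping in the new poll method above, assuming (as the loop implies) that job_buffer.available_priorities returns a hash of priority ceiling => open worker slots:

    # Two open slots at priority <= 10 and one at <= 30:
    priorities = { 10 => 2, 30 => 1 }
    # A polled job with priority 5 fits the 10 bucket, so its count drops to 1;
    # a second priority-5 job empties and deletes that key; a priority-25 job
    # then consumes the 30 bucket, leaving priorities == {}, at which point
    # `break if priorities.empty?` fires and no further pollers are queried.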
@@ -353,7 +384,7 @@ module Que
  metajobs.reject! { |m| @locks.include?(m.id) }
  return metajobs if metajobs.empty?

- ids = metajobs.map{|m| m.id.to_i}
+ ids = metajobs.map { |m| m.id.to_i }

  Que.internal_log :locker_locking, self do
  {
@@ -362,12 +393,12 @@ module Que
  }
  end

+ materalize_cte = connection.server_version >= 12_00_00
+
  jobs =
  connection.execute \
  <<-SQL
- WITH jobs AS (
- SELECT * FROM que_jobs WHERE id IN (#{ids.join(', ')})
- )
+ WITH jobs AS #{materalize_cte ? 'MATERIALIZED' : ''} (SELECT * FROM que_jobs WHERE id IN (#{ids.join(', ')}))
  SELECT * FROM jobs WHERE pg_try_advisory_lock(id)
  SQL

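The new MATERIALIZED keyword is there because PostgreSQL 12 began inlining CTEs by default; keeping the CTE materialized separates the id filter from the pg_try_advisory_lock() call, so locks are only attempted on the selected rows. A sketch of the SQL the interpolation produces on a 12+ server (the ids are illustrative):

    # connection.server_version >= 12_00_00, so the ternary emits MATERIALIZED:
    #
    #   WITH jobs AS MATERIALIZED (SELECT * FROM que_jobs WHERE id IN (101, 102, 103))
    #   SELECT * FROM jobs WHERE pg_try_advisory_lock(id)
    #
    # On older servers the keyword is omitted; CTEs there are always materialized.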
@@ -408,12 +439,12 @@ module Que

  good, bad = metajobs.partition{|mj| verified_ids.include?(mj.id)}

- displaced = @job_cache.push(*good) || []
-
- # Unlock any low-importance jobs the new ones may displace.
- if bad.any? || displaced.any?
- unlock_jobs(bad + displaced)
+ # Need to unlock any low-importance jobs the new ones may displace.
+ if displaced = @job_buffer.push(*good)
+ bad.concat(displaced)
  end
+
+ unlock_jobs(bad)
  end

  def finish_jobs(metajobs)
data/lib/que/poller.rb CHANGED
@@ -144,7 +144,7 @@ module Que

  return unless should_poll?

- expected_count = priorities.inject(0){|s,(p,c)| s + c}
+ expected_count = priorities.inject(0){|s,(_,c)| s + c}

  jobs =
  connection.execute_prepared(
@@ -2,12 +2,10 @@

  module Que
  module Rails
- class Railtie < Rails::Railtie
+ class Railtie < ::Rails::Railtie
  config.que = Que

- Que.run_asynchronously = true if Rails.env.test?
-
- Que.logger = proc { Rails.logger }
+ Que.logger = proc { ::Rails.logger }
  Que.connection = ::ActiveRecord if defined? ::ActiveRecord
  end
  end
@@ -28,8 +28,8 @@ module Que

  private

- def sync
- @mutex.synchronize { yield }
+ def sync(&block)
+ @mutex.synchronize(&block)
  end
  end
  end
@@ -1,5 +1,7 @@
  # frozen_string_literal: true

+ ::Sequel.extension :pg_json_ops
+
  module Que
  module Sequel
  QUALIFIED_TABLE = ::Sequel.qualify(:public, :que_jobs)
@@ -7,10 +9,10 @@ module Que
  class Model < ::Sequel::Model(QUALIFIED_TABLE)
  dataset_module do
  conditions = {
- errored: ::Sequel.qualify(QUALIFIED_TABLE, :error_count) > 0,
- expired: ::Sequel.~(::Sequel.qualify(QUALIFIED_TABLE, :expired_at) => nil),
- finished: ::Sequel.~(::Sequel.qualify(QUALIFIED_TABLE, :finished_at) => nil),
- scheduled: ::Sequel.qualify(QUALIFIED_TABLE, :run_at) > ::Sequel::CURRENT_TIMESTAMP,
+ errored: QUALIFIED_TABLE[:error_count] > 0,
+ expired: QUALIFIED_TABLE[:expired_at] !~ nil,
+ finished: QUALIFIED_TABLE[:finished_at] !~ nil,
+ scheduled: QUALIFIED_TABLE[:run_at] > ::Sequel::CURRENT_TIMESTAMP,
  }

  conditions.each do |name, condition|
@@ -18,32 +20,28 @@ module Que
  subset :"not_#{name}", ~condition
  end

- subset :ready, conditions.values.map(&:~).inject{|a, b| a & b}
- subset :not_ready, conditions.values. inject{|a, b| a | b}
+ subset :ready, conditions.values.map(&:~).inject(:&)
+ subset :not_ready, conditions.values. inject(:|)

  def by_job_class(job_class)
  job_class = job_class.name if job_class.is_a?(Class)
  where(
- ::Sequel.|(
- {::Sequel.qualify(QUALIFIED_TABLE, :job_class) => job_class},
- {
- ::Sequel.qualify(QUALIFIED_TABLE, :job_class) => "ActiveJob::QueueAdapters::QueAdapter::JobWrapper",
- ::Sequel.lit("public.que_jobs.args->0->>'job_class'") => job_class,
- }
- )
+ (QUALIFIED_TABLE[:job_class] =~ job_class) |
+ (QUALIFIED_TABLE[:job_class] =~ "ActiveJob::QueueAdapters::QueAdapter::JobWrapper") &
+ (QUALIFIED_TABLE[:args].pg_jsonb[0].get_text("job_class") =~ job_class)
  )
  end

  def by_queue(queue)
- where(::Sequel.qualify(QUALIFIED_TABLE, :queue) => queue)
+ where(QUALIFIED_TABLE[:queue] => queue)
  end

  def by_tag(tag)
- where(::Sequel.lit("public.que_jobs.data @> ?", JSON.dump(tags: [tag])))
+ where(QUALIFIED_TABLE[:data].pg_jsonb.contains(JSON.dump(tags: [tag])))
  end

  def by_args(*args)
- where(::Sequel.lit("public.que_jobs.args @> ?", JSON.dump(args)))
+ where(QUALIFIED_TABLE[:args].pg_jsonb.contains(JSON.dump(args)))
  end
  end
  end
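With the pg_json_ops extension loaded, the rewritten dataset methods chain like ordinary Sequel scopes; a hedged usage sketch (the job class and tag names are examples, not from the diff):

    # Errored jobs for one job class that haven't finished yet:
    Que::Sequel::Model.errored.not_finished.by_job_class("ProcessInvoice").count

    # Jobs tagged "nightly" that are still scheduled for the future:
    Que::Sequel::Model.scheduled.by_tag("nightly").all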
@@ -4,7 +4,7 @@ module Que
  module Utils
  module Constantization
  def constantize(string)
- assert String, string
+ Que.assert String, string

  if string.respond_to?(:constantize)
  string.constantize
@@ -5,7 +5,8 @@
  module Que
  module Utils
  module Logging
- attr_accessor :logger, :log_formatter, :internal_logger
+ attr_accessor :logger, :internal_logger
+ attr_writer :log_formatter

  def log(event:, level: :info, **extra)
  data = _default_log_data
@@ -5,27 +5,40 @@
  module Que
  module Utils
  module Middleware
- def run_middleware(job, &block)
- invoke_middleware(
- middleware: middleware.dup,
- job: job,
- block: block,
- )
- end
+ TYPES = [
+ :job,
+ :sql,
+ ].freeze
+
+ TYPES.each do |type|
+ module_eval <<-CODE
+ def #{type}_middleware
+ @#{type}_middleware ||= []
+ end

- def middleware
- @middleware ||= []
+ def run_#{type}_middleware(*args)
+ m = #{type}_middleware
+
+ if m.empty?
+ yield
+ else
+ invoke_middleware(middleware: m.dup, args: args) { yield }
+ end
+ end
+ CODE
  end

  private

- def invoke_middleware(middleware:, job:, block:)
+ def invoke_middleware(middleware:, args:, &block)
  if m = middleware.shift
- m.call(job) do
- invoke_middleware(middleware: middleware, job: job, block: block)
+ r = nil
+ m.call(*args) do
+ r = invoke_middleware(middleware: middleware, args: args, &block)
  end
+ r
  else
- block.call
+ yield
  end
  end
  end
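The metaprogramming above gives Que separate job and SQL middleware stacks in place of the single run_middleware hook. A minimal sketch of registering a job middleware via the generated methods (the timing logic is purely illustrative):

    # Wrap every worked job with simple timing output.
    Que.job_middleware.push(
      -> (job, &block) {
        started = Time.now
        block.call # run the rest of the middleware chain and the job itself
        puts "#{job.class} finished in #{Time.now - started}s"
      }
    )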
data/lib/que/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Que
- VERSION = '1.0.0.beta'
+ VERSION = '1.0.0.beta5'
  end