que 0.0.1 → 0.1.0
This diff shows the content of publicly released package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.rspec +0 -1
- data/README.md +85 -44
- data/Rakefile +412 -0
- data/lib/generators/que/install_generator.rb +22 -0
- data/lib/generators/que/templates/add_que.rb +9 -0
- data/lib/que.rb +55 -5
- data/lib/que/adapters/active_record.rb +9 -0
- data/lib/que/adapters/base.rb +49 -0
- data/lib/que/adapters/connection_pool.rb +14 -0
- data/lib/que/adapters/pg.rb +17 -0
- data/lib/que/adapters/sequel.rb +14 -0
- data/lib/que/job.rb +128 -149
- data/lib/que/railtie.rb +20 -0
- data/lib/que/rake_tasks.rb +35 -0
- data/lib/que/sql.rb +121 -0
- data/lib/que/version.rb +1 -1
- data/lib/que/worker.rb +93 -156
- data/que.gemspec +8 -6
- data/spec/adapters/active_record_spec.rb +39 -0
- data/spec/adapters/connection_pool_spec.rb +12 -0
- data/spec/adapters/pg_spec.rb +5 -0
- data/spec/adapters/sequel_spec.rb +25 -0
- data/spec/connection_spec.rb +12 -0
- data/spec/helper_spec.rb +19 -0
- data/spec/pool_spec.rb +116 -0
- data/spec/queue_spec.rb +134 -0
- data/spec/spec_helper.rb +48 -25
- data/spec/support/helpers.rb +9 -0
- data/spec/support/jobs.rb +33 -0
- data/spec/support/shared_examples/adapter.rb +16 -0
- data/spec/support/shared_examples/multithreaded_adapter.rb +42 -0
- data/spec/work_spec.rb +247 -0
- data/spec/worker_spec.rb +117 -0
- metadata +73 -15
- data/spec/unit/error_spec.rb +0 -45
- data/spec/unit/queue_spec.rb +0 -67
- data/spec/unit/work_spec.rb +0 -168
- data/spec/unit/worker_spec.rb +0 -31
data/lib/generators/que/install_generator.rb
ADDED
@@ -0,0 +1,22 @@
+require 'rails/generators'
+require 'rails/generators/migration'
+require 'active_record'
+
+module Que
+  class InstallGenerator < Rails::Generators::Base
+    include Rails::Generators::Migration
+
+    namespace "que:install"
+    self.source_paths << File.join(File.dirname(__FILE__), 'templates')
+    desc "Generates a migration to add Que's job table."
+
+    def self.next_migration_number(dirname)
+      next_migration_number = current_migration_number(dirname) + 1
+      ActiveRecord::Migration.next_migration_number(next_migration_number)
+    end
+
+    def create_migration_file
+      migration_template 'add_que.rb', 'db/migrate/add_que.rb'
+    end
+  end
+end
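The migration template the generator copies (data/lib/generators/que/templates/add_que.rb, +9 lines in the file list above) is not shown in this section. A minimal sketch of what such a migration could look like, assuming it simply delegates to the Que.create! and Que.drop! helpers added in lib/que.rb below; the AddQue class name and the up/down bodies are assumptions, not the template's actual contents:

# Hypothetical sketch only -- not the shipped add_que.rb template.
class AddQue < ActiveRecord::Migration
  def self.up
    Que.create!  # runs SQL[:create_table] to build the que_jobs table
  end

  def self.down
    Que.drop!    # removes the que_jobs table
  end
end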
data/lib/que.rb
CHANGED
@@ -1,10 +1,60 @@
-require 'que/version'
-
 module Que
-  autoload :
-  autoload :
+  autoload :Adapters, 'que/adapters/base'
+  autoload :Job, 'que/job'
+  autoload :SQL, 'que/sql'
+  autoload :Version, 'que/version'
+  autoload :Worker, 'que/worker'

   class << self
-    attr_accessor :logger
+    attr_accessor :logger, :error_handler
+    attr_writer :adapter
+
+    def adapter
+      @adapter || raise("Que connection not established!")
+    end
+
+    def connection=(connection)
+      self.adapter = if connection.to_s == 'ActiveRecord'
+        Adapters::ActiveRecord.new
+      else
+        case connection.class.to_s
+        when 'Sequel::Postgres::Database' then Adapters::Sequel.new(connection)
+        when 'ConnectionPool' then Adapters::ConnectionPool.new(connection)
+        when 'PG::Connection' then Adapters::PG.new(connection)
+        when 'NilClass' then connection
+        else raise "Que connection not recognized: #{connection.inspect}"
+        end
+      end
+    end
+
+    def create!
+      execute SQL[:create_table]
+    end
+
+    def drop!
+      execute "DROP TABLE que_jobs"
+    end
+
+    def clear!
+      execute "DELETE FROM que_jobs"
+    end
+
+    def execute(command, *args)
+      case command
+      when Symbol then adapter.execute_prepared(command, *args)
+      when String then adapter.execute(command, *args)
+      end
+    end
+
+    def log(level, text)
+      logger.send level, "[Que] #{text}" if logger
+    end
+
+    # Duplicate some Worker config methods to the Que module for convenience.
+    [:mode, :mode=, :worker_count=, :sleep_period, :sleep_period=].each do |meth|
+      define_method(meth){|*args| Worker.send(meth, *args)}
+    end
   end
 end
+
+require 'que/railtie' if defined? Rails::Railtie
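The new module methods above make up Que's setup surface: hand Que a connection (a raw PG::Connection, a Sequel Postgres database, a ConnectionPool, or the ActiveRecord constant), optionally assign a logger and error handler, and use the Worker config methods delegated onto Que. A usage sketch based only on the code above; the database name is a placeholder, and the Worker internals (not shown in this section) are assumed to support the :sync mode that Job.queue checks for:

require 'que'
require 'pg'
require 'logger'

# Hand Que a raw PG connection; Que wraps it in Adapters::PG.
Que.connection = PG::Connection.open(:dbname => 'que_test')

Que.logger        = Logger.new(STDOUT)
Que.error_handler = proc { |error| Que.log :info, "Job error: #{error.message}" }

Que.create!       # builds the que_jobs table via SQL[:create_table]
Que.mode = :sync  # delegated to Que::Worker; Job.queue runs jobs inline in :sync mode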
data/lib/que/adapters/base.rb
ADDED
@@ -0,0 +1,49 @@
+module Que
+  module Adapters
+    autoload :ActiveRecord, 'que/adapters/active_record'
+    autoload :ConnectionPool, 'que/adapters/connection_pool'
+    autoload :PG, 'que/adapters/pg'
+    autoload :Sequel, 'que/adapters/sequel'
+
+    class Base
+      def initialize(thing = nil)
+        @statement_mutex = Mutex.new
+      end
+
+      # The only method that adapters really need to implement. Should lock a
+      # PG::Connection (or something that acts like a PG::Connection) so that
+      # no other threads are using it and yield it to the block.
+      def checkout(&block)
+        raise NotImplementedError
+      end
+
+      def execute(*args)
+        checkout { |conn| conn.async_exec(*args) }
+      end
+
+      def execute_prepared(name, params = [])
+        checkout do |conn|
+          unless statements_prepared(conn)[name]
+            conn.prepare("que_#{name}", SQL[name])
+            statements_prepared(conn)[name] = true
+          end
+
+          conn.exec_prepared("que_#{name}", params)
+        end
+      end
+
+      private
+
+      # Each adapter needs to remember which of its connections have prepared
+      # which statements. This is a shared data structure, so protect it. We
+      # assume that the hash of statements for a particular connection is only
+      # being accessed by the thread that's checked it out, though.
+      def statements_prepared(conn)
+        @statement_mutex.synchronize do
+          @statements_prepared ||= {}
+          @statements_prepared[conn] ||= {}
+        end
+      end
+    end
+  end
+end
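Adapters::Base leaves exactly one method to subclasses: checkout must yield a PG-compatible connection that no other thread is using, and execute/execute_prepared are layered on top of it. Below is a sketch of a hypothetical adapter that guards a single connection with a mutex; the SingleConnection name is invented, and the bundled PG/Sequel/ConnectionPool adapters (not shown in this section) presumably follow the same pattern:

module Que
  module Adapters
    # Hypothetical adapter: one PG::Connection shared behind a mutex so only
    # one thread can use it at a time, as Base#checkout requires.
    class SingleConnection < Base
      def initialize(connection)
        super
        @connection = connection
        @mutex      = Mutex.new
      end

      def checkout
        @mutex.synchronize { yield @connection }
      end
    end
  end
end

# Installed through the attr_writer added to the Que module:
#   Que.adapter = Que::Adapters::SingleConnection.new(pg_connection)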
data/lib/que/job.rb
CHANGED
@@ -1,185 +1,164 @@
 require 'json'

 module Que
-  class Job
-
-
-
-    # 3 = Time-sensitive. Sooner is better than later.
-    # 4 = Time-insensitive. Shouldn't get delayed forever, though.
-    # 5 = Whenever. Timing doesn't matter. May be a huge backlog of these.
+  class Job
+    def initialize(attrs)
+      @attrs = attrs
+    end

-
+    # Subclasses should define their own run methods, but keep an empty one
+    # here so we can just do Que::Job.queue in testing.
+    def run(*args)
+    end

-
-
+    def _run
+      start = Time.now

-
+      run *@attrs[:args]
+      destroy unless @destroyed

-
-
-    def default_priority
-      @default_priority ||= 5
-    end
+      Que.log :info, "Worked job in #{((Time.now - start) * 1000).round(1)} ms: #{inspect}"
+    end

+    private
+
+    def destroy
+      Que.execute :destroy_job, [@attrs[:priority], @attrs[:run_at], @attrs[:job_id]]
+      @destroyed = true
+    end
+
+    class << self
       def queue(*args)
-
+        if args.last.is_a?(Hash)
+          options = args.pop
+          run_at = options.delete(:run_at)
+          priority = options.delete(:priority)
+          args << options if options.any?
+        end
+
+        attrs = {:job_class => to_s, :args => JSON.dump(args)}
+
+        if t = run_at || @default_run_at && @default_run_at.call
+          attrs[:run_at] = t
+        end
+
+        if p = priority || @default_priority
+          attrs[:priority] = p
+        end
+
+        if Que.mode == :sync
+          run_job(attrs)
+        else
+          Que.execute *insert_sql(attrs)
+        end
       end

-      def work
+      def work
+        # Job.work will typically be called in a loop, where we'd sleep when
+        # there's no more work to be done, so its return value should reflect
+        # whether we should hit the database again or not. So, return truthy
+        # if we worked a job or encountered a typical error while working a
+        # job, and falsy if we found nothing to do or hit a connection error.
+
         # Since we're taking session-level advisory locks, we have to hold the
         # same connection throughout the process of getting a job, working it,
         # deleting it, and removing the lock.
-
-        DB.synchronize do
+        Que.adapter.checkout do
           begin
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            model.work
-            time = Time.now - start
-            Que.logger.info "Worked job in #{(time * 1000).round(1)} ms: #{model.inspect}" if Que.logger
-
-            # Most jobs destroy themselves transactionally in #work. If not,
-            # take care of them. Jobs that don't destroy themselves run the risk
-            # of being repeated after a crash.
-            model.destroy unless model.destroyed?
-
-            # Make sure to return the finished job.
-            model
-          rescue Retry
-            # Don't destroy the job or mark it as having errored. It can be
-            # retried as soon as it is unlocked.
+            if row = Que.execute(:lock_job).first
+              # Edge case: It's possible to have grabbed a job that's already
+              # been worked, if the SELECT took its MVCC snapshot while the
+              # job was processing, but didn't attempt the advisory lock until
+              # it was finished. Now that we have the job lock, we know that a
+              # previous worker would have deleted it by now, so we just
+              # double check that it still exists before working it.
+
+              # Note that there is currently no spec for this behavior, since
+              # I'm not sure how to reliably commit a transaction that deletes
+              # the job in a separate thread between this lock and check.
+              return true if Que.execute(:check_job, [row['priority'], row['run_at'], row['job_id']]).none?
+
+              run_job(row)
+            else
+              Que.log :info, "No jobs available..."
+              nil
+            end
           rescue => error
-
-
+            begin
+              if row
+                # Borrowed the exponential backoff formula and error data format from delayed_job.
+                count = row['error_count'].to_i + 1
+                run_at = Time.now + (count ** 4 + 3)
+                message = "#{error.message}\n#{error.backtrace.join("\n")}"
+                Que.execute :set_error, [count, run_at, message, row['priority'], row['run_at'], row['job_id']]
+              end
+            rescue
+              # If we can't reach the DB for some reason, too bad, but don't
+              # let it crash the work loop.
+            end

-
-
+            if Que.error_handler
+              Que.error_handler.call(error) rescue nil
             end

-
+            # If it's a garden variety error, we can just return true, pick up
+            # another job, no big deal. If it's a PG::Error, though, assume
+            # it's a disconnection or something and that we shouldn't just hit
+            # the database again right away.
+            return !error.is_a?(PG::Error)
           ensure
-
+            # Clear the advisory lock we took when locking the job. Important
+            # to do this so that they don't pile up in the database.
+            Que.execute "SELECT pg_advisory_unlock_all()" if row
           end
         end
       end

       private

-
-
-
-
-
-
-
-
-
+      # Column names are not escaped, so this method should not be called with untrusted hashes.
+      def insert_sql(hash)
+        number = 0
+        columns = []
+        placeholders = []
+        values = []
+
+        hash.each do |key, value|
+          columns << key
+          placeholders << "$#{number += 1}"
+          values << value
+        end

-
+        ["INSERT INTO que_jobs (#{columns.join(', ')}) VALUES (#{placeholders.join(', ')})", values]
       end
-    end
-
-    # Send the args attribute to the perform() method.
-    def work
-      perform(*JSON.parse(args))
-    end
-
-    # Call perform on a job to run it. No perform method means NOOP.
-    def perform(*args)
-    end

-
-
-
-
-
+      def run_job(attrs)
+        attrs = indifferentiate(attrs)
+        attrs[:args] = indifferentiate(JSON.load(attrs[:args]))
+        const_get("::#{attrs[:job_class]}").new(attrs).tap(&:_run)
+      end

-
-
-
-
-
-
-
-
-
-
-          when :async then DB.after_commit { Worker.wake! }
+      def indifferentiate(input)
+        case input
+        when Hash
+          h = indifferent_hash
+          input.each { |k, v| h[k] = indifferentiate(v) }
+          h
+        when Array
+          input.map { |v| indifferentiate(v) }
+        else
+          input
         end
       end

-
-
-
-
-
-
+      def indifferent_hash
+        # Tiny hack to better support Rails.
+        if {}.respond_to?(:with_indifferent_access)
+          {}.with_indifferent_access
+        else
+          Hash.new { |hash, key| hash[key.to_s] if Symbol === key }
+        end
+      end
     end
-
-    sql = <<-SQL
-      WITH RECURSIVE cte AS (
-        SELECT (job).*, pg_try_advisory_lock((job).job_id) AS locked
-        FROM (
-          SELECT job
-          FROM jobs AS job
-          WHERE ((run_at <= now()) AND (priority <= ?))
-          ORDER BY priority, run_at, job_id
-          LIMIT 1
-        ) AS t1
-        UNION ALL (
-          SELECT (job).*, pg_try_advisory_lock((job).job_id) AS locked
-          FROM (
-            SELECT (
-              SELECT job
-              FROM jobs AS job
-              WHERE ((run_at <= now()) AND (priority <= ?) AND ((priority, run_at, job_id) > (cte.priority, cte.run_at, cte.job_id)))
-              ORDER BY priority, run_at, job_id
-              LIMIT 1
-            ) AS job
-            FROM cte
-            WHERE NOT cte.locked
-            LIMIT 1
-          ) AS t1)
-        )
-      SELECT *
-      FROM cte
-      WHERE locked
-    SQL
-
-    LOCK = DB[sql, :$priority, :$priority].prepare(:first, :lock_job)
-
-    # An alternate scheme using LATERAL, which will arrive in Postgres 9.3.
-    # Basically the same, but benchmark to see if it's faster/just as reliable.
-
-    # with recursive
-    # t as (select *, pg_try_advisory_lock(s.job_id) as locked
-    # from (select * from jobs j
-    # where run_at >= now()
-    # order by priority, run_at, job_id limit 1) s
-    # union all
-    # select j.*, pg_try_advisory_lock(j.job_id)
-    # from (select * from t where not locked) t,
-    # lateral (select * from jobs
-    # where run_at >= now()
-    # and (priority,run_at,job_id) > (t.priority,t.run_at,t.job_id)
-    # order by priority, run_at, job_id limit 1) j
-    # select * from t where locked;
   end
 end