queue_classic 2.2.3 → 2.3.0beta
- checksums.yaml +4 -4
- data/lib/generators/queue_classic/install_generator.rb +4 -2
- data/lib/queue_classic.rb +2 -35
- data/lib/queue_classic/conf.rb +33 -0
- data/lib/queue_classic/conn.rb +63 -76
- data/lib/queue_classic/queue.rb +24 -9
- data/lib/queue_classic/setup.rb +10 -6
- data/lib/queue_classic/tasks.rb +34 -23
- data/lib/queue_classic/worker.rb +29 -7
- data/readme.md +15 -9
- data/test/benchmark_test.rb +12 -16
- data/test/{conn_test.rb → conf_test.rb} +7 -3
- data/test/helper.rb +5 -12
- data/test/queue_test.rb +23 -26
- data/test/worker_test.rb +26 -10
- metadata +9 -8
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: c38026847651ea48626d07f07fffd717a56bd13f
+  data.tar.gz: 49543473a45f7b5ca199f5675fa82c8d97dafbfd
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 67bac654f7508dd9104cacd47fc9721c5c4e28550b68c08e9bbd3d5ae870b80415172141a9766b5e341e9fb3b2cc72cd7a28bdc0719a8fcbbc7bba25353e5780
+  data.tar.gz: 95335e3b6990954cb10f99d157fe7a8b175da9e27d09b25c57a1cd84eee15f62dc9ed607bba286eb3fb75a4785add817c2f3c9c5408895606a8f34dc6a35b022
data/lib/generators/queue_classic/install_generator.rb
CHANGED
@@ -8,7 +8,8 @@ module QC
 
     namespace "queue_classic:install"
     self.source_paths << File.join(File.dirname(__FILE__), 'templates')
-    desc 'Generates (but does not run) a migration to add a queue_classic table.'
+    desc 'Generates (but does not run) a migration to add ' +
+         'a queue_classic table.'
 
     def self.next_migration_number(dirname)
       next_migration_number = current_migration_number(dirname) + 1
@@ -16,7 +17,8 @@ module QC
     end
 
     def create_migration_file
-      migration_template 'add_queue_classic.rb', 'db/migrate/add_queue_classic.rb'
+      migration_template 'add_queue_classic.rb',
+                         'db/migrate/add_queue_classic.rb'
     end
   end
 end
data/lib/queue_classic.rb
CHANGED
@@ -1,35 +1,4 @@
 module QC
-  # You can use the APP_NAME to query for
-  # postgres related process information in the
-  # pg_stat_activity table.
-  APP_NAME = ENV["QC_APP_NAME"] || "queue_classic"
-
-  # Number of seconds to block on the listen chanel for new jobs.
-  WAIT_TIME = (ENV["QC_LISTEN_TIME"] || 5).to_i
-
-  # Why do you want to change the table name?
-  # Just deal with the default OK?
-  # If you do want to change this, you will
-  # need to update the PL/pgSQL lock_head() function.
-  # Come on. Don't do it.... Just stick with the default.
-  TABLE_NAME = "queue_classic_jobs"
-
-  # Each row in the table will have a column that
-  # notes the queue. You can point your workers
-  # at different queues but only one at a time.
-  QUEUE = ENV["QUEUE"] || "default"
-
-  # Set this to 1 for strict FIFO.
-  # There is nothing special about 9....
-  TOP_BOUND = (ENV["QC_TOP_BOUND"] || 9).to_i
-
-  # Set this variable if you wish for
-  # the worker to fork a UNIX process for
-  # each locked job. Remember to re-establish
-  # any database connections. See the worker
-  # for more details.
-  FORK_WORKER = !ENV["QC_FORK_WORKER"].nil?
-
   # Defer method calls on the QC module to the
   # default queue. This facilitates QC.enqueue()
   def self.method_missing(sym, *args, &block)
@@ -46,9 +15,7 @@ module QC
   end
 
   def self.default_queue
-    @default_queue ||=
-      Queue.new(QUEUE)
-    end
+    @default_queue ||= Queue.new
   end
 
   def self.log_yield(data)
@@ -70,7 +37,7 @@ module QC
     if block_given?
       start = Time.now
       result = yield
-      data.merge(:elapsed => Integer((Time.now -
+      data.merge(:elapsed => Integer((Time.now - start)*1000))
     end
     data.reduce(out=String.new) do |s, tup|
       s << [tup.first, tup.last].join("=") << " "
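The constants that used to live here move into Conn, Queue, and Worker in this release, but the method_missing delegation keeps the top-level API the same. A minimal sketch of that delegation, assuming a configured database URL:

```ruby
require 'queue_classic'

# QC.enqueue/QC.count/etc. are not defined on the module itself; method_missing
# forwards them to QC.default_queue, which is now built with no arguments.
QC.enqueue("Kernel.puts", "hello world")
QC.count   # => number of jobs on the "default" queue
```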
data/lib/queue_classic/conf.rb
ADDED
@@ -0,0 +1,33 @@
+require 'uri'
+
+module QC
+  module Conf
+
+    def self.env(k); ENV[k]; end
+    def self.env!(k); env(k) || raise("Must set #{k}."); end
+
+    def self.debug?
+      !env('DEBUG').nil?
+    end
+
+    def self.db_url
+      url = env("QC_DATABASE_URL") ||
+            env("DATABASE_URL") ||
+            raise(ArgumentError, "Must set QC_DATABASE_URL or DATABASE_URL.")
+      URI.parse(url)
+    end
+
+    def self.normalized_db_url(url=nil)
+      url ||= db_url
+      host = url.host
+      host = host.gsub(/%2F/i, '/') if host
+      [host, # host or percent-encoded socket path
+       url.port || 5432,
+       nil, '', #opts, tty
+       url.path.gsub("/",""), # database name
+       url.user,
+       url.password]
+    end
+
+  end
+end
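A small sketch of the new QC::Conf helpers; the URL below is purely illustrative, and the expected array mirrors the assertions in conf_test.rb further down:

```ruby
require 'queue_classic/conf'

# Hypothetical URL, used here only for illustration.
ENV["QC_DATABASE_URL"] = "postgres://ryan:secret@localhost:1234/application_db"

# normalized_db_url returns the positional arguments handed to PGconn.connect:
# host, port, opts, tty, database name, user, password.
QC::Conf.normalized_db_url
# => ["localhost", 1234, nil, "", "application_db", "ryan", "secret"]
```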
data/lib/queue_classic/conn.rb
CHANGED
@@ -1,117 +1,104 @@
+require 'queue_classic/conf'
 require 'thread'
 require 'uri'
 require 'pg'
 
 module QC
-
-
-
+  class Conn
+    # Number of seconds to block on the listen chanel for new jobs.
+    WAIT_TIME = (ENV["QC_LISTEN_TIME"] || 5).to_i
+    # You can use the APP_NAME to query for
+    # postgres related process information in the
+    # pg_stat_activity table.
+    APP_NAME = ENV["QC_APP_NAME"] || "queue_classic"
+
+
+    def self.connect
+      QC.log(:at => "establish_conn")
+      conn = PGconn.connect(*Conf.normalized_db_url)
+      if conn.status != PGconn::CONNECTION_OK
+        log(:error => conn.error)
+      end
+      if !Conf.debug?
+        conn.exec("SET client_min_messages TO 'warning'")
+      end
+      conn.exec("SET application_name = '#{APP_NAME}'")
+      conn
+    end
+
+    def initialize
+      @c = self.class.connect
+      @max_attempts = 2
+    end
 
     def execute(stmt, *params)
-
-
-      begin
+      QC.log(:measure => "conn.exec", :sql => stmt.inspect) do
+        with_retry(@max_attempts) do
           params = nil if params.empty?
-          r =
+          r = @c.exec(stmt, params)
           result = []
           r.each {|t| result << t}
           result.length > 1 ? result : result.pop
-      rescue PGError => e
-        log(:error => e.inspect)
-        disconnect
-        raise
         end
       end
     end
 
     def wait(chan)
-
-
-
-
-
-
-    def transaction
-      begin
-        execute("BEGIN")
-        yield
-        execute("COMMIT")
-      rescue Exception
-        execute("ROLLBACK")
-        raise
+      with_retry(@max_attempts) do
+        execute('LISTEN "' + chan + '"')
+        wait_for_notify(WAIT_TIME)
+        execute('UNLISTEN "' + chan + '"')
+        drain_notify
       end
     end
 
-    def
-
-
-
-    def connection
-      @connection ||= connect
-    end
-
-    def connection=(connection)
-      unless connection.is_a? PG::Connection
-        c = connection.class
-        err = "connection must be an instance of PG::Connection, but was #{c}"
-        raise(ArgumentError, err)
-      end
-      @connection = connection
+    def reconnect
+      disconnect
+      @c = self.class.connect
     end
 
     def disconnect
-      begin
-      ensure @
+      begin @c && @c.finish
+      ensure @c = nil
      end
    end
 
-    def
-
-
-      if conn.status != PGconn::CONNECTION_OK
-        log(:error => conn.error)
+    def abort_open_transaction
+      if @c.transaction_status != PGconn::PQTRANS_IDLE
+        @c.exec('ROLLBACK')
      end
-      conn.exec("SET application_name = '#{QC::APP_NAME}'")
-      conn
-    end
-
-    def normalize_db_url(url)
-      host = url.host
-      host = host.gsub(/%2F/i, '/') if host
-
-      [
-        host, # host or percent-encoded socket path
-        url.port || 5432,
-        nil, '', #opts, tty
-        url.path.gsub("/",""), # database name
-        url.user,
-        url.password
-      ]
-    end
-
-    def db_url
-      return @db_url if @db_url
-      url = ENV["QC_DATABASE_URL"] ||
-        ENV["DATABASE_URL"] ||
-        raise(ArgumentError, "missing QC_DATABASE_URL or DATABASE_URL")
-      @db_url = URI.parse(url)
    end
 
    private
 
-    def
-
+    def with_retry(n)
+      completed = false
+      attempts = 0
+      result = nil
+      last_error = nil
+      until completed || attempts == n
+        attempts += 1
+        begin
+          result = yield
+          completed = true
+        rescue => e
+          QC.log(:error => e.class, :at => 'conn-retry', :attempts => attempts)
+          last_error = e
+          reconnect
+        end
+      end
+      completed ? result : raise(last_error)
    end
 
    def wait_for_notify(t)
      Array.new.tap do |msgs|
-
+        @c.wait_for_notify(t) {|event, pid, msg| msgs << msg}
      end
    end
 
    def drain_notify
-      until
-        log(:at => "drain_notifications")
+      until @c.notifies.nil?
+        QC.log(:at => "drain_notifications")
      end
    end
 
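Roughly how the reworked connection object is used by the rest of the gem (a sketch, not part of the diff; the SQL is an arbitrary example):

```ruby
require 'queue_classic'
require 'queue_classic/conn'

conn = QC::Conn.new              # connects immediately via Conf.normalized_db_url
row  = conn.execute("SELECT 1 AS one")  # wrapped in with_retry (2 attempts)
conn.wait("default")             # LISTEN, block up to WAIT_TIME, UNLISTEN, drain
conn.disconnect
```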
data/lib/queue_classic/queue.rb
CHANGED
@@ -4,24 +4,32 @@ require 'json'
 
 module QC
   class Queue
+    TABLE_NAME = "queue_classic_jobs"
+    # Each row in the table will have a column that
+    # notes the queue.
+    QUEUE_NAME = ENV["QUEUE"] || "default"
+    # Set this to 1 for strict FIFO.
+    TOP_BOUND = (ENV["QC_TOP_BOUND"] || 9).to_i
 
-
-
-
-      @
+
+    attr_reader :conn, :name, :top_bound
+    def initialize(opts={})
+      @conn = opts[:conn] || Conn.new
+      @name = opts[:name] || QUEUE_NAME
+      @top_bound = opts[:top_bound] || TOP_BOUND
     end
 
     def enqueue(method, *args)
       QC.log_yield(:measure => 'queue.enqueue') do
         s="INSERT INTO #{TABLE_NAME} (q_name, method, args) VALUES ($1, $2, $3)"
-        res =
+        res = conn.execute(s, name, method, JSON.dump(args))
       end
     end
 
     def lock
       QC.log_yield(:measure => 'queue.lock') do
         s = "SELECT * FROM lock_head($1, $2)"
-        if r =
+        if r = conn.execute(s, name, top_bound)
           {:id => r["id"],
            :method => r["method"],
            :args => JSON.parse(r["args"])}
@@ -29,23 +37,30 @@ module QC
       end
     end
 
+    def wait
+      QC.log_yield(:measure => 'queue.wait') do
+        conn.wait(name)
+      end
+    end
+
     def delete(id)
       QC.log_yield(:measure => 'queue.delete') do
-
+        s = "DELETE FROM #{TABLE_NAME} where id = $1"
+        conn.execute(s, id)
       end
     end
 
     def delete_all
       QC.log_yield(:measure => 'queue.delete_all') do
         s = "DELETE FROM #{TABLE_NAME} WHERE q_name = $1"
-
+        conn.execute(s, name)
       end
     end
 
     def count
       QC.log_yield(:measure => 'queue.count') do
         s = "SELECT COUNT(*) FROM #{TABLE_NAME} WHERE q_name = $1"
-        r =
+        r = conn.execute(s, name)
         r["count"].to_i
       end
     end
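Queue construction switches from positional arguments to an options hash; a sketch of the new call style (the queue name comes from the tests below, the payload is arbitrary):

```ruby
require 'queue_classic'
require 'queue_classic/queue'

# :conn, :name and :top_bound all default to the constants defined above.
queue = QC::Queue.new(:name => "priority_queue", :top_bound => 1)
queue.enqueue("Kernel.puts", "hello world")
job = queue.lock   # => {:id => "...", :method => "Kernel.puts", :args => ["hello world"]}
queue.delete(job[:id])
queue.conn.disconnect
```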
data/lib/queue_classic/setup.rb
CHANGED
@@ -1,3 +1,5 @@
+require 'queue_classic/conn'
+
 module QC
   module Setup
     Root = File.expand_path("../..", File.dirname(__FILE__))
@@ -5,14 +7,16 @@ module QC
     CreateTable = File.join(Root, "/sql/create_table.sql")
     DropSqlFunctions = File.join(Root, "/sql/drop_ddl.sql")
 
-    def self.create
-      Conn.
-
+    def self.create(conn=nil)
+      conn ||= Conn.new
+      conn.execute(File.read(CreateTable))
+      conn.execute(File.read(SqlFunctions))
     end
 
-    def self.drop
-      Conn.
-
+    def self.drop(conn=nil)
+      conn ||= Conn.new
+      conn.execute("DROP TABLE IF EXISTS queue_classic_jobs CASCADE")
+      conn.execute(File.read(DropSqlFunctions))
     end
   end
 end
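Setup.create and Setup.drop now take an optional connection, which lets callers reuse a single Conn for both DDL steps (this is how test/helper.rb below drives them). A sketch:

```ruby
require 'queue_classic'
require 'queue_classic/setup'

conn = QC::Conn.new
QC::Setup.drop(conn)    # falls back to Conn.new when no argument is given
QC::Setup.create(conn)
conn.disconnect
```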
data/lib/queue_classic/tasks.rb
CHANGED
@@ -1,31 +1,42 @@
-
+if Rake::Task.task_defined? "qc:count"
+  ActiveSupport::Deprecation.warn <<-MSG
+queue_classic Rake tasks are now loaded automatically for Rails applications.
+Loading the tasks yourself is deprecated. Please update your Rakefile and remove:
 
-
-
-  task :work => "qc:work"
-end
+  require 'queue_classic'
+  require 'queue_classic/tasks'
 
-
-
-task :
-  trap('INT') {exit}
-  trap('TERM') {@worker.stop}
-  @worker = QC::Worker.new
-  @worker.start
-end
+  MSG
+else
+  task :environment
 
-
-
-
+  namespace :jobs do
+    desc "Alias for qc:work"
+    task :work => "qc:work"
   end
 
-
-
-
-
+  namespace :qc do
+    desc "Start a new worker for the (default or $QUEUE) queue"
+    task :work => :environment do
+      trap('INT') {exit}
+      trap('TERM') {@worker.stop}
+      @worker = QC::Worker.new
+      @worker.start
+    end
+
+    desc "Returns the number of jobs in the (default or QUEUE) queue"
+    task :count => :environment do
+      puts QC::Worker.new.queue.count
+    end
+
+    desc "Setup queue_classic tables and functions in database"
+    task :create => :environment do
+      QC::Setup.create
+    end
 
-
-
-
+    desc "Remove queue_classic tables and functions from database."
+    task :drop => :environment do
+      QC::Setup.drop
+    end
   end
 end
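For Rails apps the railtie now loads these tasks automatically and the deprecation branch above warns if they are required a second time. A non-Rails Rakefile still wires them up explicitly, e.g.:

```ruby
# Rakefile of a non-Rails project (sketch)
require 'queue_classic'
require 'queue_classic/tasks'

# then, from the shell:
#   QUEUE="priority_queue" bundle exec rake qc:work
```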
data/lib/queue_classic/worker.rb
CHANGED
@@ -1,16 +1,28 @@
+require 'thread'
 require 'queue_classic'
 require 'queue_classic/queue'
-require 'queue_classic/conn'
 
 module QC
   class Worker
+    # Set this variable if you wish for
+    # the worker to fork a UNIX process for
+    # each locked job. Remember to re-establish
+    # any database connections. See the worker
+    # for more details.
+    FORK_WORKER = !ENV["QC_FORK_WORKER"].nil?
+    # The worker is capable of processing many jobs at a time.
+    # It uses FORK(2) to accomplish parallel processing. CONCURRENCY
+    # is used to set an uppoer bound on how many worker processes can
+    # run concurrently.
+    CONCURRENCY = Integer(ENV["QC_CONCURRENCY"] || 1)
 
     attr_accessor :queue, :running
     # In the case no arguments are passed to the initializer,
     # the defaults are pulled from the environment variables.
     def initialize(args={})
-      @fork_worker = args[:fork_worker] ||
-      @
+      @fork_worker = args[:fork_worker] || FORK_WORKER || (CONCURRENCY > 1)
+      @limiter = SizedQueue.new(args[:concurrency] || CONCURRENCY)
+      @queue = args[:queue] || QC.default_queue
       log(args.merge(:at => "worker_initialized"))
       @running = true
     end
@@ -35,9 +47,19 @@ module QC
     # Define setup_child to hook into the forking process.
     # Using setup_child is good for re-establishing database connections.
     def fork_and_work
-
-
-
+      # If the limiter is full, then we will block until space permits.
+      @limiter.enq(1)
+      Thread.new do
+        begin
+          cpid = fork {setup_child; work}
+          log(:at => :fork, :pid => cpid)
+          Process.wait(cpid)
+        ensure
+          # Once we are done with our work and our process has exited,
+          # we can allow another process to run.
+          @limiter.deq
+        end
+      end
     end
 
     # This method will lock a job & process the job.
@@ -57,7 +79,7 @@ module QC
       job = nil
       while @running
         break if job = @queue.lock
-
+        @queue.wait
       end
       job
     end
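A hand-rolled equivalent of `CONCURRENCY=4 QUEUE="priority_queue" rake qc:work`, using the option names the new initializer reads above (a sketch, assuming a configured database URL):

```ruby
require 'queue_classic'
require 'queue_classic/worker'

queue  = QC::Queue.new(:name => "priority_queue")
worker = QC::Worker.new(:queue => queue,
                        :concurrency => 4,      # sizes the SizedQueue limiter
                        :fork_worker => true)   # implied anyway when concurrency > 1

trap('INT')  { exit }
trap('TERM') { worker.stop }
worker.start
```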
data/readme.md
CHANGED
@@ -1,6 +1,6 @@
 # queue_classic
 
-v2.2.
+v2.2.1
 
 queue_classic provides a simple interface to a PostgreSQL-backed message queue. queue_classic specializes in concurrent locking and minimizing database load while providing a simple, intuitive developer experience. queue_classic assumes that you are already using PostgreSQL in your production environment and that adding another dependency (e.g. redis, beanstalkd, 0mq) is undesirable.
 
@@ -9,12 +9,12 @@ Features:
 * Leverage of PostgreSQL's listen/notify & row locking.
 * Support for multiple queues with heterogeneous workers.
 * JSON data format.
-*
-* [
+* Concurrent job processing using forking workers.
+* [Reduced contention FIFO design](http://www.cs.tau.ac.il/~shanir/nir-pubs-web/Papers/Lock_Free.pdf).
 
 Contents:
 
-* [Documentation](http://rubydoc.info/gems/queue_classic/2.2.
+* [Documentation](http://rubydoc.info/gems/queue_classic/2.2.1/frames)
 * [Usage](#usage)
 * [Setup](#setup)
 * [Configuration](#configuration)
@@ -56,11 +56,17 @@ p_queue.enqueue("Kernel.puts", ["hello", "world"])
 
 ### Working Jobs
 
-There are two ways to work jobs. The first approach is to use the Rake task. The second approach is to use a custom executable.
+There are two ways to work jobs. The first approach is to use the Rake task. The second approach is to use a custom executable. Each approach provides a set of configuration options accessable through the processes' environment:
+
+* `$CONCURRENCY=1` - The number of child processes to run concurrently.
+* `$FORK_WORKER=false` - Fork on each job execution. Enabled if `$CONCURRENCY` > 1
+* `$QUEUE=default` - The name of the queue to process.
+* `$TOP_BOUND=9` - The section of the queue that is elgible for dequeue operations. Setting this value to 1 will ensure a strict FIFO ordering.
 
 #### Rake Task
 
-Require queue_classic in your Rakefile.
+Require queue_classic in your Rakefile. If you are using Rails, the tasks will
+be loaded automatically.
 
 ```ruby
 require 'queue_classic'
@@ -73,10 +79,10 @@ Start the worker via the Rakefile.
 $ bundle exec rake qc:work
 ```
 
-Setup a worker to work a non-default queue.
+Setup a worker to work a non-default queue while processing 4 jobs at a time.
 
 ```bash
-$ QUEUE="priority_queue" bundle exec rake qc:work
+$ CONCURRENCY=4 QUEUE="priority_queue" bundle exec rake qc:work
 ```
 
 #### Custom Worker
@@ -129,7 +135,7 @@ Declare dependencies in Gemfile.
 
 ```ruby
 source "http://rubygems.org"
-gem "queue_classic", "2.2.
+gem "queue_classic", "2.2.1"
 ```
 
 By default, queue_classic will use the QC_DATABASE_URL falling back on DATABASE_URL. The URL must be in the following format: `postgres://username:password@localhost/database_name`. If you use Heroku's PostgreSQL service, this will already be set. If you don't want to set this variable, you can set the connection in an initializer. **QueueClassic will maintain its own connection to the database.** This may double the number of connections to your database. Set QC::Conn.connection to share the connection between Rails & QueueClassic
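The connection URL described in the readme's last paragraph is read through the new Conf module; a sketch of pointing queue_classic at a database (the credentials are placeholders):

```ruby
# e.g. in the shell:
#   export QC_DATABASE_URL="postgres://username:password@localhost/database_name"
require 'queue_classic'

QC::Conf.db_url   # parses QC_DATABASE_URL, falling back to DATABASE_URL,
                  # and raises ArgumentError if neither is set
```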
data/test/benchmark_test.rb
CHANGED
@@ -1,4 +1,5 @@
 require File.expand_path("../helper.rb", __FILE__)
+Thread.abort_on_exception = true
 
 if ENV["QC_BENCHMARK"]
   class BenchmarkTest < QCTest
@@ -6,31 +7,26 @@ if ENV["QC_BENCHMARK"]
     def test_enqueue
       n = 10_000
       start = Time.now
-      n.times
-        QC.enqueue("1.odd?", [])
-      end
+      n.times {QC.enqueue("1.odd?")}
       assert_equal(n, QC.count)
-
       elapsed = Time.now - start
       assert_in_delta(4, elapsed, 1)
     end
 
     def test_dequeue
-
-      worker
-
-      n
-
-
-      assert_equal(n,
+      queue = QC::Queue.new
+      worker = QC::Worker.new(:concurrency => 4, :queue => queue)
+      queue.delete_all
+      n = 20
+
+      n.times {queue.enqueue("puts", "hello")}
+      assert_equal(n, queue.count)
 
       start = Time.now
-      n.times
-        worker.work
-      end
-      elapsed = Time.now - start
+      n.times.map {worker.fork_and_work}.map(&:join)
 
-
+      elapsed = Time.now - start
+      assert_equal(0, queue.count)
       assert_in_delta(10, elapsed, 3)
     end
 
data/test/{conn_test.rb → conf_test.rb}
RENAMED
@@ -1,10 +1,14 @@
 require File.expand_path("../helper.rb", __FILE__)
 
-class ConnTest < QCTest
+class ConfTest < QCTest
+
+  def setup
+    init_db
+  end
 
   def test_extracts_the_segemnts_to_connect
     database_url = "postgres://ryan:secret@localhost:1234/application_db"
-    normalized = QC::
+    normalized = QC::Conf.normalized_db_url(URI.parse(database_url))
     assert_equal ["localhost",
                   1234,
                   nil, "",
@@ -15,7 +19,7 @@ class ConnTest < QCTest
 
   def test_regression_database_url_without_host
     database_url = "postgres:///my_db"
-    normalized = QC::
+    normalized = QC::Conf.normalized_db_url(URI.parse(database_url))
     assert_equal [nil, 5432, nil, "", "my_db", nil, nil], normalized
   end
 
data/test/helper.rb
CHANGED
@@ -9,19 +9,12 @@ require "minitest/autorun"
 
 class QCTest < Minitest::Test
 
-  def setup
-    init_db
-  end
-
-  def teardown
-    QC.delete_all
-  end
-
   def init_db
-    QC::Conn.
-    QC::Setup.drop
-    QC::Setup.create
-
+    c = QC::Conn.new
+    QC::Setup.drop(c)
+    QC::Setup.create(c)
+    c.execute(File.read('./test/helper.sql'))
+    c.disconnect
   end
 
   def capture_debug_output
data/test/queue_test.rb
CHANGED
@@ -2,6 +2,14 @@ require File.expand_path("../helper.rb", __FILE__)
 
 class QueueTest < QCTest
 
+  def setup
+    init_db
+  end
+
+  def teardown
+    QC.conn.disconnect
+  end
+
   def test_enqueue
     QC.enqueue("Klass.method")
   end
@@ -44,8 +52,8 @@ class QueueTest < QCTest
   end
 
   def test_delete_all_by_queue_name
-    p_queue = QC::Queue.new("priority_queue")
-    s_queue = QC::Queue.new("secondary_queue")
+    p_queue = QC::Queue.new(:name => "priority_queue")
+    s_queue = QC::Queue.new(:name => "secondary_queue")
     p_queue.enqueue("Klass.method")
     s_queue.enqueue("Klass.method")
     assert_equal(1, p_queue.count)
@@ -53,32 +61,19 @@ class QueueTest < QCTest
     p_queue.delete_all
     assert_equal(0, p_queue.count)
     assert_equal(1, s_queue.count)
+  ensure
+    p_queue.conn.disconnect
+    s_queue.conn.disconnect
   end
 
   def test_queue_instance
-    queue = QC::Queue.new("queue_classic_jobs")
+    queue = QC::Queue.new(:name => "queue_classic_jobs")
     queue.enqueue("Klass.method")
     assert_equal(1, queue.count)
     queue.delete(queue.lock[:id])
     assert_equal(0, queue.count)
-
-
-  def test_repair_after_error
-    queue = QC::Queue.new("queue_classic_jobs")
-    queue.enqueue("Klass.method")
-    assert_equal(1, queue.count)
-    connection = QC::Conn.connection
-    saved_method = connection.method(:exec)
-    def connection.exec(*args)
-      raise PGError
-    end
-    assert_raises(PG::Error) { queue.enqueue("Klass.other_method") }
-    assert_equal(1, queue.count)
-    queue.enqueue("Klass.other_method")
-    assert_equal(2, queue.count)
-  rescue PG::Error
-    QC::Conn.disconnect
-    assert false, "Expected to QC repair after connection error"
+  ensure
+    queue.conn.disconnect
   end
 
   def test_custom_default_queue
@@ -102,15 +97,17 @@ class QueueTest < QCTest
   end
 
   def test_enqueue_triggers_notify
-    QC::Conn.
-    QC::
-
-    msgs =
+    c = QC::Conn.new
+    c.execute('LISTEN "' + QC::Queue::QUEUE_NAME + '"')
+    c.send(:drain_notify)
+    msgs = c.send(:wait_for_notify, 0.25)
     assert_equal(0, msgs.length)
 
     QC.enqueue("Klass.method")
-    msgs =
+    msgs = c.send(:wait_for_notify, 0.25)
     assert_equal(1, msgs.length)
+  ensure
+    c.disconnect
   end
 
 end
data/test/worker_test.rb
CHANGED
@@ -26,6 +26,14 @@ end
 
 class WorkerTest < QCTest
 
+  def setup
+    init_db
+  end
+
+  def teardown
+    QC.conn.disconnect
+  end
+
   def test_work
     QC.enqueue("TestObject.no_args")
     worker = TestWorker.new
@@ -94,32 +102,40 @@ class WorkerTest < QCTest
   end
 
   def test_work_custom_queue
-    p_queue = QC::Queue.new("priority_queue")
+    p_queue = QC::Queue.new(:name=> "priority_queue")
     p_queue.enqueue("TestObject.two_args", "1", 2)
-    worker = TestWorker.new(
+    worker = TestWorker.new(:queue => p_queue)
     r = worker.work
     assert_equal(["1", 2], r)
     assert_equal(0, worker.failed_count)
+    worker.stop
+    p_queue.conn.disconnect
   end
 
   def test_worker_listens_on_chan
-    p_queue = QC::Queue.new("priority_queue")
+    p_queue = QC::Queue.new(:name => "priority_queue")
     p_queue.enqueue("TestObject.two_args", "1", 2)
-    worker = TestWorker.new(
+    worker = TestWorker.new(
+      :queue => p_queue,
+      :listening_worker => true)
     r = worker.work
     assert_equal(["1", 2], r)
     assert_equal(0, worker.failed_count)
+    worker.stop
+    p_queue.conn.disconnect
   end
 
   def test_worker_ueses_one_conn
     QC.enqueue("TestObject.no_args")
     worker = TestWorker.new
     worker.work
-
-
-
-
-
+    s = "SELECT * from pg_stat_activity where datname=current_database()"
+    s += " and application_name = '#{QC::Conn::APP_NAME}'"
+    res = QC.conn.execute(s)
+    num_conns = res.length if res.class == Array
+    num_conns = 1 if res.class == Hash
+    assert_equal(1, num_conns,
+      "Multiple connections found -- are there open connections to" +
+      " #{QC::Conf.db_url} in other terminals?\n res=#{res}")
   end
-
 end
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: queue_classic
 version: !ruby/object:Gem::Version
-  version: 2.2.3
+  version: 2.3.0beta
 platform: ruby
 authors:
 - Ryan Smith (♠ ace hacker)
@@ -16,14 +16,14 @@ dependencies:
     requirements:
     - - ~>
       - !ruby/object:Gem::Version
-        version: 0.
+        version: 0.16.0
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - ~>
      - !ruby/object:Gem::Version
-        version: 0.
+        version: 0.16.0
 description: queue_classic is a queueing library for Ruby apps. (Rails, Sinatra, Etc...)
   queue_classic features asynchronous job polling, database maintained locks and no
   ridiculous dependencies. As a matter of fact, queue_classic only requires pg.
@@ -38,6 +38,7 @@ files:
 - sql/drop_ddl.sql
 - lib/generators/queue_classic/install_generator.rb
 - lib/generators/queue_classic/templates/add_queue_classic.rb
+- lib/queue_classic/conf.rb
 - lib/queue_classic/conn.rb
 - lib/queue_classic/queue.rb
 - lib/queue_classic/railtie.rb
@@ -46,7 +47,7 @@ files:
 - lib/queue_classic/worker.rb
 - lib/queue_classic.rb
 - test/benchmark_test.rb
-- test/conn_test.rb
+- test/conf_test.rb
 - test/helper.rb
 - test/queue_test.rb
 - test/worker_test.rb
@@ -65,17 +66,17 @@ required_ruby_version: !ruby/object:Gem::Requirement
       version: '0'
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
-  - - '
+  - - '>'
     - !ruby/object:Gem::Version
-      version:
+      version: 1.3.1
 requirements: []
 rubyforge_project:
 rubygems_version: 2.0.3
 signing_key:
 specification_version: 4
-summary:
+summary: postgres backed queue
 test_files:
 - test/benchmark_test.rb
-- test/conn_test.rb
+- test/conf_test.rb
 - test/queue_test.rb
 - test/worker_test.rb