queue_classic_pg2 3.2.0.RC1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +9 -0
- data/.travis.yml +19 -0
- data/CONTRIBUTING.md +17 -0
- data/Gemfile +9 -0
- data/LICENSE.txt +20 -0
- data/README.md +326 -0
- data/Rakefile +14 -0
- data/changelog +146 -0
- data/lib/generators/queue_classic/install_generator.rb +36 -0
- data/lib/generators/queue_classic/templates/add_queue_classic.rb +9 -0
- data/lib/generators/queue_classic/templates/update_queue_classic_3_0_0.rb +9 -0
- data/lib/generators/queue_classic/templates/update_queue_classic_3_0_2.rb +11 -0
- data/lib/generators/queue_classic/templates/update_queue_classic_3_1_0.rb +9 -0
- data/lib/queue_classic/config.rb +85 -0
- data/lib/queue_classic/conn_adapter.rb +111 -0
- data/lib/queue_classic/queue.rb +119 -0
- data/lib/queue_classic/railtie.rb +9 -0
- data/lib/queue_classic/setup.rb +58 -0
- data/lib/queue_classic/tasks.rb +49 -0
- data/lib/queue_classic/version.rb +3 -0
- data/lib/queue_classic/worker.rb +166 -0
- data/lib/queue_classic.rb +122 -0
- data/queue_classic.gemspec +24 -0
- data/sql/create_table.sql +25 -0
- data/sql/ddl.sql +78 -0
- data/sql/downgrade_from_3_0_0.sql +2 -0
- data/sql/downgrade_from_3_1_0.sql +1 -0
- data/sql/drop_ddl.sql +3 -0
- data/sql/update_to_3_0_0.sql +17 -0
- data/sql/update_to_3_1_0.sql +9 -0
- data/test/benchmark_test.rb +39 -0
- data/test/config_test.rb +121 -0
- data/test/helper.rb +61 -0
- data/test/helper.sql +25 -0
- data/test/lib/queue_classic_rails_connection_test.rb +43 -0
- data/test/lib/queue_classic_test.rb +42 -0
- data/test/queue_test.rb +208 -0
- data/test/worker_test.rb +219 -0
- metadata +112 -0
data/lib/queue_classic/config.rb
@@ -0,0 +1,85 @@
+module QC
+  module Config
+    # You can use the APP_NAME to query for
+    # postgres related process information in the
+    # pg_stat_activity table.
+    def app_name
+      @app_name ||= ENV["QC_APP_NAME"] || "queue_classic"
+    end
+
+    # Number of seconds to block on the listen chanel for new jobs.
+    def wait_time
+      @wait_time ||= (ENV["QC_LISTEN_TIME"] || 5).to_i
+    end
+
+    # Why do you want to change the table name?
+    # Just deal with the default OK?
+    # If you do want to change this, you will
+    # need to update the PL/pgSQL lock_head() function.
+    # Come on. Don't do it.... Just stick with the default.
+    def table_name
+      @table_name ||= "queue_classic_jobs"
+    end
+
+    def queue
+      @queue = ENV["QUEUE"] || "default"
+    end
+
+    # The default queue used by `QC.enqueue`.
+    def default_queue
+      @default_queue ||= Queue.new(QC.queue)
+    end
+
+    def default_queue=(queue)
+      @default_queue = queue
+    end
+
+    # Each row in the table will have a column that
+    # notes the queue. You can point your workers
+    # at different queues.
+    def queues
+      @queues ||= (ENV["QUEUES"] && ENV["QUEUES"].split(",").map(&:strip)) || []
+    end
+
+    # Set this to 1 for strict FIFO.
+    # There is nothing special about 9....
+    def top_bound
+      @top_bound ||= (ENV["QC_TOP_BOUND"] || 9).to_i
+    end
+
+    # Set this variable if you wish for
+    # the worker to fork a UNIX process for
+    # each locked job. Remember to re-establish
+    # any database connections. See the worker
+    # for more details.
+    def fork_worker?
+      @fork_worker ||= (!ENV["QC_FORK_WORKER"].nil?)
+    end
+
+    # The worker class instantiated by QC's rake tasks.
+    def default_worker_class
+
+      @worker_class ||= (ENV["QC_DEFAULT_WORKER_CLASS"] && Kernel.const_get(ENV["QC_DEFAULT_WORKER_CLASS"]) ||
+                         QC::Worker)
+
+    end
+
+    def default_worker_class=(worker_class)
+      @worker_class = worker_class
+    end
+
+    # reset memoized configuration
+    def reset_config
+      # TODO: we might want to think about storing these in a Hash.
+      @app_name = nil
+      @wait_time = nil
+      @table_name = nil
+      @queue = nil
+      @default_queue = nil
+      @queues = nil
+      @top_bound = nil
+      @fork_worker = nil
+      @worker_class = nil
+    end
+  end
+end
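
The configuration above is memoized module state driven by environment variables. A minimal usage sketch, assuming `QC` extends `QC::Config` (as `lib/queue_classic.rb` in this gem conventionally does); the values below are illustrative:

    require 'queue_classic'

    # Environment variables read by QC::Config:
    #   QC_APP_NAME     -> QC.app_name   (visible in pg_stat_activity)
    #   QC_LISTEN_TIME  -> QC.wait_time  (seconds to block on LISTEN)
    #   QUEUE / QUEUES  -> QC.queue / QC.queues
    #   QC_TOP_BOUND    -> QC.top_bound  (1 == strict FIFO)
    #   QC_FORK_WORKER  -> QC.fork_worker?
    ENV["QC_TOP_BOUND"] = "1"
    QC.reset_config                               # drop memoized values so the new ENV is read
    QC.top_bound                                  # => 1
    QC.default_queue = QC::Queue.new("mailers")   # override the default queue in code
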
data/lib/queue_classic/conn_adapter.rb
@@ -0,0 +1,111 @@
+require 'uri'
+require 'pg'
+
+module QC
+  class ConnAdapter
+
+    attr_accessor :connection
+    def initialize(c=nil)
+      @connection = c.nil? ? establish_new : validate!(c)
+      @mutex = Mutex.new
+    end
+
+    def execute(stmt, *params)
+      @mutex.synchronize do
+        QC.log(:at => "exec_sql", :sql => stmt.inspect)
+        begin
+          params = nil if params.empty?
+          r = @connection.exec(stmt, params)
+          result = []
+          r.each {|t| result << t}
+          result.length > 1 ? result : result.pop
+        rescue PG::Error => e
+          QC.log(:error => e.inspect)
+          @connection.reset
+          raise
+        end
+      end
+    end
+
+    def wait(time, *channels)
+      @mutex.synchronize do
+        listen_cmds = channels.map {|c| 'LISTEN "' + c.to_s + '"'}
+        @connection.exec(listen_cmds.join(';'))
+        wait_for_notify(time)
+        unlisten_cmds = channels.map {|c| 'UNLISTEN "' + c.to_s + '"'}
+        @connection.exec(unlisten_cmds.join(';'))
+        drain_notify
+      end
+    end
+
+    def disconnect
+      @mutex.synchronize do
+        begin
+          @connection.close
+        rescue => e
+          QC.log(:at => 'disconnect', :error => e.message)
+        end
+      end
+    end
+
+    def server_version
+      @server_version ||= begin
+        version = execute("SHOW server_version_num;")["server_version_num"]
+        version && version.to_i
+      end
+    end
+
+    private
+
+    def wait_for_notify(t)
+      Array.new.tap do |msgs|
+        @connection.wait_for_notify(t) {|event, pid, msg| msgs << msg}
+      end
+    end
+
+    def drain_notify
+      until @connection.notifies.nil?
+        QC.log(:at => "drain_notifications")
+      end
+    end
+
+    def validate!(c)
+      return c if c.is_a?(PG::Connection)
+      err = "connection must be an instance of PG::Connection, but was #{c.class}"
+      raise(ArgumentError, err)
+    end
+
+    def establish_new
+      QC.log(:at => "establish_conn")
+      conn = PG.connect(*normalize_db_url(db_url))
+      if conn.status != PG::CONNECTION_OK
+        QC.log(:error => conn.error)
+      end
+      conn.exec("SET application_name = '#{QC.app_name}'")
+      conn
+    end
+
+    def normalize_db_url(url)
+      host = url.host
+      host = host.gsub(/%2F/i, '/') if host
+
+      [
+       host, # host or percent-encoded socket path
+       url.port || 5432,
+       nil, '', #opts, tty
+       url.path.gsub("/",""), # database name
+       url.user,
+       url.password
+      ]
+    end
+
+    def db_url
+      return @db_url if defined?(@db_url) && @db_url
+      url = ENV["QC_DATABASE_URL"] ||
+            ENV["DATABASE_URL"] ||
+            raise(ArgumentError, "missing QC_DATABASE_URL or DATABASE_URL")
+      @db_url = URI.parse(url)
+    end
+
+  end
+end
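
A short sketch of using the adapter on its own; the connection URL is a placeholder, and `QC_DATABASE_URL` / `DATABASE_URL` are the variables read by `db_url` above:

    require 'queue_classic'

    ENV["QC_DATABASE_URL"] ||= "postgres://user:secret@localhost:5432/myapp_development"

    adapter = QC::ConnAdapter.new          # no argument: builds a PG::Connection from the URL
    adapter.execute("SELECT 1 AS one")     # => {"one"=>"1"} (a single row comes back as a hash)
    adapter.server_version                 # e.g. 90400 for PostgreSQL 9.4
    adapter.disconnect

    # Alternatively, hand it an existing PG::Connection; validate! rejects anything else:
    # QC::ConnAdapter.new(PG.connect(dbname: "myapp_development"))
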
data/lib/queue_classic/queue.rb
@@ -0,0 +1,119 @@
+require_relative 'conn_adapter'
+require 'json'
+require 'time'
+
+module QC
+  # The queue class maps a queue abstraction onto a database table.
+  class Queue
+
+    attr_reader :name, :top_bound
+    def initialize(name, top_bound=nil)
+      @name = name
+      @top_bound = top_bound || QC.top_bound
+    end
+
+    def conn_adapter=(a)
+      @adapter = a
+    end
+
+    def conn_adapter
+      @adapter ||= QC.default_conn_adapter
+    end
+
+    # enqueue(m,a) inserts a row into the jobs table and trigger a notification.
+    # The job's queue is represented by a name column in the row.
+    # There is a trigger on the table which will send a NOTIFY event
+    # on a channel which corresponds to the name of the queue.
+    # The method argument is a string encoded ruby expression. The expression
+    # will be separated by a `.` character and then `eval`d.
+    # Examples of the method argument include: `puts`, `Kernel.puts`,
+    # `MyObject.new.puts`.
+    # The args argument will be encoded as JSON and stored as a JSON datatype
+    # in the row. (If the version of PG does not support JSON,
+    # then the args will be stored as text.
+    # The args are stored as a collection and then splatted inside the worker.
+    # Examples of args include: `'hello world'`, `['hello world']`,
+    # `'hello', 'world'`.
+    # This method returns a hash with the id of the enqueued job.
+    def enqueue(method, *args)
+      QC.log_yield(:measure => 'queue.enqueue') do
+        s = "INSERT INTO #{QC.table_name} (q_name, method, args) VALUES ($1, $2, $3) RETURNING id"
+        conn_adapter.execute(s, name, method, JSON.dump(args))
+      end
+    end
+
+    # enqueue_at(t,m,a) inserts a row into the jobs table representing a job
+    # to be executed not before the specified time.
+    # The time argument must be a Time object or a float timestamp. The method
+    # and args argument must be in the form described in the documentation for
+    # the #enqueue method.
+    # This method returns a hash with the id of the enqueued job.
+    def enqueue_at(timestamp, method, *args)
+      offset = Time.at(timestamp).to_i - Time.now.to_i
+      enqueue_in(offset, method, *args)
+    end
+
+    # enqueue_in(t,m,a) inserts a row into the jobs table representing a job
+    # to be executed not before the specified time offset.
+    # The seconds argument must be an integer. The method and args argument
+    # must be in the form described in the documentation for the #enqueue
+    # method.
+    # This method returns a hash with the id of the enqueued job.
+    def enqueue_in(seconds, method, *args)
+      QC.log_yield(:measure => 'queue.enqueue') do
+        s = "INSERT INTO #{QC.table_name} (q_name, method, args, scheduled_at)
+             VALUES ($1, $2, $3, now() + interval '#{seconds.to_i} seconds')
+             RETURNING id"
+        conn_adapter.execute(s, name, method, JSON.dump(args))
+      end
+    end
+
+    def lock
+      QC.log_yield(:measure => 'queue.lock') do
+        s = "SELECT * FROM lock_head($1, $2)"
+        if r = conn_adapter.execute(s, name, top_bound)
+          {}.tap do |job|
+            job[:id] = r["id"]
+            job[:q_name] = r["q_name"]
+            job[:method] = r["method"]
+            job[:args] = JSON.parse(r["args"])
+            if r["scheduled_at"]
+              job[:scheduled_at] = Time.parse(r["scheduled_at"])
+              ttl = Integer((Time.now - job[:scheduled_at]) * 1000)
+              QC.measure("time-to-lock=#{ttl}ms source=#{name}")
+            end
+          end
+        end
+      end
+    end
+
+    def unlock(id)
+      QC.log_yield(:measure => 'queue.unlock') do
+        s = "UPDATE #{QC.table_name} SET locked_at = NULL WHERE id = $1"
+        conn_adapter.execute(s, id)
+      end
+    end
+
+    def delete(id)
+      QC.log_yield(:measure => 'queue.delete') do
+        conn_adapter.execute("DELETE FROM #{QC.table_name} WHERE id = $1", id)
+      end
+    end
+
+    def delete_all
+      QC.log_yield(:measure => 'queue.delete_all') do
+        s = "DELETE FROM #{QC.table_name} WHERE q_name = $1"
+        conn_adapter.execute(s, name)
+      end
+    end
+
+    def count
+      QC.log_yield(:measure => 'queue.count') do
+        s = "SELECT COUNT(*) FROM #{QC.table_name} WHERE q_name = $1"
+        r = conn_adapter.execute(s, name)
+        r["count"].to_i
+      end
+    end
+
+  end
+end
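
A usage sketch for the queue API defined above; the queue name and job method are illustrative, and it assumes the table and `lock_head()` function from `sql/create_table.sql` and `sql/ddl.sql` are already installed:

    mailers = QC::Queue.new("mailers")

    mailers.enqueue("Kernel.puts", "hello world")            # worker will eval Kernel and send :puts
    mailers.enqueue_in(60, "Kernel.puts", "one minute")      # scheduled_at = now() + 60 seconds
    mailers.enqueue_at(Time.now + 300, "Kernel.puts", "later")

    mailers.count              # number of rows with q_name = 'mailers'
    if job = mailers.lock      # {:id=>"1", :q_name=>"mailers", :method=>"Kernel.puts", :args=>["hello world"]}
      mailers.delete(job[:id]) # callers are responsible for deleting finished jobs
    end
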
data/lib/queue_classic/setup.rb
@@ -0,0 +1,58 @@
+module QC
+  module Setup
+    Root = File.expand_path("../..", File.dirname(__FILE__))
+    SqlFunctions = File.join(Root, "/sql/ddl.sql")
+    CreateTable = File.join(Root, "/sql/create_table.sql")
+    DropSqlFunctions = File.join(Root, "/sql/drop_ddl.sql")
+    UpgradeTo_3_0_0 = File.join(Root, "/sql/update_to_3_0_0.sql")
+    DowngradeFrom_3_0_0 = File.join(Root, "/sql/downgrade_from_3_0_0.sql")
+    UpgradeTo_3_1_0 = File.join(Root, "/sql/update_to_3_1_0.sql")
+    DowngradeFrom_3_1_0 = File.join(Root, "/sql/downgrade_from_3_1_0.sql")
+
+    def self.create(c = QC::default_conn_adapter.connection)
+      conn = QC::ConnAdapter.new(c)
+      conn.execute(File.read(CreateTable))
+      conn.execute(File.read(SqlFunctions))
+      conn.disconnect if c.nil? #Don't close a conn we didn't create.
+    end
+
+    def self.drop(c = QC::default_conn_adapter.connection)
+      conn = QC::ConnAdapter.new(c)
+      conn.execute("DROP TABLE IF EXISTS queue_classic_jobs CASCADE")
+      conn.execute(File.read(DropSqlFunctions))
+      conn.disconnect if c.nil? #Don't close a conn we didn't create.
+    end
+
+    def self.update(c = QC::default_conn_adapter.connection)
+      conn = QC::ConnAdapter.new(c)
+      conn.execute(File.read(UpgradeTo_3_0_0))
+      conn.execute(File.read(UpgradeTo_3_1_0))
+      conn.execute(File.read(DropSqlFunctions))
+      conn.execute(File.read(SqlFunctions))
+    end
+
+    def self.update_to_3_0_0(c = QC::default_conn_adapter.connection)
+      conn = QC::ConnAdapter.new(c)
+      conn.execute(File.read(UpgradeTo_3_0_0))
+      conn.execute(File.read(DropSqlFunctions))
+      conn.execute(File.read(SqlFunctions))
+    end
+
+    def self.downgrade_from_3_0_0(c = QC::default_conn_adapter.connection)
+      conn = QC::ConnAdapter.new(c)
+      conn.execute(File.read(DowngradeFrom_3_0_0))
+    end
+
+    def self.update_to_3_1_0(c = QC::default_conn_adapter.connection)
+      conn = QC::ConnAdapter.new(c)
+      conn.execute(File.read(UpgradeTo_3_1_0))
+      conn.execute(File.read(DropSqlFunctions))
+      conn.execute(File.read(SqlFunctions))
+    end
+
+    def self.downgrade_from_3_1_0(c = QC::default_conn_adapter.connection)
+      conn = QC::ConnAdapter.new(c)
+      conn.execute(File.read(DowngradeFrom_3_1_0))
+    end
+  end
+end
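
A sketch of invoking these helpers from a Rails migration, passing in the existing connection so Setup neither opens nor closes one of its own; outside Rails, calling them with no argument builds a connection from the database URL. The `raw_connection` handoff assumes the `pg` ActiveRecord adapter:

    class AddQueueClassic < ActiveRecord::Migration
      def self.up
        QC::Setup.create(ActiveRecord::Base.connection.raw_connection)
      end

      def self.down
        QC::Setup.drop(ActiveRecord::Base.connection.raw_connection)
      end
    end

    # Outside Rails:
    # QC::Setup.create      # uses QC's own adapter and disconnects it afterwards
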
data/lib/queue_classic/tasks.rb
@@ -0,0 +1,49 @@
+task :environment
+
+namespace :jobs do
+  desc "Alias for qc:work"
+  task :work => "qc:work"
+end
+
+namespace :qc do
+  desc "Start a new worker for the (default or $QUEUE / $QUEUES) queue"
+  task :work => :environment do
+    @worker = QC.default_worker_class.new
+
+    trap('INT') do
+      $stderr.puts("Received INT. Shutting down.")
+      if !@worker.running
+        $stderr.puts("Worker has stopped running. Exit.")
+        exit(1)
+      end
+      @worker.stop
+    end
+
+    trap('TERM') do
+      $stderr.puts("Received Term. Shutting down.")
+      @worker.stop
+    end
+
+    @worker.start
+  end
+
+  desc "Returns the number of jobs in the (default or $QUEUE / $QUEUES) queue"
+  task :count => :environment do
+    puts QC.default_queue.count
+  end
+
+  desc "Setup queue_classic tables and functions in database"
+  task :create => :environment do
+    QC::Setup.create
+  end
+
+  desc "Remove queue_classic tables and functions from database."
+  task :drop => :environment do
+    QC::Setup.drop
+  end
+
+  desc "Update queue_classic tables and functions in database"
+  task :update => :environment do
+    QC::Setup.update
+  end
+end
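
A sketch of wiring these tasks into a non-Rails Rakefile (Rails apps already define an `:environment` task; the file names below are illustrative):

    # Rakefile
    require 'queue_classic'
    require 'queue_classic/tasks'

    task :environment do
      require_relative 'app'    # load the code your jobs will call
    end

    # then, from a shell:
    #   QUEUES=mailers,default bundle exec rake qc:work
    #   bundle exec rake qc:count
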
data/lib/queue_classic/worker.rb
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+require_relative 'queue'
+require_relative 'conn_adapter'
+
+module QC
+  # A Worker object can process jobs from one or many queues.
+  class Worker
+
+    attr_accessor :queues, :running
+
+    # Creates a new worker but does not start the worker. See Worker#start.
+    # This method takes a single hash argument. The following keys are read:
+    # fork_worker:: Worker forks each job execution.
+    # wait_interval:: Time to wait between failed lock attempts
+    # connection:: PG::Connection object.
+    # q_name:: Name of a single queue to process.
+    # q_names:: Names of queues to process. Will process left to right.
+    # top_bound:: Offset to the head of the queue. 1 == strict FIFO.
+    def initialize(args={})
+      @fork_worker = args[:fork_worker] || QC.fork_worker?
+      @wait_interval = args[:wait_interval] || QC.wait_time
+
+      if args[:connection]
+        @conn_adapter = ConnAdapter.new(args[:connection])
+      else
+        @conn_adapter = QC.default_conn_adapter
+      end
+
+      @queues = setup_queues(@conn_adapter,
+                             (args[:q_name] || QC.queue),
+                             (args[:q_names] || QC.queues),
+                             (args[:top_bound] || QC.top_bound))
+      log(args.merge(:at => "worker_initialized"))
+      @running = true
+    end
+
+    # Commences the working of jobs.
+    # start() spins on @running –which is initialized as true.
+    # This method is the primary entry point to starting the worker.
+    # The canonical example of starting a worker is as follows:
+    # QC::Worker.new.start
+    def start
+      QC.unlock_jobs_of_dead_workers
+
+      while @running
+        @fork_worker ? fork_and_work : work
+      end
+    end
+
+    # Signals the worker to stop taking new work.
+    # This method has no immediate effect. However, there are
+    # two loops in the worker (one in #start and another in #lock_job)
+    # which check the @running variable to determine if further progress
+    # is desirable. In the case that @running is false, the aforementioned
+    # methods will short circuit and cause the blocking call to #start
+    # to unblock.
+    def stop
+      @running = false
+    end
+
+    # Calls Worker#work but after the current process is forked.
+    # The parent process will wait on the child process to exit.
+    def fork_and_work
+      cpid = fork {setup_child; work}
+      log(:at => :fork, :pid => cpid)
+      Process.wait(cpid)
+    end
+
+    # Blocks on locking a job, and once a job is locked,
+    # it will process the job.
+    def work
+      queue, job = lock_job
+      if queue && job
+        QC.log_yield(:at => "work", :job => job[:id]) do
+          process(queue, job)
+        end
+      end
+    end
+
+    # Attempt to lock a job in the queue's table.
+    # If a job can be locked, this method returns an array with
+    # 2 elements. The first element is the queue from which the job was locked
+    # and the second is a hash representation of the job.
+    # If a job is returned, its locked_at column has been set in the
+    # job's row. It is the caller's responsibility to delete the job row
+    # from the table when the job is complete.
+    def lock_job
+      log(:at => "lock_job")
+      job = nil
+      while @running
+        @queues.each do |queue|
+          if job = queue.lock
+            return [queue, job]
+          end
+        end
+        @conn_adapter.wait(@wait_interval, *@queues.map {|q| q.name})
+      end
+    end
+
+    # A job is processed by evaluating the target code.
+    # if the job is evaluated with no exceptions
+    # then it is deleted from the queue.
+    # If the job has raised an exception the responsibility of what
+    # to do with the job is delegated to Worker#handle_failure.
+    # If the job is not finished and an INT signal is trapped,
+    # this method will unlock the job in the queue.
+    def process(queue, job)
+      start = Time.now
+      finished = false
+      begin
+        call(job).tap do
+          queue.delete(job[:id])
+          finished = true
+        end
+      rescue => e
+        handle_failure(job, e)
+        finished = true
+      ensure
+        if !finished
+          queue.unlock(job[:id])
+        end
+        ttp = Integer((Time.now - start) * 1000)
+        QC.measure("time-to-process=#{ttp} source=#{queue.name}")
+      end
+    end
+
+    # Each job includes a method column. We will use ruby's eval
+    # to grab the ruby object from memory. We send the method to
+    # the object and pass the args.
+    def call(job)
+      args = job[:args]
+      receiver_str, _, message = job[:method].rpartition('.')
+      receiver = eval(receiver_str)
+      receiver.send(message, *args)
+    end
+
+    # This method will be called when an exception
+    # is raised during the execution of the job.
+    def handle_failure(job,e)
+      $stderr.puts("count#qc.job-error=1 job=#{job} error=#{e.inspect} at=#{e.backtrace.first}")
+    end
+
+    # This method should be overriden if
+    # your worker is forking and you need to
+    # re-establish database connections
+    def setup_child
+      log(:at => "setup_child")
+    end
+
+    def log(data)
+      QC.log(data)
+    end
+
+    private
+
+    def setup_queues(adapter, queue, queues, top_bound)
+      names = queues.length > 0 ? queues : [queue]
+      names.map do |name|
+        QC::Queue.new(name, top_bound).tap do |q|
+          q.conn_adapter = adapter
+        end
+      end
+    end
+
+  end
+end
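
A sketch of running and customizing the worker defined above; the subclass, queue names, and hooks used here are illustrative:

    require 'queue_classic'

    # Simplest form, as noted in the #start comment: QC::Worker.new.start

    class MyWorker < QC::Worker
      # Runs in the forked child before working a job (see #fork_and_work);
      # re-establish any database connections your jobs need here.
      def setup_child
        ActiveRecord::Base.establish_connection if defined?(ActiveRecord::Base)
        super
      end

      def handle_failure(job, e)
        # hook an error tracker here; the default writes a line to $stderr
        super
      end
    end

    worker = MyWorker.new(q_names: %w(mailers default), fork_worker: true)
    trap("TERM") { worker.stop }
    worker.start
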