queue_classic 3.1.0.RC1 → 4.0.0.pre.beta1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. checksums.yaml +5 -5
  2. data/.circleci/config.yml +192 -0
  3. data/.gitignore +11 -0
  4. data/CHANGELOG.md +192 -0
  5. data/CODE_OF_CONDUCT.md +46 -0
  6. data/CONTRIBUTING.md +17 -0
  7. data/Gemfile +12 -0
  8. data/LICENSE.txt +20 -0
  9. data/{readme.md → README.md} +120 -83
  10. data/Rakefile +16 -0
  11. data/lib/generators/queue_classic/install_generator.rb +6 -0
  12. data/lib/generators/queue_classic/templates/add_queue_classic.rb +3 -1
  13. data/lib/generators/queue_classic/templates/update_queue_classic_3_0_0.rb +3 -1
  14. data/lib/generators/queue_classic/templates/update_queue_classic_3_0_2.rb +3 -1
  15. data/lib/generators/queue_classic/templates/update_queue_classic_3_1_0.rb +3 -1
  16. data/lib/generators/queue_classic/templates/update_queue_classic_4_0_0.rb +11 -0
  17. data/lib/queue_classic/config.rb +86 -0
  18. data/lib/queue_classic/conn_adapter.rb +37 -16
  19. data/lib/queue_classic/queue.rb +76 -18
  20. data/lib/queue_classic/railtie.rb +2 -0
  21. data/lib/queue_classic/setup.rb +24 -7
  22. data/lib/queue_classic/tasks.rb +7 -8
  23. data/lib/queue_classic/version.rb +5 -0
  24. data/lib/queue_classic/worker.rb +18 -12
  25. data/lib/queue_classic.rb +50 -58
  26. data/queue_classic.gemspec +25 -0
  27. data/sql/create_table.sql +7 -14
  28. data/sql/ddl.sql +6 -82
  29. data/sql/downgrade_from_4_0_0.sql +88 -0
  30. data/sql/update_to_3_0_0.sql +5 -5
  31. data/sql/update_to_3_1_0.sql +6 -6
  32. data/sql/update_to_4_0_0.sql +6 -0
  33. data/test/benchmark_test.rb +15 -12
  34. data/test/config_test.rb +123 -0
  35. data/test/helper.rb +47 -3
  36. data/test/helper.sql +25 -0
  37. data/test/lib/queue_classic_rails_connection_test.rb +16 -10
  38. data/test/lib/queue_classic_test.rb +15 -3
  39. data/test/lib/queue_classic_test_with_activerecord_typecast.rb +21 -0
  40. data/test/queue_test.rb +127 -4
  41. data/test/rails-tests/.gitignore +2 -0
  42. data/test/rails-tests/rails523.sh +23 -0
  43. data/test/worker_test.rb +153 -35
  44. metadata +51 -7
data/lib/queue_classic/queue.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require_relative 'conn_adapter'
 require 'json'
 require 'time'
@@ -5,11 +7,11 @@ require 'time'
 module QC
   # The queue class maps a queue abstraction onto a database table.
   class Queue
-
     attr_reader :name, :top_bound
+
     def initialize(name, top_bound=nil)
       @name = name
-      @top_bound = top_bound || QC::TOP_BOUND
+      @top_bound = top_bound || QC.top_bound
     end
 
     def conn_adapter=(a)
@@ -34,10 +36,20 @@
     # The args are stored as a collection and then splatted inside the worker.
     # Examples of args include: `'hello world'`, `['hello world']`,
     # `'hello', 'world'`.
+    # This method returns a hash with the id of the enqueued job.
     def enqueue(method, *args)
       QC.log_yield(:measure => 'queue.enqueue') do
-        s = "INSERT INTO #{TABLE_NAME} (q_name, method, args) VALUES ($1, $2, $3)"
-        conn_adapter.execute(s, name, method, JSON.dump(args))
+        s = "INSERT INTO #{QC.table_name} (q_name, method, args) VALUES ($1, $2, $3) RETURNING id"
+        begin
+          retries ||= 0
+          conn_adapter.execute(s, name, method, JSON.dump(args))
+        rescue PG::Error
+          if (retries += 1) < 2
+            retry
+          else
+            raise
+          end
+        end
       end
     end
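With `RETURNING id`, the enqueue methods now hand back the new row's id, and a single retry guards against a transient `PG::Error`. A minimal usage sketch, assuming a configured connection; `Greeter.greet` is a made-up job method:

    require "queue_classic"

    # Enqueue onto the default queue; per the comment above, the returned
    # hash carries the id of the enqueued job.
    job = QC.enqueue("Greeter.greet", "hello", "world")
    puts job["id"]

    # Or schedule on a named queue roughly five minutes from now.
    mailer = QC::Queue.new("mailer")
    mailer.enqueue_in(300, "Greeter.greet", "hello")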
 
@@ -46,8 +58,9 @@
     # The time argument must be a Time object or a float timestamp. The method
     # and args argument must be in the form described in the documentation for
     # the #enqueue method.
+    # This method returns a hash with the id of the enqueued job.
     def enqueue_at(timestamp, method, *args)
-      offset = Time.at(timestamp) - Time.now
+      offset = Time.at(timestamp).to_i - Time.now.to_i
       enqueue_in(offset, method, *args)
     end
 
@@ -56,25 +69,55 @@
     # The seconds argument must be an integer. The method and args argument
     # must be in the form described in the documentation for the #enqueue
     # method.
+    # This method returns a hash with the id of the enqueued job.
     def enqueue_in(seconds, method, *args)
       QC.log_yield(:measure => 'queue.enqueue') do
-        s = "INSERT INTO #{TABLE_NAME} (q_name, method, args, scheduled_at)
-             VALUES ($1, $2, $3, now() + interval '#{seconds.to_i} seconds')"
-        res = conn_adapter.execute(s, name, method, JSON.dump(args))
+        s = "INSERT INTO #{QC.table_name} (q_name, method, args, scheduled_at)
+             VALUES ($1, $2, $3, now() + interval '#{seconds.to_i} seconds')
+             RETURNING id"
+        begin
+          retries ||= 0
+          conn_adapter.execute(s, name, method, JSON.dump(args))
+        rescue PG::Error
+          if (retries += 1) < 2
+            retry
+          else
+            raise
+          end
+        end
       end
     end
 
     def lock
       QC.log_yield(:measure => 'queue.lock') do
-        s = "SELECT * FROM lock_head($1, $2)"
-        if r = conn_adapter.execute(s, name, top_bound)
+        s = <<~SQL
+          WITH selected_job AS (
+            SELECT id
+            FROM queue_classic_jobs
+            WHERE
+              locked_at IS NULL AND
+              q_name = $1 AND
+              scheduled_at <= now()
+            LIMIT 1
+            FOR NO KEY UPDATE SKIP LOCKED
+          )
+          UPDATE queue_classic_jobs
+          SET
+            locked_at = now(),
+            locked_by = pg_backend_pid()
+          FROM selected_job
+          WHERE queue_classic_jobs.id = selected_job.id
+          RETURNING *
+        SQL
+
+        if r = conn_adapter.execute(s, name)
           {}.tap do |job|
             job[:id] = r["id"]
             job[:q_name] = r["q_name"]
             job[:method] = r["method"]
             job[:args] = JSON.parse(r["args"])
             if r["scheduled_at"]
-              job[:scheduled_at] = Time.parse(r["scheduled_at"])
+              job[:scheduled_at] = r["scheduled_at"].kind_of?(Time) ? r["scheduled_at"] : Time.parse(r["scheduled_at"])
              ttl = Integer((Time.now - job[:scheduled_at]) * 1000)
              QC.measure("time-to-lock=#{ttl}ms source=#{name}")
            end
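Locking no longer goes through the PL/pgSQL `lock_head` function; a single `UPDATE ... FROM (SELECT ... FOR NO KEY UPDATE SKIP LOCKED)` statement claims one ready row, so concurrent workers simply skip rows another backend already holds. A rough sketch of the consumer side of this API (normally `QC::Worker` drives this loop for you; `Greeter.greet` is illustrative):

    queue = QC::Queue.new("default")

    job = queue.lock
    # => nil (or falsy) when nothing is ready, otherwise a hash such as
    #    {:id=>"42", :q_name=>"default", :method=>"Greeter.greet", :args=>["hello"]}
    if job
      begin
        # ... perform the work described by job[:method] and job[:args] ...
        queue.delete(job[:id])   # success: remove the row
      rescue => e
        queue.unlock(job[:id])   # let another worker pick it up again
        raise
      end
    end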
@@ -85,31 +128,46 @@
 
     def unlock(id)
       QC.log_yield(:measure => 'queue.unlock') do
-        s = "UPDATE #{TABLE_NAME} set locked_at = null where id = $1"
+        s = "UPDATE #{QC.table_name} SET locked_at = NULL WHERE id = $1"
         conn_adapter.execute(s, id)
       end
     end
 
     def delete(id)
       QC.log_yield(:measure => 'queue.delete') do
-        conn_adapter.execute("DELETE FROM #{TABLE_NAME} where id = $1", id)
+        conn_adapter.execute("DELETE FROM #{QC.table_name} WHERE id = $1", id)
       end
     end
 
     def delete_all
       QC.log_yield(:measure => 'queue.delete_all') do
-        s = "DELETE FROM #{TABLE_NAME} WHERE q_name = $1"
+        s = "DELETE FROM #{QC.table_name} WHERE q_name = $1"
         conn_adapter.execute(s, name)
       end
     end
 
+    # Count the number of jobs in a specific queue. This returns all
+    # jobs, including ones that are scheduled in the future.
     def count
-      QC.log_yield(:measure => 'queue.count') do
-        s = "SELECT COUNT(*) FROM #{TABLE_NAME} WHERE q_name = $1"
-        r = conn_adapter.execute(s, name)
+      _count('queue.count', "SELECT COUNT(*) FROM #{QC.table_name} WHERE q_name = $1")
+    end
+
+    # Count the number of jobs in a specific queue, except ones scheduled in the future
+    def count_ready
+      _count('queue.count_scheduled', "SELECT COUNT(*) FROM #{QC.table_name} WHERE q_name = $1 AND scheduled_at <= now()")
+    end
+
+    # Count the number of jobs in a specific queue scheduled in the future
+    def count_scheduled
+      _count('queue.count_scheduled', "SELECT COUNT(*) FROM #{QC.table_name} WHERE q_name = $1 AND scheduled_at > now()")
+    end
+
+    private
+    def _count(metric_name, sql)
+      QC.log_yield(measure: metric_name) do
+        r = conn_adapter.execute(sql, name)
         r["count"].to_i
       end
     end
-
   end
 end
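`count` now includes jobs scheduled for the future, while the new `count_ready` and `count_scheduled` split that total into jobs runnable now versus jobs scheduled for later. A quick sketch against an empty "default" queue (`Greeter.greet` is again a made-up job):

    queue = QC::Queue.new("default")
    queue.enqueue("Greeter.greet", "now")            # runnable immediately
    queue.enqueue_in(3600, "Greeter.greet", "later") # scheduled an hour out

    queue.count            # => 2  all jobs, scheduled or not
    queue.count_ready      # => 1  scheduled_at <= now()
    queue.count_scheduled  # => 1  scheduled_at > now()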
data/lib/queue_classic/railtie.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require 'rails/railtie'
 
 module QC
data/lib/queue_classic/setup.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module QC
   module Setup
     Root = File.expand_path("../..", File.dirname(__FILE__))
@@ -8,51 +10,66 @@ module QC
     DowngradeFrom_3_0_0 = File.join(Root, "/sql/downgrade_from_3_0_0.sql")
     UpgradeTo_3_1_0 = File.join(Root, "/sql/update_to_3_1_0.sql")
     DowngradeFrom_3_1_0 = File.join(Root, "/sql/downgrade_from_3_1_0.sql")
+    UpgradeTo_4_0_0 = File.join(Root, "/sql/update_to_4_0_0.sql")
+    DowngradeFrom_4_0_0 = File.join(Root, "/sql/downgrade_from_4_0_0.sql")
 
     def self.create(c = QC::default_conn_adapter.connection)
-      conn = QC::ConnAdapter.new(c)
+      conn = QC::ConnAdapter.new(connection: c)
       conn.execute(File.read(CreateTable))
       conn.execute(File.read(SqlFunctions))
       conn.disconnect if c.nil? #Don't close a conn we didn't create.
     end
 
     def self.drop(c = QC::default_conn_adapter.connection)
-      conn = QC::ConnAdapter.new(c)
+      conn = QC::ConnAdapter.new(connection: c)
       conn.execute("DROP TABLE IF EXISTS queue_classic_jobs CASCADE")
       conn.execute(File.read(DropSqlFunctions))
       conn.disconnect if c.nil? #Don't close a conn we didn't create.
     end
 
     def self.update(c = QC::default_conn_adapter.connection)
-      conn = QC::ConnAdapter.new(c)
+      conn = QC::ConnAdapter.new(connection: c)
       conn.execute(File.read(UpgradeTo_3_0_0))
       conn.execute(File.read(UpgradeTo_3_1_0))
+      conn.execute(File.read(UpgradeTo_4_0_0))
       conn.execute(File.read(DropSqlFunctions))
       conn.execute(File.read(SqlFunctions))
     end
 
     def self.update_to_3_0_0(c = QC::default_conn_adapter.connection)
-      conn = QC::ConnAdapter.new(c)
+      conn = QC::ConnAdapter.new(connection: c)
       conn.execute(File.read(UpgradeTo_3_0_0))
       conn.execute(File.read(DropSqlFunctions))
       conn.execute(File.read(SqlFunctions))
     end
 
     def self.downgrade_from_3_0_0(c = QC::default_conn_adapter.connection)
-      conn = QC::ConnAdapter.new(c)
+      conn = QC::ConnAdapter.new(connection: c)
       conn.execute(File.read(DowngradeFrom_3_0_0))
     end
 
     def self.update_to_3_1_0(c = QC::default_conn_adapter.connection)
-      conn = QC::ConnAdapter.new(c)
+      conn = QC::ConnAdapter.new(connection: c)
       conn.execute(File.read(UpgradeTo_3_1_0))
       conn.execute(File.read(DropSqlFunctions))
       conn.execute(File.read(SqlFunctions))
     end
 
     def self.downgrade_from_3_1_0(c = QC::default_conn_adapter.connection)
-      conn = QC::ConnAdapter.new(c)
+      conn = QC::ConnAdapter.new(connection: c)
       conn.execute(File.read(DowngradeFrom_3_1_0))
     end
+
+    def self.update_to_4_0_0(c = QC::default_conn_adapter.connection)
+      conn = QC::ConnAdapter.new(connection: c)
+      conn.execute(File.read(UpgradeTo_4_0_0))
+      conn.execute(File.read(DropSqlFunctions))
+      conn.execute(File.read(SqlFunctions))
+    end
+
+    def self.downgrade_from_4_0_0(c = QC::default_conn_adapter.connection)
+      conn = QC::ConnAdapter.new(connection: c)
+      conn.execute(File.read(DowngradeFrom_4_0_0))
+    end
   end
 end
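Together with the new generator template (`update_queue_classic_4_0_0.rb` in the file list above), these helpers are meant to be called from a migration. A hedged sketch of such a migration; the class name and the `[5.2]` migration version are illustrative:

    class UpdateQueueClassic400 < ActiveRecord::Migration[5.2]
      def up
        QC::Setup.update_to_4_0_0    # applies update_to_4_0_0.sql and reinstalls the SQL functions
      end

      def down
        QC::Setup.downgrade_from_4_0_0
      end
    end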
data/lib/queue_classic/tasks.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 task :environment
 
 namespace :jobs do
@@ -6,16 +8,13 @@ namespace :jobs do
 end
 
 namespace :qc do
-  desc "Start a new worker for the (default or $QUEUE) queue"
+  desc "Start a new worker for the (default or $QUEUE / $QUEUES) queue"
   task :work => :environment do
-    @worker = QC::Worker.new
+    @worker = QC.default_worker_class.new
 
     trap('INT') do
-      $stderr.puts("Received INT. Shutting down.")
-      if !@worker.running
-        $stderr.puts("Worker has stopped running. Exit.")
-        exit(1)
-      end
+      $stderr.puts("Received INT. Shutting down.")
+      abort("Worker has stopped running. Exit.") unless @worker.running
       @worker.stop
     end
 
@@ -27,7 +26,7 @@ namespace :qc do
     @worker.start
   end
 
-  desc "Returns the number of jobs in the (default or QUEUE) queue"
+  desc "Returns the number of jobs in the (default or $QUEUE / $QUEUES) queue"
   task :count => :environment do
     puts QC.default_queue.count
   end
data/lib/queue_classic/version.rb ADDED
@@ -0,0 +1,5 @@
+# frozen_string_literal: true
+
+module QC
+  VERSION = "4.0.0-beta1"
+end
data/lib/queue_classic/worker.rb CHANGED
@@ -1,3 +1,6 @@
+# frozen_string_literal: true
+
+# -*- coding: utf-8 -*-
 require_relative 'queue'
 require_relative 'conn_adapter'
 
@@ -11,24 +14,24 @@ module QC
     # This method takes a single hash argument. The following keys are read:
     # fork_worker:: Worker forks each job execution.
     # wait_interval:: Time to wait between failed lock attempts
-    # connection:: PGConn object.
+    # connection:: PG::Connection object.
     # q_name:: Name of a single queue to process.
     # q_names:: Names of queues to process. Will process left to right.
     # top_bound:: Offset to the head of the queue. 1 == strict FIFO.
     def initialize(args={})
-      @fork_worker = args[:fork_worker] || QC::FORK_WORKER
-      @wait_interval = args[:wait_interval] || QC::WAIT_TIME
+      @fork_worker = args[:fork_worker] || QC.fork_worker?
+      @wait_interval = args[:wait_interval] || QC.wait_time
 
       if args[:connection]
-        @conn_adapter = ConnAdapter.new(args[:connection])
+        @conn_adapter = ConnAdapter.new(connection: args[:connection])
       else
         @conn_adapter = QC.default_conn_adapter
       end
 
       @queues = setup_queues(@conn_adapter,
-        (args[:q_name] || QC::QUEUE),
-        (args[:q_names] || QC::QUEUES),
-        (args[:top_bound] || QC::TOP_BOUND))
+        (args[:q_name] || QC.queue),
+        (args[:q_names] || QC.queues),
+        (args[:top_bound] || QC.top_bound))
       log(args.merge(:at => "worker_initialized"))
       @running = true
     end
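The documented keys can still be passed explicitly instead of relying on the `QC_*` environment variables; note that the adapter is now built with a `connection:` keyword. A sketch with illustrative queue names and connection source:

    require "pg"
    require "queue_classic"

    worker = QC::Worker.new(
      q_names:       ["priority", "default"],  # drained left to right
      wait_interval: 2,                        # seconds to wait between failed lock attempts
      top_bound:     1,                        # 1 == strict FIFO
      connection:    PG.connect(ENV["DATABASE_URL"])
    )

    trap("INT") { worker.stop }
    worker.start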
@@ -101,7 +104,7 @@ module QC
     # then it is deleted from the queue.
     # If the job has raised an exception the responsibility of what
     # to do with the job is delegated to Worker#handle_failure.
-    # If the job is not finished and an INT signal is traped,
+    # If the job is not finished and an INT signal is trapped,
     # this method will unlock the job in the queue.
     def process(queue, job)
       start = Time.now
@@ -111,7 +114,10 @@ module QC
         queue.delete(job[:id])
         finished = true
       end
-    rescue => e
+    rescue StandardError, ScriptError, NoMemoryError => e
+      # We really only want to unlock the job for signal and system exit
+      # exceptions. If we encounter a ScriptError or a NoMemoryError any
+      # future run will likely encounter the same error.
       handle_failure(job, e)
       finished = true
     ensure
@@ -133,10 +139,10 @@ module QC
       receiver.send(message, *args)
     end
 
-    # This method will be called when an exception
-    # is raised during the execution of the job.
+    # This method will be called when a StandardError, ScriptError or
+    # NoMemoryError is raised during the execution of the job.
     def handle_failure(job,e)
-      $stderr.puts("count#qc.job-error=1 job=#{job} error=#{e.inspect}")
+      $stderr.puts("count#qc.job-error=1 job=#{job} error=#{e.inspect} at=#{e.backtrace.first}")
     end
 
     # This method should be overriden if
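Because `handle_failure` only writes a log line (now including the first backtrace frame), applications that need real error reporting are expected to override it in a `QC::Worker` subclass. A sketch; `ErrorNotifier` is a stand-in for whatever reporting your app uses:

    class MyWorker < QC::Worker
      def handle_failure(job, e)
        super  # keep the "count#qc.job-error=1 ..." line on stderr
        ErrorNotifier.notify(e, job_id: job[:id], method: job[:method])  # hypothetical reporter
      end
    end

    # Run it directly, or arrange for QC.default_worker_class (used by rake qc:work)
    # to resolve to MyWorker.
    MyWorker.new.start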
data/lib/queue_classic.rb CHANGED
@@ -1,40 +1,44 @@
+# frozen_string_literal: true
+
+require_relative "queue_classic/config"
+
 module QC
-  # You can use the APP_NAME to query for
-  # postgres related process information in the
-  # pg_stat_activity table.
-  APP_NAME = ENV["QC_APP_NAME"] || "queue_classic"
-
-  # Number of seconds to block on the listen chanel for new jobs.
-  WAIT_TIME = (ENV["QC_LISTEN_TIME"] || 5).to_i
-
-  # Why do you want to change the table name?
-  # Just deal with the default OK?
-  # If you do want to change this, you will
-  # need to update the PL/pgSQL lock_head() function.
-  # Come on. Don't do it.... Just stick with the default.
-  TABLE_NAME = "queue_classic_jobs"
-
-  # Each row in the table will have a column that
-  # notes the queue. You can point your workers
-  # at different queues but only one at a time.
-  QUEUE = ENV["QUEUE"] || "default"
-  QUEUES = (ENV["QUEUES"] && ENV["QUEUES"].split(",")) || []
-
-  # Set this to 1 for strict FIFO.
-  # There is nothing special about 9....
-  TOP_BOUND = (ENV["QC_TOP_BOUND"] || 9).to_i
-
-  # Set this variable if you wish for
-  # the worker to fork a UNIX process for
-  # each locked job. Remember to re-establish
-  # any database connections. See the worker
-  # for more details.
-  FORK_WORKER = !ENV["QC_FORK_WORKER"].nil?
+  extend QC::Config
+
+  # Assign constants for backwards compatibility.
+  # They should no longer be used. Prefer the corresponding methods.
+  # See +QC::Config+ for more details.
+  DEPRECATED_CONSTANTS = {
+    :APP_NAME => :app_name,
+    :WAIT_TIME => :wait_time,
+    :TABLE_NAME => :table_name,
+    :QUEUE => :queue,
+    :QUEUES => :queues,
+    :TOP_BOUND => :top_bound,
+    :FORK_WORKER => :fork_worker?,
+  }
+
+  def self.const_missing(const_name)
+    if DEPRECATED_CONSTANTS.key? const_name
+      config_method = DEPRECATED_CONSTANTS[const_name]
+      $stderr.puts <<-MSG
+        The constant QC::#{const_name} is deprecated and will be removed in the future.
+        Please use the method QC.#{config_method} instead.
+      MSG
+      QC.public_send config_method
+    else
+      super
+    end
+  end
 
   # Defer method calls on the QC module to the
   # default queue. This facilitates QC.enqueue()
   def self.method_missing(sym, *args, &block)
-    default_queue.send(sym, *args, &block)
+    if default_queue.respond_to? sym
+      default_queue.public_send(sym, *args, &block)
+    else
+      super
+    end
   end
 
   # Ensure QC.respond_to?(:enqueue) equals true (ruby 1.9 only)
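In practice the `const_missing` shim keeps old code working while nudging it toward the config methods. A sketch of what using a deprecated constant now looks like (the defaults shown are the ones from the removed constants and can be overridden with the `QC_*` / `QUEUES` environment variables):

    require "queue_classic"

    QC::TOP_BOUND
    # still returns the value, but prints to stderr:
    #   The constant QC::TOP_BOUND is deprecated and will be removed in the future.
    #   Please use the method QC.top_bound instead.

    QC.top_bound    # => 9 unless QC_TOP_BOUND is set
    QC.table_name   # => "queue_classic_jobs"
    QC.queues       # => [] unless QUEUES is set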
@@ -42,37 +46,21 @@ module QC
     default_queue.respond_to?(method_name)
   end
 
-  def self.default_queue=(queue)
-    @default_queue = queue
-  end
-
-  def self.default_queue
-    @default_queue ||= begin
-      Queue.new(QUEUE)
-    end
-  end
-
   def self.has_connection?
-    !@conn_adapter.nil?
+    !default_conn_adapter.nil?
   end
 
   def self.default_conn_adapter
-    return @conn_adapter if defined?(@conn_adapter) && @conn_adapter
-    if rails_connection_sharing_enabled?
-      @conn_adapter = ConnAdapter.new(ActiveRecord::Base.connection.raw_connection)
-    else
-      @conn_adapter = ConnAdapter.new
-    end
-    @conn_adapter
+    Thread.current[:qc_conn_adapter] ||= ConnAdapter.new(active_record_connection_share: rails_connection_sharing_enabled?)
   end
 
   def self.default_conn_adapter=(conn)
-    @conn_adapter = conn
+    Thread.current[:qc_conn_adapter] = conn
   end
 
   def self.log_yield(data)
+    t0 = Time.now
     begin
-      t0 = Time.now
       yield
     rescue => e
       log({:at => "error", :error => e.inspect}.merge(data))
@@ -106,14 +94,18 @@ module QC
   # This will unlock all jobs any postgres' PID that is not existing anymore
   # to prevent any infinitely locked jobs
   def self.unlock_jobs_of_dead_workers
-    @conn_adapter.execute("UPDATE #{QC::TABLE_NAME} SET locked_at = NULL, locked_by = NULL WHERE locked_by NOT IN (SELECT pid FROM pg_stat_activity);")
+    default_conn_adapter.execute("UPDATE #{QC.table_name} SET locked_at = NULL, locked_by = NULL WHERE locked_by NOT IN (SELECT pid FROM pg_stat_activity);")
   end
 
-  private
-  def self.rails_connection_sharing_enabled?
-    enabled = ENV.fetch('QC_RAILS_DATABASE', 'true') != 'false'
-    return false unless enabled
-    return Object.const_defined?("ActiveRecord") && ActiveRecord::Base.respond_to?("connection")
+  # private class methods
+  class << self
+    private
+
+    def rails_connection_sharing_enabled?
+      enabled = ENV.fetch('QC_RAILS_DATABASE', 'true') != 'false'
+      return false unless enabled
+      return Object.const_defined?("ActiveRecord") && ActiveRecord::Base.respond_to?("connection")
+    end
   end
 end
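The adapter now lives in `Thread.current`, so each thread lazily gets its own, sharing ActiveRecord's raw connection unless `QC_RAILS_DATABASE=false`. You can also hand queue_classic a dedicated connection yourself; a sketch, with `DATABASE_URL` as an illustrative source of the connection string:

    require "pg"
    require "queue_classic"

    # Give QC its own PG connection for this thread instead of piggybacking
    # on ActiveRecord; assigning the adapter directly bypasses the Rails check.
    QC.default_conn_adapter = QC::ConnAdapter.new(connection: PG.connect(ENV["DATABASE_URL"]))

    QC.enqueue("Kernel.puts", "hello from a dedicated connection")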
 
data/queue_classic.gemspec ADDED
@@ -0,0 +1,25 @@
+# coding: utf-8
+lib = File.expand_path('../lib', __FILE__)
+$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
+require 'queue_classic/version'
+
+Gem::Specification.new do |spec|
+  spec.name = "queue_classic"
+  spec.email = "r@32k.io"
+  spec.version = QC::VERSION
+  spec.description = "queue_classic is a queueing library for Ruby apps. (Rails, Sinatra, Etc...) queue_classic features asynchronous job polling, database maintained locks and no ridiculous dependencies. As a matter of fact, queue_classic only requires pg."
+  spec.summary = "Simple, efficient worker queue for Ruby & PostgreSQL."
+  spec.authors = ["Ryan Smith (♠ ace hacker)"]
+  spec.homepage = "http://github.com/QueueClassic/queue_classic"
+  spec.license = "MIT"
+
+  spec.files = `git ls-files -z`.split("\x0")
+  spec.executables = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
+  spec.test_files = spec.files.grep(%r{^(test|spec|features)/})
+  spec.require_paths = ["lib"]
+
+  spec.require_paths = %w[lib]
+
+  spec.add_dependency "pg", ">= 0.17", "< 2.0"
+  spec.add_development_dependency "activerecord", ">= 5.0.0", "< 6.1"
+end
data/sql/create_table.sql CHANGED
@@ -1,24 +1,17 @@
-do $$ begin
+DO $$ BEGIN
 
 CREATE TABLE queue_classic_jobs (
   id bigserial PRIMARY KEY,
-  q_name text not null check (length(q_name) > 0),
-  method text not null check (length(method) > 0),
-  args text not null,
+  q_name text NOT NULL CHECK (length(q_name) > 0),
+  method text NOT NULL CHECK (length(method) > 0),
+  args jsonb NOT NULL,
   locked_at timestamptz,
   locked_by integer,
-  created_at timestamptz default now(),
-  scheduled_at timestamptz default now()
+  created_at timestamptz DEFAULT now(),
+  scheduled_at timestamptz DEFAULT now()
 );
 
-  -- If json type is available, use it for the args column.
-  perform * from pg_type where typname = 'json';
-  if found then
-    alter table queue_classic_jobs alter column args type json using (args::json);
-  end if;
-
-end $$ language plpgsql;
+END $$ LANGUAGE plpgsql;
 
 CREATE INDEX idx_qc_on_name_only_unlocked ON queue_classic_jobs (q_name, id) WHERE locked_at IS NULL;
 CREATE INDEX idx_qc_on_scheduled_at_only_unlocked ON queue_classic_jobs (scheduled_at, id) WHERE locked_at IS NULL;
-
data/sql/ddl.sql CHANGED
@@ -1,84 +1,8 @@
--- We are declaring the return type to be queue_classic_jobs.
--- This is ok since I am assuming that all of the users added queues will
--- have identical columns to queue_classic_jobs.
--- When QC supports queues with columns other than the default, we will have to change this.
-
-CREATE OR REPLACE FUNCTION lock_head(q_name varchar, top_boundary integer)
-RETURNS SETOF queue_classic_jobs AS $$
-DECLARE
-  unlocked bigint;
-  relative_top integer;
-  job_count integer;
-BEGIN
-  -- The purpose is to release contention for the first spot in the table.
-  -- The select count(*) is going to slow down dequeue performance but allow
-  -- for more workers. Would love to see some optimization here...
-
-  EXECUTE 'SELECT count(*) FROM '
-    || '(SELECT * FROM queue_classic_jobs '
-    || ' WHERE locked_at IS NULL'
-    || ' AND q_name = '
-    || quote_literal(q_name)
-    || ' AND scheduled_at <= '
-    || quote_literal(now())
-    || ' LIMIT '
-    || quote_literal(top_boundary)
-    || ') limited'
-  INTO job_count;
-
-  SELECT TRUNC(random() * (top_boundary - 1))
-  INTO relative_top;
-
-  IF job_count < top_boundary THEN
-    relative_top = 0;
-  END IF;
-
-  LOOP
-    BEGIN
-      EXECUTE 'SELECT id FROM queue_classic_jobs '
-        || ' WHERE locked_at IS NULL'
-        || ' AND q_name = '
-        || quote_literal(q_name)
-        || ' AND scheduled_at <= '
-        || quote_literal(now())
-        || ' ORDER BY id ASC'
-        || ' LIMIT 1'
-        || ' OFFSET ' || quote_literal(relative_top)
-        || ' FOR UPDATE NOWAIT'
-      INTO unlocked;
-      EXIT;
-    EXCEPTION
-      WHEN lock_not_available THEN
-        -- do nothing. loop again and hope we get a lock
-    END;
-  END LOOP;
-
-  RETURN QUERY EXECUTE 'UPDATE queue_classic_jobs '
-    || ' SET locked_at = (CURRENT_TIMESTAMP),'
-    || ' locked_by = (select pg_backend_pid())'
-    || ' WHERE id = $1'
-    || ' AND locked_at is NULL'
-    || ' RETURNING *'
-  USING unlocked;
-
-  RETURN;
-END;
-$$ LANGUAGE plpgsql;
-
-CREATE OR REPLACE FUNCTION lock_head(tname varchar)
-RETURNS SETOF queue_classic_jobs AS $$
-BEGIN
-  RETURN QUERY EXECUTE 'SELECT * FROM lock_head($1,10)' USING tname;
-END;
-$$ LANGUAGE plpgsql;
-
 -- queue_classic_notify function and trigger
-create function queue_classic_notify() returns trigger as $$ begin
-  perform pg_notify(new.q_name, '');
-  return null;
-end $$ language plpgsql;
+CREATE FUNCTION queue_classic_notify() RETURNS TRIGGER AS $$ BEGIN
+  perform pg_notify(new.q_name, ''); RETURN NULL;
+END $$ LANGUAGE plpgsql;
 
-create trigger queue_classic_notify
-after insert on queue_classic_jobs
-for each row
-execute procedure queue_classic_notify();
+CREATE TRIGGER queue_classic_notify
+AFTER INSERT ON queue_classic_jobs FOR EACH ROW
+EXECUTE PROCEDURE queue_classic_notify();