que 0.14.3 → 1.0.0.beta
- checksums.yaml +5 -5
- data/.gitignore +2 -0
- data/CHANGELOG.md +108 -14
- data/LICENSE.txt +1 -1
- data/README.md +49 -45
- data/bin/command_line_interface.rb +239 -0
- data/bin/que +8 -82
- data/docs/README.md +2 -0
- data/docs/active_job.md +6 -0
- data/docs/advanced_setup.md +7 -64
- data/docs/command_line_interface.md +45 -0
- data/docs/error_handling.md +65 -18
- data/docs/inspecting_the_queue.md +30 -80
- data/docs/job_helper_methods.md +27 -0
- data/docs/logging.md +3 -22
- data/docs/managing_workers.md +6 -61
- data/docs/middleware.md +15 -0
- data/docs/migrating.md +4 -7
- data/docs/multiple_queues.md +8 -4
- data/docs/shutting_down_safely.md +1 -1
- data/docs/using_plain_connections.md +39 -15
- data/docs/using_sequel.md +5 -3
- data/docs/writing_reliable_jobs.md +15 -24
- data/lib/que.rb +98 -182
- data/lib/que/active_job/extensions.rb +97 -0
- data/lib/que/active_record/connection.rb +51 -0
- data/lib/que/active_record/model.rb +48 -0
- data/lib/que/connection.rb +179 -0
- data/lib/que/connection_pool.rb +78 -0
- data/lib/que/job.rb +107 -156
- data/lib/que/job_cache.rb +240 -0
- data/lib/que/job_methods.rb +168 -0
- data/lib/que/listener.rb +176 -0
- data/lib/que/locker.rb +466 -0
- data/lib/que/metajob.rb +47 -0
- data/lib/que/migrations.rb +24 -17
- data/lib/que/migrations/4/down.sql +48 -0
- data/lib/que/migrations/4/up.sql +265 -0
- data/lib/que/poller.rb +267 -0
- data/lib/que/rails/railtie.rb +14 -0
- data/lib/que/result_queue.rb +35 -0
- data/lib/que/sequel/model.rb +51 -0
- data/lib/que/utils/assertions.rb +62 -0
- data/lib/que/utils/constantization.rb +19 -0
- data/lib/que/utils/error_notification.rb +68 -0
- data/lib/que/utils/freeze.rb +20 -0
- data/lib/que/utils/introspection.rb +50 -0
- data/lib/que/utils/json_serialization.rb +21 -0
- data/lib/que/utils/logging.rb +78 -0
- data/lib/que/utils/middleware.rb +33 -0
- data/lib/que/utils/queue_management.rb +18 -0
- data/lib/que/utils/transactions.rb +34 -0
- data/lib/que/version.rb +1 -1
- data/lib/que/worker.rb +128 -167
- data/que.gemspec +13 -2
- metadata +37 -80
- data/.rspec +0 -2
- data/.travis.yml +0 -64
- data/Gemfile +0 -24
- data/docs/customizing_que.md +0 -200
- data/lib/generators/que/install_generator.rb +0 -24
- data/lib/generators/que/templates/add_que.rb +0 -13
- data/lib/que/adapters/active_record.rb +0 -40
- data/lib/que/adapters/base.rb +0 -133
- data/lib/que/adapters/connection_pool.rb +0 -16
- data/lib/que/adapters/pg.rb +0 -21
- data/lib/que/adapters/pond.rb +0 -16
- data/lib/que/adapters/sequel.rb +0 -20
- data/lib/que/railtie.rb +0 -16
- data/lib/que/rake_tasks.rb +0 -59
- data/lib/que/sql.rb +0 -170
- data/spec/adapters/active_record_spec.rb +0 -175
- data/spec/adapters/connection_pool_spec.rb +0 -22
- data/spec/adapters/pg_spec.rb +0 -41
- data/spec/adapters/pond_spec.rb +0 -22
- data/spec/adapters/sequel_spec.rb +0 -57
- data/spec/gemfiles/Gemfile.current +0 -19
- data/spec/gemfiles/Gemfile.old +0 -19
- data/spec/gemfiles/Gemfile.older +0 -19
- data/spec/gemfiles/Gemfile.oldest +0 -19
- data/spec/spec_helper.rb +0 -129
- data/spec/support/helpers.rb +0 -25
- data/spec/support/jobs.rb +0 -35
- data/spec/support/shared_examples/adapter.rb +0 -42
- data/spec/support/shared_examples/multi_threaded_adapter.rb +0 -46
- data/spec/unit/configuration_spec.rb +0 -31
- data/spec/unit/connection_spec.rb +0 -14
- data/spec/unit/customization_spec.rb +0 -251
- data/spec/unit/enqueue_spec.rb +0 -245
- data/spec/unit/helper_spec.rb +0 -12
- data/spec/unit/logging_spec.rb +0 -101
- data/spec/unit/migrations_spec.rb +0 -84
- data/spec/unit/pool_spec.rb +0 -365
- data/spec/unit/run_spec.rb +0 -14
- data/spec/unit/states_spec.rb +0 -50
- data/spec/unit/stats_spec.rb +0 -46
- data/spec/unit/transaction_spec.rb +0 -36
- data/spec/unit/work_spec.rb +0 -596
- data/spec/unit/worker_spec.rb +0 -167
- data/tasks/benchmark.rb +0 -3
- data/tasks/rspec.rb +0 -14
- data/tasks/safe_shutdown.rb +0 -67
data/.rspec
DELETED
data/.travis.yml
DELETED
@@ -1,64 +0,0 @@
```yaml
language: ruby
cache: bundler

rvm:
  - 2.2
  - 2.3
  - 2.4
  - 2.5
  - ruby-head
  # The Rubinii aren't installing properly on Travis :/
  # - rbx-2
  # - rbx

matrix:
  allow_failures:
    # Ruby head failures aren't disastrous, because we won't support it until
    # it's released, but it's good to be aware of.
    - rvm: ruby-head

gemfile:
  - spec/gemfiles/Gemfile.current
  - spec/gemfiles/Gemfile.old
  - spec/gemfiles/Gemfile.older
  - spec/gemfiles/Gemfile.oldest

env:
  - PG_VERSION=9.3
  - PG_VERSION=9.4
  - PG_VERSION=9.5
  - PG_VERSION=9.6

before_install:
  # Stop all running Postgreses, so we don't get port conflicts when installing
  # a new version.
  - sudo /etc/init.d/postgresql stop
  # Install whatever version we're using.
  - sudo apt-get install postgresql-$PG_VERSION
  # If we just installed Postgres 9.6 it won't have a proper pg_hba set up, so...
  - sudo mkdir -p /etc/postgresql/9.6/main
  - sudo cp -v /etc/postgresql/9.{5,6}/main/pg_hba.conf
  # Hook up the Postgres version we care about to the right port.
  - sudo sed -i "s/port = ..../port = 5432/g" /etc/postgresql/$PG_VERSION/main/postgresql.conf
  # If we just installed a new Postgres version it'll be running, so make sure
  # they're all stopped, again.
  - sudo /etc/init.d/postgresql stop
  # Start up the one we care about.
  - sudo /etc/init.d/postgresql start $PG_VERSION
  # A newly-installed Postgres won't have a travis user, so create one. But, if
  # one already exists this will fail, so drop it first. Kinda stupid.
  - sudo -u postgres dropuser --if-exists -p 5432 travis &>/dev/null
  - sudo -u postgres createuser -p 5432 travis &>/dev/null

before_script:
  - psql -c 'create database "que-test"' -U postgres

script:
  # Run the complete test suite:
  - bundle exec rspec -fd -b -P ./spec/**/*_spec.rb
  # Run the test suite without adapters/ActiveRecord, to make sure the codebase
  # doesn't accidentally rely on any ActiveSupport-isms.
  - bundle exec rspec -fd -b -P ./spec/unit/*_spec.rb

notifications:
  email: false
```
data/Gemfile
DELETED
@@ -1,24 +0,0 @@
```ruby
source 'https://rubygems.org'

group :development, :test do
  gem 'rake', '< 11.0'

  gem 'activerecord',    :require => nil
  gem 'sequel',          :require => nil
  gem 'connection_pool', :require => nil
  gem 'pond',            :require => nil
  gem 'pg',              :require => nil, :platform => :ruby
  gem 'pg_jruby',        :require => nil, :platform => :jruby
end

group :test do
  gem 'rspec', '~> 2.14.1'
  gem 'pry'
end

platforms :rbx do
  gem 'rubysl', '~> 2.0'
  gem 'json',   '~> 1.8'
end

gemspec
```
data/docs/customizing_que.md
DELETED
@@ -1,200 +0,0 @@

## Customizing Que

One of Que's goals is to be easily extensible and hackable (and if anyone has any suggestions on ways to accomplish that, please [open an issue](https://github.com/chanks/que/issues)). This document demonstrates some of the ways Que can be used to accomplish tasks it wasn't already designed for.

Some of these features may be moved into core Que at some point, depending on how commonly useful they are.

### Recurring Jobs

Que's support for scheduling jobs makes it easy to implement reliable recurring jobs. For example, suppose you want to run a job every hour that processes the users created in that time:

```ruby
class CronJob < Que::Job
  # Default repetition interval in seconds. Can be overridden in
  # subclasses. Can use 1.minute if using Rails.
  INTERVAL = 60

  attr_reader :start_at, :end_at, :run_again_at, :time_range

  def _run
    args = attrs[:args].first
    @start_at, @end_at = Time.at(args.delete('start_at')), Time.at(args.delete('end_at'))
    @run_again_at = @end_at + self.class::INTERVAL
    @time_range = @start_at...@end_at

    super

    args['start_at'] = @end_at.to_f
    args['end_at']   = @run_again_at.to_f
    self.class.enqueue(args, run_at: @run_again_at)
  end
end

class MyCronJob < CronJob
  INTERVAL = 3600

  def run(args)
    User.where(created_at: time_range).each { ... }
  end
end

# To enqueue:
tf = Time.now
t0 = Time.now - 3600
MyCronJob.enqueue :start_at => t0.to_f, :end_at => tf.to_f
```

Note that instead of using Time.now in our database query, and requeueing the job at 1.hour.from_now, we use job arguments to track start and end times. This lets us correct for delays in running the job. Suppose that there's a backlog of priority jobs, or that the worker briefly goes down, and this job, which was supposed to run at 11:00 a.m., isn't run until 11:05 a.m. A lazier implementation would look for users created after 1.hour.ago, and miss those that signed up between 10:00 a.m. and 10:05 a.m.

This also compensates for clock drift. `Time.now` on one of your application servers may not match `Time.now` on another application server, which may not match `now()` on your database server. The best way to stay reliable is to have a single authoritative source on what the current time is, and your best source for authoritative information is always your database (this is why Que uses Postgres' `now()` function when locking jobs, by the way).

Note also the use of the triple-dot range, which results in a query like `SELECT "users".* FROM "users" WHERE ("users"."created_at" >= '2014-01-08 10:00:00.000000' AND "users"."created_at" < '2014-01-08 11:00:00.000000')` instead of a BETWEEN condition. This ensures that a user created at exactly 11:00 a.m. isn't processed twice, by the jobs starting at both 10 a.m. and 11 a.m.

Finally, by passing both the start and end times for the period to be processed, and only using the interval to calculate the period for the following job, we make it easy to change the interval at which the job runs, without the risk of missing or double-processing any users.

### DelayedJob-style Jobs

DelayedJob offers a simple API for delaying methods to objects:

```ruby
@user.delay.activate!(@device)
```

The API is pleasant, but implementing it requires storing marshalled Ruby objects in the database, which is both inefficient and prone to bugs - for example, if you deploy an update that changes the name of an instance variable (a contained, internal change that might seem completely innocuous), the marshalled objects in the database will retain the old instance variable name and will behave unexpectedly when unmarshalled into the new Ruby code.

This is the danger of mixing the ephemeral state of a Ruby object in memory with the more permanent state of a database row. The advantage of Que's API is that, since your arguments are forced through a JSON serialization/deserialization process, it becomes your responsibility when designing a job class to establish an API for yourself (what the arguments to the job are and what they mean) that you will have to stick to in the future.

That said, if you want to queue jobs in the DelayedJob style, that can be done relatively easily:

```ruby
class Delayed < Que::Job
  def run(receiver, method, args)
    Marshal.load(receiver).send method, *Marshal.load(args)
  end
end

class DelayedAction
  def initialize(receiver)
    @receiver = receiver
  end

  def method_missing(method, *args)
    Delayed.enqueue Marshal.dump(@receiver), method, Marshal.dump(args)
  end
end

class Object
  def delay
    DelayedAction.new(self)
  end
end
```

You can replace Marshal with YAML if you like.

### QueueClassic-style Jobs

You may find it a hassle to keep an individual class file for each type of job. QueueClassic has a simpler design, wherein you simply give it a class method to call, like:

```ruby
QC.enqueue("Kernel.puts", "hello world")
```

You can mimic this style with Que by using a simple job class:

```ruby
class Command < Que::Job
  def run(method, *args)
    receiver, message = method.split('.')
    Object.const_get(receiver).send(message, *args)
  end
end

# Then:

Command.enqueue "Kernel.puts", "hello world"
```

### Retaining Finished Jobs

Que deletes jobs from the queue as they are worked, in order to keep the `que_jobs` table and index small and efficient. If you need to hold onto finished jobs, the recommended way to do this is to add a second table to hold them, and then insert them there as they are deleted from the queue. You can use Ruby's inheritance mechanics to do this cleanly:

```ruby
Que.execute "CREATE TABLE finished_jobs AS SELECT * FROM que_jobs LIMIT 0"
# Or, better, use a proper CREATE TABLE with not-null constraints, and add whatever indexes you like.

class MyJobClass < Que::Job
  def destroy
    Que.execute "INSERT INTO finished_jobs SELECT * FROM que_jobs WHERE queue = $1::text AND priority = $2::integer AND run_at = $3::timestamptz AND job_id = $4::bigint", @attrs.values_at(:queue, :priority, :run_at, :job_id)
    super
  end
end
```

Then just have your job classes inherit from MyJobClass instead of Que::Job. If you need to query the jobs table and you want to include both finished and unfinished jobs, you might use:

```ruby
Que.execute "CREATE VIEW all_jobs AS SELECT * FROM que_jobs UNION ALL SELECT * FROM finished_jobs"
Que.execute "SELECT * FROM all_jobs"
```

Alternately, if you want a more foolproof solution and you're not scared of PostgreSQL, you can use a trigger:

```sql
CREATE FUNCTION please_save_my_job()
RETURNS trigger
LANGUAGE plpgsql
AS $$
  BEGIN
    INSERT INTO finished_jobs SELECT (OLD).*;
    RETURN OLD;
  END;
$$;

CREATE TRIGGER keep_all_my_old_jobs BEFORE DELETE ON que_jobs FOR EACH ROW EXECUTE PROCEDURE please_save_my_job();
```

### Not Retrying Certain Failed Jobs

By default, when jobs fail, Que reschedules them to be retried later. If instead you'd like certain jobs not to be retried, but moved elsewhere to be examined later, you can accomplish that easily. First, we need a place for the failed jobs to be stored:

```sql
CREATE TABLE failed_jobs AS SELECT * FROM que_jobs LIMIT 0
```

Then, create a module that you can use in the jobs you don't want to retry:

```ruby
module SkipRetries
  def run(*args)
    super
  rescue
    sql = <<-SQL
      WITH failed AS (
        DELETE
        FROM   que_jobs
        WHERE  queue    = $1::text
        AND    priority = $2::smallint
        AND    run_at   = $3::timestamptz
        AND    job_id   = $4::bigint
        RETURNING *
      )
      INSERT INTO failed_jobs
        SELECT * FROM failed;
    SQL

    Que.execute sql, @attrs.values_at(:queue, :priority, :run_at, :job_id)

    raise # Reraises caught error.
  end
end

class RunOnceJob < Que::Job
  prepend SkipRetries

  def run(*args)
    # Do something - if this job raises an error it'll be moved to the
    # failed_jobs table and not retried.
  end
end
```
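A brief usage sketch of the `SkipRetries`/`RunOnceJob` pattern defined just above; the `failed_jobs` table and class names come from that example, and the enqueue argument is purely illustrative:

```ruby
# Enqueue a job that should not be retried on failure (classes from the
# deleted doc above).
RunOnceJob.enqueue 'some-argument'

# If the job raises while being worked, its row is deleted from que_jobs and
# copied into failed_jobs, where it can be inspected later:
Que.execute("SELECT * FROM failed_jobs").each { |row| p row }
```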
data/lib/generators/que/install_generator.rb
DELETED
@@ -1,24 +0,0 @@
```ruby
# frozen_string_literal: true

require 'rails/generators'
require 'rails/generators/migration'
require 'active_record'

module Que
  class InstallGenerator < Rails::Generators::Base
    include Rails::Generators::Migration

    namespace 'que:install'
    self.source_paths << File.join(File.dirname(__FILE__), 'templates')
    desc "Generates a migration to add Que's job table."

    def self.next_migration_number(dirname)
      next_migration_number = current_migration_number(dirname) + 1
      ActiveRecord::Migration.next_migration_number(next_migration_number)
    end

    def create_migration_file
      migration_template 'add_que.rb', 'db/migrate/add_que.rb'
    end
  end
end
```
data/lib/generators/que/templates/add_que.rb
DELETED
@@ -1,13 +0,0 @@
```ruby
# frozen_string_literal: true

class AddQue < ActiveRecord::Migration[4.2]
  def self.up
    # The current version as of this migration's creation.
    Que.migrate! :version => 3
  end

  def self.down
    # Completely removes Que's job queue.
    Que.migrate! :version => 0
  end
end
```
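The removed template pins Que's schema at version 3, which was current for the 0.14.x line, while this release adds `lib/que/migrations/4/up.sql` and `4/down.sql` to the file list. An upgrading app would presumably run a similar migration moving to version 4. A sketch, assuming the `Que.migrate!` interface shown above carries over into 1.0.0.beta (the class name and the exact 1.0 call signature are assumptions, not taken from this diff):

```ruby
# Hypothetical follow-up migration for an app upgrading to the 1.0 schema.
class UpdateQueToVersionFour < ActiveRecord::Migration[5.0]
  def up
    Que.migrate! :version => 4
  end

  def down
    Que.migrate! :version => 3
  end
end
```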
data/lib/que/adapters/active_record.rb
DELETED
@@ -1,40 +0,0 @@
```ruby
# frozen_string_literal: true

module Que
  module Adapters
    class ActiveRecord < Base
      def checkout
        checkout_activerecord_adapter { |conn| yield conn.raw_connection }
      end

      def cleanup!
        # ActiveRecord will check out connections to the current thread when
        # queries are executed and not return them to the pool until
        # explicitly requested to. The wisdom of this API is questionable, and
        # it doesn't pose a problem for the typical case of workers using a
        # single PG connection (since we ensure that connection is checked in
        # and checked out responsibly), but since ActiveRecord supports
        # connections to multiple databases, it's easy for people using that
        # feature to unknowingly leak connections to other databases. So, take
        # the additional step of telling ActiveRecord to check in all of the
        # current thread's connections between jobs.
        ::ActiveRecord::Base.clear_active_connections!
      end

      private

      def checkout_activerecord_adapter(&block)
        # Use Rails' executor (if present) to make sure that the connection
        # we're using isn't taken from us while the block runs. See
        # https://github.com/chanks/que/issues/166#issuecomment-274218910
        if defined?(Rails.application.executor)
          Rails.application.executor.wrap do
            ::ActiveRecord::Base.connection_pool.with_connection(&block)
          end
        else
          ::ActiveRecord::Base.connection_pool.with_connection(&block)
        end
      end
    end
  end
end
```
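The `cleanup!` comment above describes a general ActiveRecord pitfall: connections checked out to a thread stay checked out until explicitly released. A standalone sketch of the same precaution outside Que, inside any Rails/ActiveRecord app (the `User` model is just an illustration):

```ruby
# After doing ActiveRecord work on a background thread, return the thread's
# connections to the pool so they aren't leaked.
Thread.new do
  begin
    User.count # any ActiveRecord query checks a connection out to this thread
  ensure
    ActiveRecord::Base.clear_active_connections!
  end
end
```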
data/lib/que/adapters/base.rb
DELETED
@@ -1,133 +0,0 @@
```ruby
# frozen_string_literal: true

require 'time' # For Time.parse.

module Que
  module Adapters
    autoload :ActiveRecord,   'que/adapters/active_record'
    autoload :ConnectionPool, 'que/adapters/connection_pool'
    autoload :PG,             'que/adapters/pg'
    autoload :Pond,           'que/adapters/pond'
    autoload :Sequel,         'que/adapters/sequel'

    class Base
      def initialize(thing = nil)
        @prepared_statements = {}
      end

      # The only method that adapters really need to implement. Should lock a
      # PG::Connection (or something that acts like a PG::Connection) so that
      # no other threads are using it and yield it to the block. Should also
      # be re-entrant.
      def checkout(&block)
        raise NotImplementedError
      end

      # Called after Que has returned its connection to whatever pool it's
      # using.
      def cleanup!
      end

      # Called after a job is queued in async mode, to prompt a worker to
      # wake up after the current transaction commits. Not all adapters will
      # implement this.
      def wake_worker_after_commit
        false
      end

      def execute(command, params = [])
        params = params.map do |param|
          case param
          # The pg gem unfortunately doesn't convert fractions of time instances, so cast them to a string.
          when Time then param.strftime("%Y-%m-%d %H:%M:%S.%6N %z")
          when Array, Hash then JSON.dump(param)
          else param
          end
        end

        cast_result \
          case command
          when Symbol then execute_prepared(command, params)
          when String then execute_sql(command, params)
          end
      end

      def in_transaction?
        checkout { |conn| conn.transaction_status != ::PG::PQTRANS_IDLE }
      end

      private

      def execute_sql(sql, params)
        args = params.empty? ? [sql] : [sql, params]
        checkout { |conn| conn.async_exec(*args) }
      end

      def execute_prepared(name, params)
        checkout do |conn|
          # Prepared statement errors have the potential to foul up the entire
          # transaction, so if we're in one, err on the side of safety.
          return execute_sql(SQL[name], params) if !Que.use_prepared_statements || in_transaction?

          statements = @prepared_statements[conn] ||= {}

          begin
            unless statements[name]
              conn.prepare("que_#{name}", SQL[name])
              prepared_just_now = statements[name] = true
            end

            conn.exec_prepared("que_#{name}", params)
          rescue ::PG::InvalidSqlStatementName => error
            # Reconnections on ActiveRecord can cause the same connection
            # objects to refer to new backends, so recover as well as we can.

            unless prepared_just_now
              Que.log :level => 'warn', :event => "reprepare_statement", :name => name
              statements[name] = false
              retry
            end

            raise error
          end
        end
      end

      CAST_PROCS = {}

      # Integer, bigint, smallint:
      CAST_PROCS[23] = CAST_PROCS[20] = CAST_PROCS[21] = proc(&:to_i)

      # Timestamp with time zone.
      CAST_PROCS[1184] = Time.method(:parse)

      # JSON.
      CAST_PROCS[114] = -> (value) { JSON.parse(value, create_additions: false) }

      # Boolean:
      CAST_PROCS[16] = -> (value) {
        case value
        when String then value == 't'
        when TrueClass, FalseClass then value
        else raise "Unexpected boolean value: #{value.inspect} (#{value.class})"
        end
      }

      def cast_result(result)
        output = result.to_a

        result.fields.each_with_index do |field, index|
          if converter = CAST_PROCS[result.ftype(index)]
            output.each do |hash|
              unless (value = hash[field]).nil?
                hash[field] = converter.call(value)
              end
            end
          end
        end

        output.map!(&Que.json_converter)
      end
    end
  end
end
```
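The comments in `Base` spell out the old adapter contract that this release removes: `checkout` must yield a locked, PG::Connection-like object and be re-entrant, while `cleanup!` and `wake_worker_after_commit` are optional hooks. A minimal sketch of a custom adapter satisfying that contract for a single dedicated connection; the class and the connection setup are illustrative and not part of Que:

```ruby
require 'monitor'
require 'pg'

module Que
  module Adapters
    # Illustrative single-connection adapter: wraps one PG::Connection in a
    # re-entrant lock so nested checkout calls are safe, as Base's comments
    # require. Not part of Que itself.
    class SingleConnection < Base
      def initialize(connection)
        super
        @connection = connection
        @lock = Monitor.new # Monitor is re-entrant, unlike Mutex
      end

      def checkout
        @lock.synchronize { yield @connection }
      end
    end
  end
end

# Usage sketch (connection parameters are placeholders):
# adapter = Que::Adapters::SingleConnection.new(PG.connect(dbname: 'my_app'))
```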