que 0.14.3 → 1.0.0.beta

Files changed (102)
  1. checksums.yaml +5 -5
  2. data/.gitignore +2 -0
  3. data/CHANGELOG.md +108 -14
  4. data/LICENSE.txt +1 -1
  5. data/README.md +49 -45
  6. data/bin/command_line_interface.rb +239 -0
  7. data/bin/que +8 -82
  8. data/docs/README.md +2 -0
  9. data/docs/active_job.md +6 -0
  10. data/docs/advanced_setup.md +7 -64
  11. data/docs/command_line_interface.md +45 -0
  12. data/docs/error_handling.md +65 -18
  13. data/docs/inspecting_the_queue.md +30 -80
  14. data/docs/job_helper_methods.md +27 -0
  15. data/docs/logging.md +3 -22
  16. data/docs/managing_workers.md +6 -61
  17. data/docs/middleware.md +15 -0
  18. data/docs/migrating.md +4 -7
  19. data/docs/multiple_queues.md +8 -4
  20. data/docs/shutting_down_safely.md +1 -1
  21. data/docs/using_plain_connections.md +39 -15
  22. data/docs/using_sequel.md +5 -3
  23. data/docs/writing_reliable_jobs.md +15 -24
  24. data/lib/que.rb +98 -182
  25. data/lib/que/active_job/extensions.rb +97 -0
  26. data/lib/que/active_record/connection.rb +51 -0
  27. data/lib/que/active_record/model.rb +48 -0
  28. data/lib/que/connection.rb +179 -0
  29. data/lib/que/connection_pool.rb +78 -0
  30. data/lib/que/job.rb +107 -156
  31. data/lib/que/job_cache.rb +240 -0
  32. data/lib/que/job_methods.rb +168 -0
  33. data/lib/que/listener.rb +176 -0
  34. data/lib/que/locker.rb +466 -0
  35. data/lib/que/metajob.rb +47 -0
  36. data/lib/que/migrations.rb +24 -17
  37. data/lib/que/migrations/4/down.sql +48 -0
  38. data/lib/que/migrations/4/up.sql +265 -0
  39. data/lib/que/poller.rb +267 -0
  40. data/lib/que/rails/railtie.rb +14 -0
  41. data/lib/que/result_queue.rb +35 -0
  42. data/lib/que/sequel/model.rb +51 -0
  43. data/lib/que/utils/assertions.rb +62 -0
  44. data/lib/que/utils/constantization.rb +19 -0
  45. data/lib/que/utils/error_notification.rb +68 -0
  46. data/lib/que/utils/freeze.rb +20 -0
  47. data/lib/que/utils/introspection.rb +50 -0
  48. data/lib/que/utils/json_serialization.rb +21 -0
  49. data/lib/que/utils/logging.rb +78 -0
  50. data/lib/que/utils/middleware.rb +33 -0
  51. data/lib/que/utils/queue_management.rb +18 -0
  52. data/lib/que/utils/transactions.rb +34 -0
  53. data/lib/que/version.rb +1 -1
  54. data/lib/que/worker.rb +128 -167
  55. data/que.gemspec +13 -2
  56. metadata +37 -80
  57. data/.rspec +0 -2
  58. data/.travis.yml +0 -64
  59. data/Gemfile +0 -24
  60. data/docs/customizing_que.md +0 -200
  61. data/lib/generators/que/install_generator.rb +0 -24
  62. data/lib/generators/que/templates/add_que.rb +0 -13
  63. data/lib/que/adapters/active_record.rb +0 -40
  64. data/lib/que/adapters/base.rb +0 -133
  65. data/lib/que/adapters/connection_pool.rb +0 -16
  66. data/lib/que/adapters/pg.rb +0 -21
  67. data/lib/que/adapters/pond.rb +0 -16
  68. data/lib/que/adapters/sequel.rb +0 -20
  69. data/lib/que/railtie.rb +0 -16
  70. data/lib/que/rake_tasks.rb +0 -59
  71. data/lib/que/sql.rb +0 -170
  72. data/spec/adapters/active_record_spec.rb +0 -175
  73. data/spec/adapters/connection_pool_spec.rb +0 -22
  74. data/spec/adapters/pg_spec.rb +0 -41
  75. data/spec/adapters/pond_spec.rb +0 -22
  76. data/spec/adapters/sequel_spec.rb +0 -57
  77. data/spec/gemfiles/Gemfile.current +0 -19
  78. data/spec/gemfiles/Gemfile.old +0 -19
  79. data/spec/gemfiles/Gemfile.older +0 -19
  80. data/spec/gemfiles/Gemfile.oldest +0 -19
  81. data/spec/spec_helper.rb +0 -129
  82. data/spec/support/helpers.rb +0 -25
  83. data/spec/support/jobs.rb +0 -35
  84. data/spec/support/shared_examples/adapter.rb +0 -42
  85. data/spec/support/shared_examples/multi_threaded_adapter.rb +0 -46
  86. data/spec/unit/configuration_spec.rb +0 -31
  87. data/spec/unit/connection_spec.rb +0 -14
  88. data/spec/unit/customization_spec.rb +0 -251
  89. data/spec/unit/enqueue_spec.rb +0 -245
  90. data/spec/unit/helper_spec.rb +0 -12
  91. data/spec/unit/logging_spec.rb +0 -101
  92. data/spec/unit/migrations_spec.rb +0 -84
  93. data/spec/unit/pool_spec.rb +0 -365
  94. data/spec/unit/run_spec.rb +0 -14
  95. data/spec/unit/states_spec.rb +0 -50
  96. data/spec/unit/stats_spec.rb +0 -46
  97. data/spec/unit/transaction_spec.rb +0 -36
  98. data/spec/unit/work_spec.rb +0 -596
  99. data/spec/unit/worker_spec.rb +0 -167
  100. data/tasks/benchmark.rb +0 -3
  101. data/tasks/rspec.rb +0 -14
  102. data/tasks/safe_shutdown.rb +0 -67
data/bin/que CHANGED
@@ -1,88 +1,14 @@
 #!/usr/bin/env ruby
+# frozen_string_literal: true
 
-require 'optparse'
-require 'ostruct'
-require 'logger'
+require_relative 'command_line_interface'
 
 $stdout.sync = true
 
-options = OpenStruct.new
+exit_code =
+  Que::CommandLineInterface.parse(
+    args: ARGV.dup,
+    output: $stdout,
+  )
 
-OptionParser.new do |opts|
-  opts.banner = 'usage: que [options] file/to/require ...'
-
-  opts.on('-w', '--worker-count [COUNT]', Integer, "Set number of workers in process (default: 4)") do |worker_count|
-    options.worker_count = worker_count
-  end
-
-  opts.on('-i', '--wake-interval [INTERVAL]', Float, "Set maximum interval between polls of the job queue (in seconds) (default: 0.1)") do |wake_interval|
-    options.wake_interval = wake_interval
-  end
-
-  opts.on('-l', '--log-level [LEVEL]', String, "Set level of Que's logger (debug, info, warn, error, fatal) (default: info)") do |log_level|
-    options.log_level = log_level
-  end
-
-  opts.on('-q', '--queue-name [NAME]', String, "Set the name of the queue to work jobs from (default: the default queue)") do |queue_name|
-    options.queue_name = queue_name
-  end
-
-  opts.on('-v', '--version', "Show Que version") do
-    require 'que'
-    $stdout.puts "Que version #{Que::Version}"
-    exit 0
-  end
-
-  opts.on('-h', '--help', "Show help text") do
-    $stdout.puts opts
-    exit 0
-  end
-end.parse!(ARGV)
-
-if ARGV.length.zero?
-  $stdout.puts <<-OUTPUT
-You didn't include any Ruby files to require!
-Que needs to be able to load your application before it can process jobs.
-(Hint: If you're using Rails, try `que ./config/environment.rb`)
-(Or use `que -h` for a list of options)
-  OUTPUT
-  exit 1
-end
-
-ARGV.each do |file|
-  begin
-    require file
-  rescue LoadError
-    $stdout.puts "Could not load file '#{file}'"
-  end
-end
-
-Que.logger ||= Logger.new(STDOUT)
-
-begin
-  if log_level = (options.log_level || ENV['QUE_LOG_LEVEL'])
-    Que.logger.level = Logger.const_get(log_level.upcase)
-  end
-rescue NameError
-  $stdout.puts "Bad logging level: #{log_level}"
-  exit 1
-end
-
-Que.queue_name = options.queue_name || ENV['QUE_QUEUE'] || Que.queue_name || nil
-Que.worker_count = (options.worker_count || ENV['QUE_WORKER_COUNT'] || Que.worker_count || 4).to_i
-Que.wake_interval = (options.wake_interval || ENV['QUE_WAKE_INTERVAL'] || Que.wake_interval || 0.1).to_f
-Que.mode = :async
-
-stop = false
-%w(INT TERM).each { |signal| trap(signal) { stop = true } }
-
-loop do
-  sleep 0.01
-  break if stop
-end
-
-$stdout.puts
-$stdout.puts "Finishing Que's current jobs before exiting..."
-Que.worker_count = 0
-Que.mode = :off
-$stdout.puts "Que's jobs finished, exiting..."
+exit(exit_code)

data/docs/README.md CHANGED
@@ -1,6 +1,8 @@
 Docs Index
 ===============
 
+TODO: Fix doc links.
+
 - [Advanced Setup](advanced_setup.md#advanced-setup)
 - [Using ActiveRecord Without Rails](advanced_setup.md#using-activerecord-without-rails)
 - [Forking Servers](advanced_setup.md#forking-servers)

data/docs/active_job.md ADDED
@@ -0,0 +1,6 @@
+## Using Que With ActiveJob
+
+You can include `Que::ActiveJob::JobExtensions` into your `ApplicationJob` subclass to get support for all of Que's
+[helper methods](/job_helper_methods.md). These methods will become no-ops if you use a queue adapter that isn't Que, so if you like to use a different adapter in development they shouldn't interfere.
+
+Additionally, including `Que::ActiveJob::JobExtensions` lets you define a run() method that supports keyword arguments.
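
For orientation, here is a minimal sketch of how an application might adopt the extensions this new doc describes. The class names and the keyword signature are illustrative only; just `Que::ActiveJob::JobExtensions` and the `run` convention come from the diff above.

```ruby
# Hypothetical app code, assuming the Que::ActiveJob::JobExtensions module
# documented above.
class ApplicationJob < ActiveJob::Base
  include Que::ActiveJob::JobExtensions
end

class ProcessOrderJob < ApplicationJob
  # With the extensions included you can define run with keyword arguments.
  # Que's helper methods (see job_helper_methods.md) are available here and
  # become no-ops under non-Que queue adapters.
  def run(order_id:, notify: true)
    # ...
  end
end
```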

data/docs/advanced_setup.md CHANGED
@@ -2,7 +2,7 @@
 
 ### Using ActiveRecord Without Rails
 
-If you're using both Rails and ActiveRecord, the README describes how to get started with Que (which is pretty straightforward, since Que includes a Railtie that handles a lot of setup for you). Otherwise, you'll need to do some manual setup.
+If you're using both Rails and ActiveRecord, the README describes how to get started with Que (which is pretty straightforward, since it includes a Railtie that handles a lot of setup for you). Otherwise, you'll need to do some manual setup.
 
 If you're using ActiveRecord outside of Rails, you'll need to tell Que to piggyback on its connection pool after you've connected to the database:
 
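The snippet that sentence introduces falls outside this hunk. As a rough sketch of that kind of setup (the `Que.connection=` setter is assumed here; it isn't shown in this diff):

```ruby
# Hypothetical standalone (non-Rails) setup -- not part of this diff.
require 'active_record'
require 'que'

# Connect ActiveRecord first...
ActiveRecord::Base.establish_connection(ENV['DATABASE_URL'])

# ...then tell Que to piggyback on ActiveRecord's connection pool
# (assumed setter; see Que's README for the canonical form).
Que.connection = ActiveRecord
```
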
@@ -18,69 +18,22 @@ Then you can queue jobs just as you would in Rails:
 ```ruby
 ActiveRecord::Base.transaction do
   @user = User.create(params[:user])
-  SendRegistrationEmail.enqueue :user_id => @user.id
+  SendRegistrationEmail.enqueue user_id: @user.id
 end
 ```
 
 There are other docs to read if you're using [Sequel](https://github.com/chanks/que/blob/master/docs/using_sequel.md) or [plain Postgres connections](https://github.com/chanks/que/blob/master/docs/using_plain_connections.md) (with no ORM at all) instead of ActiveRecord.
 
-### Forking Servers
-
-If you want to run a worker pool in your web process and you're using a forking webserver like Phusion Passenger (in smart spawning mode), Unicorn or Puma in some configurations, you'll want to set `Que.mode = :off` in your application configuration and only start up the worker pool in the child processes after the DB connection has been reestablished. So, for Puma:
-
-```ruby
-# config/puma.rb
-on_worker_boot do
-  ActiveRecord::Base.establish_connection
-
-  Que.mode = :async
-end
-```
-
-And for Unicorn:
-
-```ruby
-# config/unicorn.rb
-after_fork do |server, worker|
-  ActiveRecord::Base.establish_connection
-
-  Que.mode = :async
-end
-```
-
-And for Phusion Passenger:
-
-```ruby
-# config.ru
-if defined?(PhusionPassenger)
-  PhusionPassenger.on_event(:starting_worker_process) do |forked|
-    if forked
-      Que.mode = :async
-    end
-  end
-end
-```
-
-If there's other setup you want to do for workers, such as setting up the
-configuration, you'll need to do that manually as well.
-
 ### Managing the Jobs Table
 
-After you've connected Que to the database, you can manage the jobs table:
-
-```ruby
-# Create/update the jobs table to the latest schema version:
-Que.migrate!
-```
-
-You'll want to migrate to a specific version if you're using migration files, to ensure that they work the same way even when you upgrade Que in the future:
+After you've connected Que to the database, you can manage the jobs table. You'll want to migrate to a specific version in a migration file, to ensure that they work the same way even when you upgrade Que in the future:
 
 ```ruby
-# Update the schema to version #3.
-Que.migrate! :version => 3
+# Update the schema to version #4.
+Que.migrate! version: 4
 
-# To reverse the migration, drop the jobs table entirely:
-Que.migrate! :version => 0
+# Remove Que's jobs table entirely.
+Que.migrate! version: 0
 ```
 
 There's also a helper method to clear all jobs from the jobs table:
@@ -91,16 +44,6 @@ Que.clear!
 
 ### Other Setup
 
-You'll need to set Que's mode manually:
-
-```ruby
-# Start the worker pool:
-Que.mode = :async
-
-# Or, when testing:
-Que.mode = :sync
-```
-
 Be sure to read the docs on [managing workers](https://github.com/chanks/que/blob/master/docs/managing_workers.md) for more information on using the worker pool.
 
 You'll also want to set up [logging](https://github.com/chanks/que/blob/master/docs/logging.md) and an [error handler](https://github.com/chanks/que/blob/master/docs/error_handling.md) to track errors raised by jobs.
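
As a quick illustration of the logging setup that last sentence points to (a minimal sketch; `Que.logger` is Que's standard logger hook, and the initializer path is just a typical choice):

```ruby
# config/initializers/que.rb (path is illustrative)
require 'logger'

# Send Que's log output to STDOUT at the info level.
Que.logger = Logger.new($stdout)
Que.logger.level = Logger::INFO
```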

data/docs/command_line_interface.md ADDED
@@ -0,0 +1,45 @@
+## Command Line Interface
+
+```
+usage: que [options] [file/to/require] ...
+    -h, --help                       Show this help text.
+    -i, --poll-interval [INTERVAL]   Set maximum interval between polls for available jobs, in seconds (default: 5)
+    -l, --log-level [LEVEL]          Set level at which to log to STDOUT (debug, info, warn, error, fatal) (default: info)
+    -q, --queue-name [NAME]          Set a queue name to work jobs from. Can be passed multiple times. (default: the default queue only)
+    -v, --version                    Print Que version and exit.
+    -w, --worker-count [COUNT]       Set number of workers in process (default: 6)
+        --connection-url [URL]       Set a custom database url to connect to for locking purposes.
+        --log-internals              Log verbosely about Que's internal state. Only recommended for debugging issues
+        --maximum-buffer-size [SIZE] Set maximum number of jobs to be cached in this process awaiting a worker (default: 8)
+        --minimum-buffer-size [SIZE] Set minimum number of jobs to be cached in this process awaiting a worker (default: 2)
+        --wait-period [PERIOD]       Set maximum interval between checks of the in-memory job queue, in milliseconds (default: 50)
+        --worker-priorities [LIST]   List of priorities to assign to workers, unspecified workers take jobs of any priority (default: 10,30,50)
+```
+
+Some explanation of the more unusual options:
+
+### worker-count and worker-priorities
+
+These options dictate the size and priority distribution of the worker pool. The default worker-count is 6 and the default worker-priorities is 10,30,50. This means that the default worker pool will have one worker that only works jobs with priorities under 10, one for priorities under 30, and one for priorities under 50. The leftover workers will work any job.
+
+For example, with these defaults, you could have a large backlog of jobs of priority 100. When a more important job (priority 40) comes in, there's guaranteed to be a free worker. If the process then becomes saturated with jobs of priority 40, and then a priority 20 job comes in, there's guaranteed to be a free worker for it, and so on.
+
+### poll-interval
+
+This option sets the number of seconds the process will wait between polls of the job queue. Jobs that are ready to be worked immediately will be broadcast via the LISTEN/NOTIFY system, so polling is unnecessary for them - polling is only necessary for jobs that are scheduled in the future or which are being delayed due to errors. The default is 5 seconds.
+
+### minimum-buffer-size and maximum-buffer-size
+
+These options set the size of the internal buffer that Que uses to cache job information until it's ready for workers. The default minimum is 2 and the maximum is 8, meaning that the process won't buffer more than 8 jobs that aren't yet ready to be worked, and will only resort to polling if the buffer dips below 2. If you don't want jobs to be buffered at all, you can set both of these values to zero.
+
+### connection-url
+
+This option sets the URL to be used to open a connection to the database for locking purposes. By default, Que will simply use a connection from the connection pool for locking - this option is only useful if your application connections can't use advisory locks - for example, if they're passed through an external connection pool like PgBouncer. In that case, you'll need to use this option to specify your actual database URL so that Que can establish a direct connection.
+
+### wait-period
+
+This option specifies (in milliseconds) how often the locking thread wakes up to check whether the workers have finished jobs, whether it's time to poll, etc. You shouldn't generally need to tweak this, but it may come in handy for some workloads. The default is 50 milliseconds.
+
+### log-internals
+
+This option instructs Que to output a lot of information about its internal state to the logger. It should only be used if it becomes necessary to debug issues.
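
Putting those flags together, an example invocation might look like the following. The flags are taken from the usage text above; the trailing environment file path is just the usual Rails choice, not something this doc mandates.

```
# Work the "default" and "emails" queues with eight workers, polling every
# ten seconds. The trailing file is required so Que can load the application.
que --worker-count 8 --queue-name default --queue-name emails --poll-interval 10 ./config/environment.rb
```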

data/docs/error_handling.md CHANGED
@@ -7,41 +7,88 @@ If a given job fails repeatedly, Que will retry it at exponentially-increasing i
 ```ruby
 class MyJob < Que::Job
   # Just retry a failed job every 5 seconds:
-  @retry_interval = 5
+  self.retry_interval = 5
 
   # Always retry this job immediately (not recommended, or transient
   # errors will spam your error reporting):
-  @retry_interval = 0
+  self.retry_interval = 0
 
   # Increase the delay by 30 seconds every time this job fails:
-  @retry_interval = proc { |count| count * 30 }
+  self.retry_interval = proc { |count| count * 30 }
 end
 ```
 
-Unlike DelayedJob, however, there is currently no maximum number of failures after which jobs will be deleted. Que's assumption is that if a job is erroring perpetually (and not just transiently), you will want to take action to get the job working properly rather than simply losing it silently.
+There is a maximum_retry_count option for jobs. It defaults to 15 retries, which with the default retry interval means that a job will stop retrying after a little more than two days.
+
+## Error Notifications
 
 If you're using an error notification system (highly recommended, of course), you can hook Que into it by setting a callable as the error notifier:
 
 ```ruby
 Que.error_notifier = proc do |error, job|
-  # Do whatever you want with the error object or job row here.
-
-  # Note that the job passed is not the actual job object, but the hash
-  # representing the job row in the database, which looks like:
+  # Do whatever you want with the error object or job row here. Note that the
+  # job passed is not the actual job object, but the hash representing the job
+  # row in the database, which looks like:
 
   # {
-  #   "queue" => "my_queue",
-  #   "priority" => 100,
-  #   "run_at" => 2015-03-06 11:07:08 -0500,
-  #   "job_id" => 65,
-  #   "job_class" => "MyJob",
-  #   "args" => ['argument', 78],
-  #   "error_count" => 0
+  #   :priority => 100,
+  #   :run_at => "2017-09-15T20:18:52.018101Z",
+  #   :id => 172340879,
+  #   :job_class => "TestJob",
+  #   :error_count => 0,
+  #   :last_error_message => nil,
+  #   :queue => "default",
+  #   :last_error_backtrace => nil,
+  #   :finished_at => nil,
+  #   :expired_at => nil,
+  #   :args => [],
+  #   :data => {}
  # }
 
  # This is done because the job may not have been able to be deserialized
-  # properly, if the name of the job class was changed or the job is being
-  # retrieved and worked by the wrong app. The job argument may also be
-  # nil, if there was a connection failure or something similar.
+  # properly, if the name of the job class was changed or the job class isn't
+  # loaded for some reason. The job argument may also be nil, if there was a
+  # connection failure or something similar.
 end
 ```
+
+## Error-Specific Handling
+
+You can also define a handle_error method in your job, like so:
+
+```ruby
+class MyJob < Que::Job
+  def run(*args)
+    # Your code goes here.
+  end
+
+  def handle_error(error)
+    case error
+    when TemporaryError then retry_in 10.seconds
+    when PermanentError then expire
+    else super # Default (exponential backoff) behavior.
+    end
+  end
+end
+```
+
+The return value of handle_error determines whether the error object is passed to the error notifier. The helper methods like expire and retry_in return true, so these errors will be notified. You can explicitly return false to skip notification.
+
+```ruby
+class MyJob < Que::Job
+  def handle_error(error)
+    case error
+    when AnnoyingError
+      retry_in 10.seconds
+      false
+    when TransientError
+      super
+      error_count > 3
+    else
+      super # Default (exponential backoff) behavior.
+    end
+  end
+end
+```
+
+In this example, AnnoyingError will never be notified, while TransientError will only be notified once it has affected a given job at least three times.
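
A small sketch combining the maximum_retry_count option mentioned above with a custom retry interval; the `self.maximum_retry_count =` setter syntax is assumed by analogy with the `self.retry_interval =` examples in this diff.

```ruby
class FlakyApiJob < Que::Job
  # Stop retrying after five failures -- setter name assumed by analogy.
  self.maximum_retry_count = 5

  # Back off by an extra minute per failure.
  self.retry_interval = proc { |count| count * 60 }

  def run(*args)
    # Your code goes here.
  end
end
```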

data/docs/inspecting_the_queue.md CHANGED
@@ -9,106 +9,56 @@ You can call `Que.job_stats` to return some aggregate data on the types of jobs
 ```ruby
 [
   {
-    "job_class"=>"ChargeCreditCard",
-    "count"=>"10",
-    "count_working"=>"4",
-    "count_errored"=>"2",
-    "highest_error_count"=>"5",
-    "oldest_run_at"=>"2014-01-04 21:24:55.817129+00"
+    :job_class=>"ChargeCreditCard",
+    :count=>10,
+    :count_working=>4,
+    :count_errored=>2,
+    :highest_error_count=>5,
+    :oldest_run_at=>2017-09-08 16:13:18 -0400
   },
   {
-    "job_class"=>"SendRegistrationEmail",
-    "count"=>"8",
-    "count_working"=>"0",
-    "count_errored"=>"0",
-    "highest_error_count"=>"0",
-    "oldest_run_at"=>"2014-01-04 22:24:55.81532+00"
+    :job_class=>"SendRegistrationEmail",
+    :count=>1,
+    :count_working=>0,
+    :count_errored=>0,
+    :highest_error_count=>0,
+    :oldest_run_at=>2017-09-08 17:13:18 -0400
   }
 ]
 ```
 
-This tells you that, for instance, there are ten ChargeCreditCard jobs in the queue, four of which are currently being worked, and two of which have experienced errors. One of them has started to process but experienced an error five times. The oldest_run_at is helpful for determining how long jobs have been sitting around, if you have backlog.
-
-### Worker States
-
-You can call `Que.worker_states` to return some information on every worker touching the queue (not just those in the current process). Example output:
-
-```ruby
-[
-  {
-    "priority"=>"2",
-    "run_at"=>"2014-01-04 22:35:55.772324+00",
-    "job_id"=>"4592",
-    "job_class"=>"ChargeCreditCard",
-    "args"=>"[345,56]",
-    "error_count"=>"0",
-    "last_error"=>nil,
-    "pg_backend_pid"=>"1175",
-    "pg_state"=>"idle",
-    "pg_state_changed_at"=>"2014-01-04 22:35:55.777785+00",
-    "pg_last_query"=>"SELECT * FROM users",
-    "pg_last_query_started_at"=>"2014-01-04 22:35:55.777519+00",
-    "pg_transaction_started_at"=>nil,
-    "pg_waiting_on_lock"=>"f"
-  }
-]
-```
-
-In this case, there is only one worker currently working the queue. The first seven fields are the attributes of the job it is currently running. The next seven fields are information about that worker's Postgres connection, and are taken from `pg_stat_activity` - see [Postgres' documentation](http://www.postgresql.org/docs/current/static/monitoring-stats.html#PG-STAT-ACTIVITY-VIEW) for more information on interpreting these fields.
-
-* `pg_backend_pid` - The pid of the Postgres process serving this worker. This is useful if you wanted to kill that worker's connection, for example, by running "SELECT pg_terminate_backend(1175)". This would free up the job to be attempted by another worker.
-* `pg_state` - The state of the Postgres backend. It may be "active" if the worker is currently running a query or "idle"/"idle in transaction" if it is not. It may also be in one of a few other less common states.
-* `pg_state_changed_at` - The timestamp for when the backend's state was last changed. If the backend is idle, this would reflect the time that the last query finished.
-* `pg_last_query` - The text of the current or most recent query that the worker sent to the database.
-* `pg_last_query_started_at` - The timestamp for when the last query began to run.
-* `pg_transaction_started_at` - The timestamp for when the worker's current transaction (if any) began.
-* `pg_waiting_on_lock` - Whether or not the worker is waiting for a lock in Postgres to be released.
+This tells you that, for instance, there are ten ChargeCreditCard jobs in the queue, four of which are currently being worked, and two of which have experienced errors. One of them has started to process but experienced an error five times. The oldest_run_at is helpful for determining how long jobs have been sitting around, if you have a large backlog.
 
 ### Custom Queries
 
-If you want to query the jobs table yourself to see what's been queued or to check the state of various jobs, you can always use Que to execute whatever SQL you want:
+If you're using ActiveRecord or Sequel, Que ships with models that wrap the job queue so you can write your own logic to inspect it. They include some helpful scopes to write your queries - see the gem source for a complete accounting.
 
-```ruby
-Que.execute("select count(*) from que_jobs") #=> [{"count"=>"492"}]
-```
+#### ActiveRecord Example
 
-If you want to use ActiveRecord's features when querying, you can define your own model around Que's job table:
+``` ruby
+# app/models/que_job.rb
 
-```ruby
-class QueJob < ActiveRecord::Base
+require 'que/active_record/model'
+
+class QueJob < Que::ActiveRecord::Model
 end
 
-# Or:
+QueJob.finished.to_sql # => "SELECT \"que_jobs\".* FROM \"que_jobs\" WHERE (\"que_jobs\".\"finished_at\" IS NOT NULL)"
 
-class MyJob < ActiveRecord::Base
-  self.table_name = :que_jobs
-end
+# You could also name the model whatever you like, or just query from
+# Que::ActiveRecord::Model directly if you don't need to write your own model
+# logic.
 ```
 
-Then you can query just as you would with any other model. Since the jobs table has a composite primary key, however, you probably won't be able to update or destroy jobs this way, though.
+#### Sequel Example
 
-If you're using Sequel, you can use the same technique:
+``` ruby
+# app/models/que_job.rb
 
-```ruby
-class QueJob < Sequel::Model
-end
-
-# Or:
+require 'que/sequel/model'
 
-class MyJob < Sequel::Model(:que_jobs)
+class QueJob < Que::Sequel::Model
 end
-```
 
-And note that Sequel *does* support composite primary keys:
-
-```ruby
-job = QueJob.where(:job_class => "ChargeCreditCard").first
-job.priority = 1
-job.save
-```
-
-Or, you can just use Sequel's dataset methods:
-
-```ruby
-DB[:que_jobs].where{priority > 3}.all
+QueJob.finished # => #<Sequel::Postgres::Dataset: "SELECT * FROM \"public\".\"que_jobs\" WHERE (\"public\".\"que_jobs\".\"finished_at\" IS NOT NULL)">
 ```
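
Building on the ActiveRecord model shown above, a short usage sketch: only the `finished` scope appears in this diff, so the second query below sticks to plain ActiveRecord conditions on columns that are documented elsewhere in this changeset (`job_class`, `finished_at`) rather than assuming additional Que-specific scopes.

```ruby
require 'que/active_record/model'

# Count jobs that have completed:
Que::ActiveRecord::Model.finished.count

# Jobs of a particular class that haven't finished yet (plain ActiveRecord,
# not a Que-provided scope):
Que::ActiveRecord::Model.where(job_class: "ChargeCreditCard", finished_at: nil).count
```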