que 1.0.0.beta → 1.0.0.beta5

data/lib/que/worker.rb CHANGED
@@ -1,12 +1,16 @@
  # frozen_string_literal: true
 
- # Workers wrap threads which continuously pull job pks from JobCache objects,
+ # Workers wrap threads which continuously pull job pks from JobBuffer objects,
  # fetch and work those jobs, and export relevant data to ResultQueues.
 
+ require 'set'
+
  module Que
    class Worker
      attr_reader :thread, :priority
 
+     VALID_LOG_LEVELS = [:debug, :info, :warn, :error, :fatal, :unknown].to_set.freeze
+
      SQL[:check_job] =
        %{
          SELECT 1 AS one
@@ -15,20 +19,20 @@ module Que
        }
 
      def initialize(
-       job_cache:,
+       job_buffer:,
        result_queue:,
        priority: nil,
        start_callback: nil
      )
 
        @priority = Que.assert([NilClass, Integer], priority)
-       @job_cache = Que.assert(JobCache, job_cache)
+       @job_buffer = Que.assert(JobBuffer, job_buffer)
        @result_queue = Que.assert(ResultQueue, result_queue)
 
        Que.internal_log(:worker_instantiate, self) do
          {
            priority: priority,
-           job_cache: job_cache.object_id,
+           job_buffer: job_buffer.object_id,
            result_queue: result_queue.object_id,
          }
        end
@@ -50,10 +54,17 @@ module Que
      private
 
      def work_loop
-       # Blocks until a job of the appropriate priority is available. If the
-       # queue is shutting down this will return nil, which breaks the loop and
+       # Blocks until a job of the appropriate priority is available.
+       # `fetch_next_metajob` normally returns a job to be processed.
+       # If the queue is shutting down it will return false, which breaks the loop and
        # lets the thread finish.
-       while metajob = fetch_next_metajob
+       while (metajob = fetch_next_metajob) != false
+         # If metajob is nil instead of false, we've hit a rare race condition where
+         # there was a job in the buffer when the worker code checked, but the job was
+         # picked up by the time we got around to shifting it off the buffer.
+         # Letting this case go unhandled leads to worker threads exiting pre-maturely, so
+         # we check explicitly and continue the loop.
+         next if metajob.nil?
          id = metajob.id
 
          Que.internal_log(:worker_received_job, self) { {id: id} }
@@ -80,7 +91,7 @@ module Que
      end
 
      def fetch_next_metajob
-       @job_cache.shift(*priority)
+       @job_buffer.shift(*priority)
      end
 
      def work_job(metajob)
@@ -89,22 +100,33 @@ module Que
        klass = Que.constantize(job.fetch(:job_class))
        instance = klass.new(job)
 
-       Que.run_middleware(instance) { instance.tap(&:_run) }
+       Que.run_job_middleware(instance) { instance.tap(&:_run) }
 
-       log_message = {
-         level: :debug,
-         job_id: metajob.id,
-         elapsed: (Time.now - start),
-       }
+       elapsed = Time.now - start
 
-       if error = instance.que_error
-         log_message[:event] = :job_errored
-         log_message[:error] = "#{error.class}: #{error.message}".slice(0, 500)
-       else
-         log_message[:event] = :job_worked
-       end
+       log_level =
+         if instance.que_error
+           :error
+         else
+           instance.log_level(elapsed)
+         end
+
+       if VALID_LOG_LEVELS.include?(log_level)
+         log_message = {
+           level: log_level,
+           job_id: metajob.id,
+           elapsed: elapsed,
+         }
 
-       Que.log(log_message)
+         if error = instance.que_error
+           log_message[:event] = :job_errored
+           log_message[:error] = "#{error.class}: #{error.message}".slice(0, 500)
+         else
+           log_message[:event] = :job_worked
+         end
+
+         Que.log(log_message)
+       end
 
        instance
      rescue => error
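
The diff above routes completion logging through a new per-job `log_level(elapsed)` hook: the worker asks the job instance for a level and only calls `Que.log` if the returned value is in `VALID_LOG_LEVELS`. A minimal sketch of how a job subclass might use that hook (the 5-second threshold is illustrative, and returning a non-level value such as `false` suppresses the log line):

```ruby
class ExpensiveReportJob < Que::Job
  def run(report_id)
    # ... build the report ...
  end

  # Called by the worker with the elapsed time in seconds (see the diff above).
  # Log slow runs loudly and fast runs quietly; anything outside
  # VALID_LOG_LEVELS (e.g. false) skips the job_worked log line entirely.
  def log_level(elapsed)
    elapsed > 5 ? :warn : :debug
  end
end
```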
data/lib/que.rb CHANGED
@@ -35,7 +35,7 @@ module Que
  require_relative 'que/connection_pool'
  require_relative 'que/job_methods'
  require_relative 'que/job'
- require_relative 'que/job_cache'
+ require_relative 'que/job_buffer'
  require_relative 'que/locker'
  require_relative 'que/metajob'
  require_relative 'que/migrations'
@@ -44,6 +44,12 @@ module Que
  require_relative 'que/version'
  require_relative 'que/worker'
 
+ class << self
+   attr_writer :default_queue
+ end
+
+ self.default_queue = nil
+
  class << self
    include Utils::Assertions
    include Utils::Constantization
@@ -65,7 +71,6 @@ module Que
 
    # Global configuration logic.
    attr_accessor :use_prepared_statements
-   attr_writer :default_queue
 
    def default_queue
      @default_queue || DEFAULT_QUEUE
@@ -77,8 +82,8 @@ module Que
    if conn.to_s == 'ActiveRecord'
      # Load and setup AR compatibility.
      require_relative 'que/active_record/connection'
-     m = Que::ActiveRecord::Connection::Middleware
-     middleware << m unless middleware.include?(m)
+     m = Que::ActiveRecord::Connection::JobMiddleware
+     job_middleware << m unless job_middleware.include?(m)
      Que::ActiveRecord::Connection.method(:checkout)
    else
      case conn.class.to_s
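
The `default_queue` writer now lives in its own `class << self` block above the main one so that `self.default_queue = nil` can run at load time; the reader still falls back to `DEFAULT_QUEUE`. A short usage sketch (the queue name is illustrative):

```ruby
Que.default_queue = 'low_priority' # illustrative queue name
Que.default_queue                  # => "low_priority"

# Unset it and the reader falls back to the built-in default:
Que.default_queue = nil
Que.default_queue                  # => Que::DEFAULT_QUEUE
```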
data/que.gemspec CHANGED
@@ -10,7 +10,7 @@ Gem::Specification.new do |spec|
    spec.email = ['christopher.m.hanks@gmail.com']
    spec.description = %q{A job queue that uses PostgreSQL's advisory locks for speed and reliability.}
    spec.summary = %q{A PostgreSQL-based Job Queue}
-   spec.homepage = 'https://github.com/chanks/que'
+   spec.homepage = 'https://github.com/que-rb/que'
    spec.license = 'MIT'
 
    files_to_exclude = [
@@ -29,5 +29,5 @@ Gem::Specification.new do |spec|
    spec.test_files = spec.files.grep(%r{^(test|spec|features)/})
    spec.require_paths = ['lib']
 
-   spec.add_development_dependency 'bundler', '~> 1.3'
+   spec.add_development_dependency 'bundler'
  end
metadata CHANGED
@@ -1,29 +1,29 @@
  --- !ruby/object:Gem::Specification
  name: que
  version: !ruby/object:Gem::Version
-   version: 1.0.0.beta
+   version: 1.0.0.beta5
  platform: ruby
  authors:
  - Chris Hanks
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2017-10-25 00:00:00.000000000 Z
+ date: 2021-12-23 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: bundler
    requirement: !ruby/object:Gem::Requirement
      requirements:
-     - - "~>"
+     - - ">="
        - !ruby/object:Gem::Version
-         version: '1.3'
+         version: '0'
    type: :development
    prerelease: false
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
-     - - "~>"
+     - - ">="
        - !ruby/object:Gem::Version
-         version: '1.3'
+         version: '0'
  description: A job queue that uses PostgreSQL's advisory locks for speed and reliability.
  email:
  - christopher.m.hanks@gmail.com
@@ -32,7 +32,9 @@ executables:
  extensions: []
  extra_rdoc_files: []
  files:
+ - ".github/workflows/tests.yml"
  - ".gitignore"
+ - CHANGELOG.1.0.beta.md
  - CHANGELOG.md
  - LICENSE.txt
  - README.md
@@ -40,21 +42,6 @@ files:
  - bin/command_line_interface.rb
  - bin/que
  - docs/README.md
- - docs/active_job.md
- - docs/advanced_setup.md
- - docs/command_line_interface.md
- - docs/error_handling.md
- - docs/inspecting_the_queue.md
- - docs/job_helper_methods.md
- - docs/logging.md
- - docs/managing_workers.md
- - docs/middleware.md
- - docs/migrating.md
- - docs/multiple_queues.md
- - docs/shutting_down_safely.md
- - docs/using_plain_connections.md
- - docs/using_sequel.md
- - docs/writing_reliable_jobs.md
  - lib/que.rb
  - lib/que/active_job/extensions.rb
  - lib/que/active_record/connection.rb
@@ -62,7 +49,7 @@ files:
  - lib/que/connection.rb
  - lib/que/connection_pool.rb
  - lib/que/job.rb
- - lib/que/job_cache.rb
+ - lib/que/job_buffer.rb
  - lib/que/job_methods.rb
  - lib/que/listener.rb
  - lib/que/locker.rb
@@ -93,7 +80,7 @@ files:
  - lib/que/version.rb
  - lib/que/worker.rb
  - que.gemspec
- homepage: https://github.com/chanks/que
+ homepage: https://github.com/que-rb/que
  licenses:
  - MIT
  metadata: {}
@@ -112,8 +99,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
    version: 1.3.1
  requirements: []
- rubyforge_project:
- rubygems_version: 2.6.14
+ rubygems_version: 3.1.6
  signing_key:
  specification_version: 4
  summary: A PostgreSQL-based Job Queue
data/docs/active_job.md DELETED
@@ -1,6 +0,0 @@
- ## Using Que With ActiveJob
-
- You can include `Que::ActiveJob::JobExtensions` into your `ApplicationJob` subclass to get support for all of Que's
- [helper methods](/job_helper_methods.md). These methods will become no-ops if you use a queue adapter that isn't Que, so if you like to use a different adapter in development they shouldn't interfere.
-
- Additionally, including `Que::ActiveJob::JobExtensions` lets you define a run() method that supports keyword arguments.
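
For context, a minimal sketch of the setup this deleted doc describes; the job class and model call are illustrative, only `Que::ActiveJob::JobExtensions` comes from Que:

```ruby
# app/jobs/application_job.rb
class ApplicationJob < ActiveJob::Base
  # Exposes Que's helper methods (destroy, finish, expire, retry_in, error_count)
  # to every job; they become no-ops under non-Que queue adapters.
  include Que::ActiveJob::JobExtensions
end

# app/jobs/send_invoice_job.rb (illustrative)
class SendInvoiceJob < ApplicationJob
  # With the extensions included, run() can take keyword arguments.
  def run(invoice_id:, resend: false)
    Invoice.find(invoice_id).deliver(resend: resend) # illustrative model call
  end
end
```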
data/docs/advanced_setup.md DELETED
@@ -1,49 +0,0 @@
- ## Advanced Setup
-
- ### Using ActiveRecord Without Rails
-
- If you're using both Rails and ActiveRecord, the README describes how to get started with Que (which is pretty straightforward, since it includes a Railtie that handles a lot of setup for you). Otherwise, you'll need to do some manual setup.
-
- If you're using ActiveRecord outside of Rails, you'll need to tell Que to piggyback on its connection pool after you've connected to the database:
-
- ```ruby
- ActiveRecord::Base.establish_connection(ENV['DATABASE_URL'])
-
- require 'que'
- Que.connection = ActiveRecord
- ```
-
- Then you can queue jobs just as you would in Rails:
-
- ```ruby
- ActiveRecord::Base.transaction do
-   @user = User.create(params[:user])
-   SendRegistrationEmail.enqueue user_id: @user.id
- end
- ```
-
- There are other docs to read if you're using [Sequel](https://github.com/chanks/que/blob/master/docs/using_sequel.md) or [plain Postgres connections](https://github.com/chanks/que/blob/master/docs/using_plain_connections.md) (with no ORM at all) instead of ActiveRecord.
-
- ### Managing the Jobs Table
-
- After you've connected Que to the database, you can manage the jobs table. You'll want to migrate to a specific version in a migration file, to ensure that they work the same way even when you upgrade Que in the future:
-
- ```ruby
- # Update the schema to version #4.
- Que.migrate! version: 4
-
- # Remove Que's jobs table entirely.
- Que.migrate! version: 0
- ```
-
- There's also a helper method to clear all jobs from the jobs table:
-
- ```ruby
- Que.clear!
- ```
-
- ### Other Setup
-
- Be sure to read the docs on [managing workers](https://github.com/chanks/que/blob/master/docs/managing_workers.md) for more information on using the worker pool.
-
- You'll also want to set up [logging](https://github.com/chanks/que/blob/master/docs/logging.md) and an [error handler](https://github.com/chanks/que/blob/master/docs/error_handling.md) to track errors raised by jobs.
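
The "Other Setup" section above defers to the logging and error-handling docs; a minimal sketch of that remaining configuration using the `Que.logger` and `Que.error_notifier` settings those docs describe (the reporting service is illustrative):

```ruby
require 'logger'

# Que emits JSON log lines; point them wherever your app logs.
Que.logger = Logger.new($stdout)

# Forward job errors to your notification system.
Que.error_notifier = proc do |error, job|
  # `job` is the job's database row as a hash, or nil if it couldn't be loaded.
  ErrorTracker.notify(error, job_class: job && job[:job_class]) # illustrative service
end
```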
data/docs/command_line_interface.md DELETED
@@ -1,45 +0,0 @@
- ## Command Line Interface
-
- ```
- usage: que [options] [file/to/require] ...
-     -h, --help                       Show this help text.
-     -i, --poll-interval [INTERVAL]   Set maximum interval between polls for available jobs, in seconds (default: 5)
-     -l, --log-level [LEVEL]          Set level at which to log to STDOUT (debug, info, warn, error, fatal) (default: info)
-     -q, --queue-name [NAME]          Set a queue name to work jobs from. Can be passed multiple times. (default: the default queue only)
-     -v, --version                    Print Que version and exit.
-     -w, --worker-count [COUNT]       Set number of workers in process (default: 6)
-         --connection-url [URL]       Set a custom database url to connect to for locking purposes.
-         --log-internals              Log verbosely about Que's internal state. Only recommended for debugging issues
-         --maximum-buffer-size [SIZE] Set maximum number of jobs to be cached in this process awaiting a worker (default: 8)
-         --minimum-buffer-size [SIZE] Set minimum number of jobs to be cached in this process awaiting a worker (default: 2)
-         --wait-period [PERIOD]       Set maximum interval between checks of the in-memory job queue, in milliseconds (default: 50)
-         --worker-priorities [LIST]   List of priorities to assign to workers, unspecified workers take jobs of any priority (default: 10,30,50)
- ```
-
- Some explanation of the more unusual options:
-
- ### worker-count and worker-priorities
-
- These options dictate the size and priority distribution of the worker pool. The default worker-count is 6 and the default worker-priorities is 10,30,50. This means that the default worker pool will have one worker that only works jobs with priorities under 10, one for priorities under 30, and one for priorities under 50. The leftover workers will work any job.
-
- For example, with these defaults, you could have a large backlog of jobs of priority 100. When a more important job (priority 40) comes in, there's guaranteed to be a free worker. If the process then becomes saturated with jobs of priority 40, and then a priority 20 job comes in, there's guaranteed to be a free worker for it, and so on.
-
- ### poll-interval
-
- This option sets the number of seconds the process will wait between polls of the job queue. Jobs that are ready to be worked immediately will be broadcast via the LISTEN/NOTIFY system, so polling is unnecessary for them - polling is only necessary for jobs that are scheduled in the future or which are being delayed due to errors. The default is 5 seconds.
-
- ### minimum-buffer-size and maximum-buffer-size
-
- These options set the size of the internal buffer that Que uses to cache job information until it's ready for workers. The default minimum is 2 and the maximum is 8, meaning that the process won't buffer more than 8 jobs that aren't yet ready to be worked, and will only resort to polling if the buffer dips below 2. If you don't want jobs to be buffered at all, you can set both of these values to zero.
-
- ### connection-url
-
- This option sets the URL to be used to open a connection to the database for locking purposes. By default, Que will simply use a connection from the connection pool for locking - this option is only useful if your application connections can't use advisory locks - for example, if they're passed through an external connection pool like PgBouncer. In that case, you'll need to use this option to specify your actual database URL so that Que can establish a direct connection.
-
- ### wait-period
-
- This option specifies (in milliseconds) how often the locking thread wakes up to check whether the workers have finished jobs, whether it's time to poll, etc. You shouldn't generally need to tweak this, but it may come in handy for some workloads. The default is 50 milliseconds.
-
- ### log-internals
-
- This option instructs Que to output a lot of information about its internal state to the logger. It should only be used if it becomes necessary to debug issues.
data/docs/error_handling.md DELETED
@@ -1,94 +0,0 @@
- ## Error Handling
-
- If an error is raised and left uncaught by your job, Que will save the error message and backtrace to the database and schedule the job to be retried later.
-
- If a given job fails repeatedly, Que will retry it at exponentially-increasing intervals equal to (failure_count^4 + 3) seconds. This means that a job will be retried 4 seconds after its first failure, 19 seconds after its second, 84 seconds after its third, 259 seconds after its fourth, and so on until it succeeds. This pattern is very similar to DelayedJob's. Alternately, you can define your own retry logic by setting an interval to delay each time, or a callable that accepts the number of failures and returns an interval:
-
- ```ruby
- class MyJob < Que::Job
-   # Just retry a failed job every 5 seconds:
-   self.retry_interval = 5
-
-   # Always retry this job immediately (not recommended, or transient
-   # errors will spam your error reporting):
-   self.retry_interval = 0
-
-   # Increase the delay by 30 seconds every time this job fails:
-   self.retry_interval = proc { |count| count * 30 }
- end
- ```
-
- There is a maximum_retry_count option for jobs. It defaults to 15 retries, which with the default retry interval means that a job will stop retrying after a little more than two days.
-
- ## Error Notifications
-
- If you're using an error notification system (highly recommended, of course), you can hook Que into it by setting a callable as the error notifier:
-
- ```ruby
- Que.error_notifier = proc do |error, job|
-   # Do whatever you want with the error object or job row here. Note that the
-   # job passed is not the actual job object, but the hash representing the job
-   # row in the database, which looks like:
-
-   # {
-   #   :priority => 100,
-   #   :run_at => "2017-09-15T20:18:52.018101Z",
-   #   :id => 172340879,
-   #   :job_class => "TestJob",
-   #   :error_count => 0,
-   #   :last_error_message => nil,
-   #   :queue => "default",
-   #   :last_error_backtrace => nil,
-   #   :finished_at => nil,
-   #   :expired_at => nil,
-   #   :args => [],
-   #   :data => {}
-   # }
-
-   # This is done because the job may not have been able to be deserialized
-   # properly, if the name of the job class was changed or the job class isn't
-   # loaded for some reason. The job argument may also be nil, if there was a
-   # connection failure or something similar.
- end
- ```
-
- ## Error-Specific Handling
-
- You can also define a handle_error method in your job, like so:
-
- ```ruby
- class MyJob < Que::Job
-   def run(*args)
-     # Your code goes here.
-   end
-
-   def handle_error(error)
-     case error
-     when TemporaryError then retry_in 10.seconds
-     when PermanentError then expire
-     else super # Default (exponential backoff) behavior.
-     end
-   end
- end
- ```
-
- The return value of handle_error determines whether the error object is passed to the error notifier. The helper methods like expire and retry_in return true, so these errors will be notified. You can explicitly return false to skip notification.
-
- ```ruby
- class MyJob < Que::Job
-   def handle_error(error)
-     case error
-     when AnnoyingError
-       retry_in 10.seconds
-       false
-     when TransientError
-       super
-       error_count > 3
-     else
-       super # Default (exponential backoff) behavior.
-     end
-   end
- end
- ```
-
- In this example, AnnoyingError will never be notified, while TransientError will only be notified once it has affected a given job at least three times.
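
For reference, the default backoff schedule described above is just `failure_count ** 4 + 3` seconds; a quick sketch that prints the first few intervals (4, 19, 84, 259, ...):

```ruby
(1..6).each do |failure_count|
  puts "failure ##{failure_count}: retry in #{failure_count**4 + 3}s"
end
# failure #1: retry in 4s
# failure #2: retry in 19s
# failure #3: retry in 84s
# failure #4: retry in 259s
# failure #5: retry in 628s
# failure #6: retry in 1299s
```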
data/docs/inspecting_the_queue.md DELETED
@@ -1,64 +0,0 @@
- ## Inspecting the Queue
-
- In order to remain simple and compatible with any ORM (or no ORM at all), Que is really just a very thin wrapper around some raw SQL. There are two methods available that query the jobs table and Postgres' system catalogs to retrieve information on the current state of the queue:
-
- ### Job Stats
-
- You can call `Que.job_stats` to return some aggregate data on the types of jobs currently in the queue. Example output:
-
- ```ruby
- [
-   {
-     :job_class=>"ChargeCreditCard",
-     :count=>10,
-     :count_working=>4,
-     :count_errored=>2,
-     :highest_error_count=>5,
-     :oldest_run_at=>2017-09-08 16:13:18 -0400
-   },
-   {
-     :job_class=>"SendRegistrationEmail",
-     :count=>1,
-     :count_working=>0,
-     :count_errored=>0,
-     :highest_error_count=>0,
-     :oldest_run_at=>2017-09-08 17:13:18 -0400
-   }
- ]
- ```
-
- This tells you that, for instance, there are ten ChargeCreditCard jobs in the queue, four of which are currently being worked, and two of which have experienced errors. One of them has started to process but experienced an error five times. The oldest_run_at is helpful for determining how long jobs have been sitting around, if you have a large backlog.
-
- ### Custom Queries
-
- If you're using ActiveRecord or Sequel, Que ships with models that wrap the job queue so you can write your own logic to inspect it. They include some helpful scopes to write your queries - see the gem source for a complete accounting.
-
- #### ActiveRecord Example
-
- ``` ruby
- # app/models/que_job.rb
-
- require 'que/active_record/model'
-
- class QueJob < Que::ActiveRecord::Model
- end
-
- QueJob.finished.to_sql # => "SELECT \"que_jobs\".* FROM \"que_jobs\" WHERE (\"que_jobs\".\"finished_at\" IS NOT NULL)"
-
- # You could also name the model whatever you like, or just query from
- # Que::ActiveRecord::Model directly if you don't need to write your own model
- # logic.
- ```
-
- #### Sequel Example
-
- ``` ruby
- # app/models/que_job.rb
-
- require 'que/sequel/model'
-
- class QueJob < Que::Sequel::Model
- end
-
- QueJob.finished # => #<Sequel::Postgres::Dataset: "SELECT * FROM \"public\".\"que_jobs\" WHERE (\"public\".\"que_jobs\".\"finished_at\" IS NOT NULL)">
- ```
data/docs/job_helper_methods.md DELETED
@@ -1,27 +0,0 @@
- ## Job Helper Methods
-
- There are a number of instance methods on Que::Job that you can use in your jobs, preferably in transactions. See [Writing Reliable Jobs](/writing_reliable_jobs.md) for more information on where to use these methods.
-
- ### destroy
-
- This method deletes the job from the queue table, ensuring that it won't be worked a second time.
-
- ### finish
-
- This method marks the current job as finished, ensuring that it won't be worked a second time. This is like destroy, in that it finalizes a job, but this method leaves the job in the table, in case you want to query it later.
-
- ### expire
-
- This method marks the current job as expired. It will be left in the table and won't be retried, but it will be easy to query for expired jobs. This method is called if the job exceeds its maximum_retry_count.
-
- ### retry_in
-
- This method marks the current job to be retried later. You can pass a numeric to this method, in which case that is the number of seconds after which it can be retried (`retry_in(10)`, `retry_in(0.5)`), or, if you're using ActiveSupport, you can pass in a duration object (`retry_in(10.minutes)`). This automatically happens, with an exponentially-increasing interval, when the job encounters an error.
-
- ### error_count
-
- This method returns the total number of times the job has errored, in case you want to modify the job's behavior after it has failed a given number of times.
-
- ### default_resolve_action
-
- If you don't perform a resolve action (destroy, finish, expire, retry_in) while the job is worked, Que will call this method for you. By default it simply calls `destroy`, but you can override it in your Job subclasses if you wish - for example, to call `finish`, or to invoke some more complicated logic.
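
A small sketch tying the helpers above together; the job, model, and error-handling choices are illustrative:

```ruby
class SyncAccountJob < Que::Job
  def run(account_id)
    account = Account.find_by(id: account_id) # illustrative model lookup
    if account.nil?
      expire   # nothing to sync and no point retrying; keep the row for auditing
    elsif account.sync!
      finish   # resolved, but left in the table so it can be queried later
    else
      retry_in((error_count + 1) * 60) # back off a little more on each attempt
    end
  end
end
```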
data/docs/logging.md DELETED
@@ -1,31 +0,0 @@
- ## Logging
-
- By default, Que logs important information in JSON to either Rails' logger (when running in a Rails web process) or STDOUT (when running via the `que` executable). So, your logs will look something like:
-
- ```
- I, [2017-08-12T05:07:31.094201 #4687] INFO -- : {"lib":"que","hostname":"lovelace","pid":21626,"thread":21471100,"event":"job_worked","job_id":6157665,"elapsed":0.531411}
- ```
-
- Of course you can have it log wherever you like:
-
- ```ruby
- Que.logger = Logger.new(...)
- ```
-
- If you don't like logging in JSON, you can also customize the format of the logging output by passing a callable object (such as a proc) to Que.log_formatter=. The proc should take a hash (the keys are symbols) and return a string. The keys and values are just as you would expect from the JSON output:
-
- ```ruby
- Que.log_formatter = proc do |data|
-   "Thread number #{data[:thread]} experienced a #{data[:event]}"
- end
- ```
-
- If the log formatter returns nil or false, nothing will be logged at all. You could use this to narrow down what you want to emit, for example:
-
- ```ruby
- Que.log_formatter = proc do |data|
-   if [:job_worked, :job_unavailable].include?(data[:event])
-     JSON.dump(data)
-   end
- end
- ```
data/docs/managing_workers.md DELETED
@@ -1,25 +0,0 @@
- ## Managing Workers
-
- Que uses a multithreaded pool of workers to run jobs in parallel - this allows you to save memory by working many jobs simultaneously in the same process. The `que` executable starts up a pool of 6 workers by default. This is fine for most use cases, but the ideal number for your app will depend on your interpreter and what types of jobs you're running.
-
- Ruby MRI has a global interpreter lock (GIL), which prevents it from using more than one CPU core at a time. Having multiple workers running makes sense if your jobs tend to spend a lot of time in I/O (waiting on complex database queries, sending emails, making HTTP requests, etc.), as most jobs do. However, if your jobs are doing a lot of work in Ruby, they'll be spending a lot of time blocking each other, and having too many workers running will cause you to lose efficiency to context-switching. So, you'll want to choose the appropriate number of workers for your use case.
-
- ### Working Jobs Via Executable
-
- ```shell
- # Run a pool of 6 workers:
- que
-
- # Or configure the number of workers:
- que --worker-count 10
- ```
-
- See `que -h` for a complete list of command-line options.
-
- ### Thread-Unsafe Application Code
-
- If your application code is not thread-safe, you won't want any workers to be processing jobs while anything else is happening in the Ruby process. So, you'll want to run a single worker at a time, like so:
-
- ```shell
- que --worker-count 1
- ```
data/docs/middleware.md DELETED
@@ -1,15 +0,0 @@
- ## Defining Middleware For Jobs
-
- You can define middleware to wrap jobs. For example:
-
- ``` ruby
- Que.middleware.push(
-   -> (job, &block) {
-     # Do stuff with the job object - report on it, count time elapsed, etc.
-     block.call
-     nil # Doesn't matter what's returned.
-   }
- )
- ```
-
- This API is experimental for the 1.0 beta and may change.
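
Note that the `lib/que.rb` diff earlier on this page registers this kind of hook via `Que.job_middleware` rather than `Que.middleware`; a rough sketch of the equivalent usage under the new name (the timing and reporting body is illustrative, not a definitive API reference):

```ruby
Que.job_middleware << -> (job, &block) {
  # Do stuff with the job object - report on it, count time elapsed, etc.
  started = Time.now
  block.call
  elapsed = Time.now - started
  Que.logger&.info("#{job.class} finished in #{elapsed.round(3)}s") # illustrative reporting
  nil # Doesn't matter what's returned.
}
```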
data/docs/migrating.md DELETED
@@ -1,27 +0,0 @@
- ## Migrating
-
- Some new releases of Que may require updates to the database schema. It's recommended that you integrate these updates alongside your other database migrations. For example, when Que released version 0.6.0, the schema version was updated from 2 to 3. If you're running ActiveRecord, you could make a migration to perform this upgrade like so:
-
- ```ruby
- class UpdateQue < ActiveRecord::Migration[5.0]
-   def self.up
-     Que.migrate! version: 3
-   end
-
-   def self.down
-     Que.migrate! version: 2
-   end
- end
- ```
-
- This will make sure that your database schema stays consistent with your codebase. If you're looking for something quicker and dirtier, you can always manually migrate in a console session:
-
- ```ruby
- # Change schema to version 3.
- Que.migrate! version: 3
-
- # Check your current schema version.
- Que.db_version #=> 3
- ```
-
- Note that you can remove Que from your database completely by migrating to version 0.