que 1.0.0.beta2 → 1.0.0.beta3
- checksums.yaml +4 -4
- data/CHANGELOG.1.0.beta.md +10 -0
- data/bin/command_line_interface.rb +48 -24
- data/docs/README.md +1 -0
- data/docs/command_line_interface.md +9 -5
- data/docs/logging.md +31 -0
- data/docs/using_sequel.md +16 -0
- data/lib/que/job_methods.rb +4 -0
- data/lib/que/locker.rb +18 -22
- data/lib/que/rails/railtie.rb +0 -2
- data/lib/que/version.rb +1 -1
- data/lib/que/worker.rb +27 -12
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 832beb15feb06f24511a7adec121b635639a87449f1403dcf8763cf8517ab11c
+  data.tar.gz: d39465285d79a8348753e4783a22c7432f369dc8ce0205c41872a87798cd5b48
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: bb927afb7f17dd55a2207686f2707bb994860a6c4a4fa2a30e79fb763a7d298b410e3dec36d4520b7617e996e7927b8b90c8fa1ab68a3e264d96b59df9c51a4d
+  data.tar.gz: 421aaa5ce29afb83ac376966b2f859a4ba356087fad248738a57b3e9c77bb193ee8f08d58320ec10b6aa9bb3587c9c05bf6c9d7a22c6da62d908ef6876cdd173
data/CHANGELOG.1.0.beta.md
CHANGED
@@ -1,3 +1,13 @@
+### 1.0.0.beta3 (2018-05-18)
+
+* Added support for customizing log levels for `job_worked` events (#217).
+
+* Began logging all `job_errored` events at the `ERROR` log level.
+
+* Fixed the Railtie when running in test mode (#214).
+
+* Tweaked the meanings of worker-priorities and worker-count options in the CLI, to better support use cases with low worker counts (#216).
+
 ### 1.0.0.beta2 (2018-04-13)
 
 * Fixed an incompatibility that caused the new locker to hang when using Rails in development mode (#213).
data/bin/command_line_interface.rb
CHANGED
@@ -18,12 +18,14 @@ module Que
       default_require_file: RAILS_ENVIRONMENT_FILE
     )
 
-      options
-      queues
-      log_level
-      log_internals
-      poll_interval
-      connection_url
+      options = {}
+      queues = []
+      log_level = 'info'
+      log_internals = false
+      poll_interval = 5
+      connection_url = nil
+      worker_count = nil
+      worker_priorities = nil
 
       parser =
         OptionParser.new do |opts|
@@ -58,6 +60,26 @@ module Que
             log_level = l
           end
 
+          opts.on(
+            '-p',
+            '--worker-priorities [LIST]',
+            Array,
+            "List of priorities to assign to workers (default: 10,30,50,any,any,any)",
+          ) do |priority_array|
+            worker_priorities =
+              priority_array.map do |p|
+                case p
+                when /\Aany\z/i
+                  nil
+                when /\A\d+\z/
+                  Integer(p)
+                else
+                  output.puts "Invalid priority option: '#{p}'. Please use an integer or the word 'any'."
+                  return 1
+                end
+              end
+          end
+
           opts.on(
             '-q',
             '--queue-name [NAME]',
@@ -69,6 +91,15 @@ module Que
             queues << queue_name
           end
 
+          opts.on(
+            '-w',
+            '--worker-count [COUNT]',
+            Integer,
+            "Set number of workers in process (default: 6)",
+          ) do |w|
+            worker_count = w
+          end
+
           opts.on(
             '-v',
             '--version',
@@ -79,15 +110,6 @@ module Que
             return 0
           end
 
-          opts.on(
-            '-w',
-            '--worker-count [COUNT]',
-            Integer,
-            "Set number of workers in process (default: 6)",
-          ) do |w|
-            options[:worker_count] = w
-          end
-
           opts.on(
             '--connection-url [URL]',
             String,
@@ -130,19 +152,21 @@ module Que
           ) do |p|
             options[:wait_period] = p
           end
-
-          opts.on(
-            '--worker-priorities [LIST]',
-            Array,
-            "List of priorities to assign to workers, " \
-            "unspecified workers take jobs of any priority (default: 10,30,50)",
-          ) do |p|
-            options[:worker_priorities] = p.map(&:to_i)
-          end
         end
 
       parser.parse!(args)
 
+      options[:worker_priorities] =
+        if worker_count && worker_priorities
+          worker_priorities.values_at(0...worker_count)
+        elsif worker_priorities
+          worker_priorities
+        elsif worker_count
+          Array.new(worker_count) { nil }
+        else
+          [10, 30, 50, nil, nil, nil]
+        end
+
       if args.length.zero?
         if File.exist?(default_require_file)
           args << default_require_file
data/docs/README.md
CHANGED
@@ -20,6 +20,7 @@ TODO: Fix doc links.
 - [Worker States](inspecting_the_queue.md#worker-states)
 - [Custom Queries](inspecting_the_queue.md#custom-queries)
 - [Logging](logging.md#logging)
+  - [Logging Job Completion](logging.md#logging-job-completion)
 - [Managing Workers](managing_workers.md#managing-workers)
 - [Working Jobs Via Executable](managing_workers.md#working-jobs-via-executable)
 - [Thread-Unsafe Application Code](managing_workers.md#thread-unsafe-application-code)
data/docs/command_line_interface.md
CHANGED
@@ -5,24 +5,28 @@ usage: que [options] [file/to/require] ...
     -h, --help                       Show this help text.
     -i, --poll-interval [INTERVAL]   Set maximum interval between polls for available jobs, in seconds (default: 5)
     -l, --log-level [LEVEL]          Set level at which to log to STDOUT (debug, info, warn, error, fatal) (default: info)
+    -p, --worker-priorities [LIST]   List of priorities to assign to workers (default: 10,30,50,any,any,any)
     -q, --queue-name [NAME]          Set a queue name to work jobs from. Can be passed multiple times. (default: the default queue only)
-    -v, --version                    Print Que version and exit.
     -w, --worker-count [COUNT]       Set number of workers in process (default: 6)
+    -v, --version                    Print Que version and exit.
         --connection-url [URL]       Set a custom database url to connect to for locking purposes.
         --log-internals              Log verbosely about Que's internal state. Only recommended for debugging issues
         --maximum-buffer-size [SIZE] Set maximum number of jobs to be locked and held in this process awaiting a worker (default: 8)
         --minimum-buffer-size [SIZE] Set minimum number of jobs to be locked and held in this process awaiting a worker (default: 2)
         --wait-period [PERIOD]       Set maximum interval between checks of the in-memory job queue, in milliseconds (default: 50)
-        --worker-priorities [LIST]   List of priorities to assign to workers, unspecified workers take jobs of any priority (default: 10,30,50)
 ```
 
 Some explanation of the more unusual options:
 
-### worker-
+### worker-priorities and worker-count
+
+These options dictate the size and priority distribution of the worker pool. The default worker-priorities is `10,30,50,any,any,any`. This means that the default worker pool will reserve one worker to only work jobs with priorities under 10, one for priorities under 30, and one for priorities under 50. Three more workers will work any job.
+
+For example, with these defaults, you could have a large backlog of jobs of priority 100. When a more important job (priority 40) comes in, there's guaranteed to be a free worker. If the process then becomes saturated with jobs of priority 40, and a priority 20 job comes in, there's guaranteed to be a free worker for it, and so on. You can pass a priority more than once to have multiple workers at that level (for example: `--worker-priorities=100,100,any,any`). This gives you a lot of freedom to manage your worker capacity at different priority levels.
 
-
+Instead of passing worker-priorities, you can pass a `worker-count` - this is a shorthand for creating the given number of workers at the `any` priority level. So, `--worker-count=3` is equivalent to passing `--worker-priorities=any,any,any`.
 
-
+If you pass both worker-count and worker-priorities, the count will trim or pad the priorities list with `any` workers. So, `--worker-priorities=20,30,40 --worker-count=6` would be the same as passing `--worker-priorities=20,30,40,any,any,any`.
 
 ### poll-interval
 
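To make the trimming and padding rules above concrete, here is a small editorial sketch of the resolution logic this release adds to the CLI. The method name `resolve_worker_priorities` is illustrative and not part of Que's API; the branch logic mirrors the `options[:worker_priorities]` assignment in the `command_line_interface.rb` diff above, where `nil` stands for an `any`-priority worker.

```ruby
# Illustrative sketch only - mirrors how the CLI combines worker-count and
# worker-priorities in this release; the method name is made up for clarity.
def resolve_worker_priorities(worker_count, worker_priorities)
  if worker_count && worker_priorities
    # values_at pads with nil (= "any") or trims to the requested count.
    worker_priorities.values_at(0...worker_count)
  elsif worker_priorities
    worker_priorities
  elsif worker_count
    Array.new(worker_count) { nil }
  else
    [10, 30, 50, nil, nil, nil]
  end
end

resolve_worker_priorities(6, [20, 30, 40])  # => [20, 30, 40, nil, nil, nil]
resolve_worker_priorities(3, nil)           # => [nil, nil, nil]
resolve_worker_priorities(nil, nil)         # => [10, 30, 50, nil, nil, nil]
```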
data/docs/logging.md
CHANGED
@@ -29,3 +29,34 @@ Que.log_formatter = proc do |data|
   end
 end
 ```
+
+## Logging Job Completion
+
+Que logs a `job_worked` event whenever a job completes, though by default this event is logged at the `DEBUG` level. Since people often run their applications at the `INFO` level or above, this can make the logs too quiet for some use cases. Similarly, you may want to log at a higher level if a time-sensitive job begins taking too long to run.
+
+You can solve these problems by configuring the level at which a job is logged on a per-job basis. Simply define a `log_level` method in your job class - it will be called with a float representing the number of seconds it took for the job to run, and it should return a symbol indicating what level to log the job at:
+
+```ruby
+class TimeSensitiveJob < Que::Job
+  def run(*args)
+    RemoteAPI.execute_important_request
+  end
+
+  def log_level(elapsed)
+    if elapsed > 60
+      # This job took over a minute! We should complain about it!
+      :warn
+    elsif elapsed > 30
+      # A little long, but no big deal!
+      :info
+    else
+      # This is fine, don't bother logging at all.
+      false
+    end
+  end
+end
+```
+
+This method should return a symbol that is a valid logging level (one of `[:debug, :info, :warn, :error, :fatal, :unknown]`). If the method returns anything other than one of these symbols, the job won't be logged.
+
+If a job errors, a `job_errored` event will be emitted at the `ERROR` log level. This is not currently configurable.
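For reference, the symbol returned by `log_level` only takes effect when it is one of the valid levels; anything else (such as `false` above) suppresses the log line, and an errored job is always logged at `ERROR`. A minimal sketch of that selection, mirroring the `VALID_LOG_LEVELS` check added in `que/worker.rb` further down this diff (the helper name here is illustrative, not Que's API):

```ruby
require 'set'

VALID_LOG_LEVELS = [:debug, :info, :warn, :error, :fatal, :unknown].to_set.freeze

# Illustrative helper: returns the level the event would be logged at, or nil to skip logging.
def effective_log_level(errored, level_from_job)
  level = errored ? :error : level_from_job
  VALID_LOG_LEVELS.include?(level) ? level : nil
end

effective_log_level(false, :warn)   # => :warn
effective_log_level(false, false)   # => nil (nothing is logged)
effective_log_level(true,  :debug)  # => :error (job_errored events log at ERROR)
```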
data/docs/using_sequel.md
CHANGED
@@ -7,6 +7,22 @@ DB = Sequel.connect(ENV['DATABASE_URL'])
 Que.connection = DB
 ```
 
+If you are using Sequel's migrator, your app initialization won't happen, so you may need to tweak your migrations to `require 'que'` and set its connection:
+
+```ruby
+require 'que'
+Sequel.migration do
+  up do
+    Que.connection = self
+    Que.migrate! :version => 3
+  end
+  down do
+    Que.connection = self
+    Que.migrate! :version => 0
+  end
+end
+```
+
 Then you can safely use the same database object to transactionally protect your jobs:
 
 ```ruby
data/lib/que/job_methods.rb
CHANGED
data/lib/que/locker.rb
CHANGED
@@ -47,8 +47,7 @@ module Que
     DEFAULT_WAIT_PERIOD = 50
     DEFAULT_MINIMUM_BUFFER_SIZE = 2
     DEFAULT_MAXIMUM_BUFFER_SIZE = 8
-
-    DEFAULT_WORKER_PRIORITIES = [10, 30, 50].freeze
+    DEFAULT_WORKER_PRIORITIES = [10, 30, 50, nil, nil, nil].freeze
 
     def initialize(
       queues: [Que.default_queue],
@@ -59,7 +58,6 @@ module Que
       wait_period: DEFAULT_WAIT_PERIOD,
       maximum_buffer_size: DEFAULT_MAXIMUM_BUFFER_SIZE,
       minimum_buffer_size: DEFAULT_MINIMUM_BUFFER_SIZE,
-      worker_count: DEFAULT_WORKER_COUNT,
       worker_priorities: DEFAULT_WORKER_PRIORITIES,
       on_worker_start: nil
     )
@@ -71,19 +69,16 @@ module Que
 
       Que.assert Numeric, poll_interval
       Que.assert Numeric, wait_period
-      Que.assert Integer, worker_count
 
       Que.assert Array, worker_priorities
-      worker_priorities.each { |p| Que.assert(Integer, p) }
-
-      all_worker_priorities = worker_priorities.values_at(0...worker_count)
+      worker_priorities.each { |p| Que.assert([Integer, NilClass], p) }
 
       # We use a JobBuffer to track jobs and pass them to workers, and a
       # ResultQueue to receive messages from workers.
       @job_buffer = JobBuffer.new(
         maximum_size: maximum_buffer_size,
         minimum_size: minimum_buffer_size,
-        priorities:
+        priorities: worker_priorities.uniq,
       )
 
       @result_queue = ResultQueue.new
@@ -99,7 +94,6 @@ module Que
         wait_period: wait_period,
         maximum_buffer_size: maximum_buffer_size,
         minimum_buffer_size: minimum_buffer_size,
-        worker_count: worker_count,
         worker_priorities: worker_priorities,
       }
     end
@@ -110,13 +104,8 @@ module Que
       @queue_names = queues.is_a?(Hash) ? queues.keys : queues
       @wait_period = wait_period.to_f / 1000 # Milliseconds to seconds.
 
-      # If the worker_count exceeds the array of priorities it'll result in
-      # extra workers that will work jobs of any priority. For example, the
-      # default worker_count of 6 and the default worker priorities of [10, 30,
-      # 50] will result in three workers that only work jobs that meet those
-      # priorities, and three workers that will work any job.
       @workers =
-
+        worker_priorities.map do |priority|
           Worker.new(
             priority: priority,
             job_buffer: @job_buffer,
@@ -138,13 +127,20 @@ module Que
       if connection_url
         uri = URI.parse(connection_url)
 
-
-
-
-
-
-
-
+        opts =
+          {
+            host: uri.host,
+            user: uri.user,
+            password: uri.password,
+            port: uri.port || 5432,
+            dbname: uri.path[1..-1],
+          }
+
+        if uri.query
+          opts.merge!(Hash[uri.query.split("&").map{|s| s.split('=')}.map{|a,b| [a.to_sym, b]}])
+        end
+
+        opts
       else
         Que.pool.checkout do |conn|
           c = conn.wrapped_connection
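As a rough illustration of the connection-url handling added above, the following standalone sketch reproduces the same parsing steps outside of the Locker; the sample URL and the values shown in comments are hypothetical.

```ruby
require 'uri'

# Hypothetical URL, parsed the same way the new Locker code above parses connection_url.
uri = URI.parse("postgres://que_user:secret@db.example.com:6432/que_db?sslmode=require")

opts = {
  host:     uri.host,         # "db.example.com"
  user:     uri.user,         # "que_user"
  password: uri.password,     # "secret"
  port:     uri.port || 5432, # 6432 here; 5432 when the URL omits a port
  dbname:   uri.path[1..-1],  # "que_db"
}

# Query parameters are merged in as symbol keys, e.g. { sslmode: "require" }.
if uri.query
  opts.merge!(Hash[uri.query.split("&").map { |s| s.split('=') }.map { |a, b| [a.to_sym, b] }])
end

opts # => options hash built from the URL
```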
data/lib/que/rails/railtie.rb
CHANGED
data/lib/que/version.rb
CHANGED
data/lib/que/worker.rb
CHANGED
@@ -3,10 +3,14 @@
 # Workers wrap threads which continuously pull job pks from JobBuffer objects,
 # fetch and work those jobs, and export relevant data to ResultQueues.
 
+require 'set'
+
 module Que
   class Worker
     attr_reader :thread, :priority
 
+    VALID_LOG_LEVELS = [:debug, :info, :warn, :error, :fatal, :unknown].to_set.freeze
+
     SQL[:check_job] =
       %{
         SELECT 1 AS one
@@ -91,20 +95,31 @@ module Que
 
         Que.run_job_middleware(instance) { instance.tap(&:_run) }
 
-
-          level: :debug,
-          job_id: metajob.id,
-          elapsed: (Time.now - start),
-        }
+        elapsed = Time.now - start
 
-
-
-
-
-
-
+        log_level =
+          if instance.que_error
+            :error
+          else
+            instance.log_level(elapsed)
+          end
+
+        if VALID_LOG_LEVELS.include?(log_level)
+          log_message = {
+            level: log_level,
+            job_id: metajob.id,
+            elapsed: elapsed,
+          }
 
-
+          if error = instance.que_error
+            log_message[:event] = :job_errored
+            log_message[:error] = "#{error.class}: #{error.message}".slice(0, 500)
+          else
+            log_message[:event] = :job_worked
+          end
+
+          Que.log(log_message)
+        end
 
         instance
       rescue => error
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: que
 version: !ruby/object:Gem::Version
-  version: 1.0.0.beta2
+  version: 1.0.0.beta3
 platform: ruby
 authors:
 - Chris Hanks
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-
+date: 2018-05-18 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler