canvas-jobs 0.10.6 → 0.11.0
- checksums.yaml +4 -4
- data/lib/delayed/cli.rb +2 -5
- data/lib/delayed/lifecycle.rb +1 -0
- data/lib/delayed/pool.rb +22 -4
- data/lib/delayed/settings.rb +2 -0
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/in_process.rb +13 -0
- data/lib/delayed/work_queue/parent_process.rb +180 -0
- data/lib/delayed/worker.rb +7 -11
- data/lib/delayed_job.rb +2 -0
- data/spec/active_record_job_spec.rb +3 -3
- data/spec/delayed/work_queue/in_process_spec.rb +31 -0
- data/spec/delayed/work_queue/parent_process_spec.rb +159 -0
- data/spec/shared/delayed_batch.rb +1 -1
- data/spec/shared/performable_method.rb +5 -5
- data/spec/shared/shared_backend.rb +5 -5
- data/spec/shared/worker.rb +2 -2
- metadata +10 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1682d0b7ae6c1e398bab7b3ed4e93b45c5f667f6
+  data.tar.gz: 9bcfe44b61d28af0f81f9e9036832468d5aad5a8
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 258d4db18ca5cdc41940d03488b3537ef39f5c95385bad4fe6a007013d3785746bea9cc13218db45ed04fcc0d6eb157cc0b052039e294e1d4a7a5bd4615acd76
+  data.tar.gz: 9e085133db62875654a88b6955a85e53f7e6aaeec14d067d04e912cf3728fcaacd65036b08266fb97f90ec9c53a32d7368f1ae753b2e2ea3a793e3b4ed481b67
data/lib/delayed/cli.rb
CHANGED
@@ -4,10 +4,8 @@ module Delayed
 class CLI
   def initialize(args = ARGV)
     @args = args
-    # config that will be applied on Settings
+    # config that will be applied on Settings and passed to the created Pool
     @config = {}
-    # worker configs that will be passed to the created Pool
-    @worker_configs = []
     # CLI options that will be kept to this class
     @options = {
       :config_file => Settings.default_worker_config_name,
@@ -56,7 +54,6 @@ class CLI

   def load_and_apply_config!
     @config = Settings.worker_config(@options[:config_file])
-    @worker_configs = @config.delete(:workers)
     Settings.apply_worker_config!(@config)
   end

@@ -88,7 +85,7 @@ class CLI
   def start
     load_rails
     tail_rails_log unless daemon.daemonized?
-    Delayed::Pool.new(@
+    Delayed::Pool.new(@config).start
   end

   def load_rails
data/lib/delayed/lifecycle.rb
CHANGED
data/lib/delayed/pool.rb
CHANGED
@@ -6,13 +6,12 @@ class Pool
   attr_reader :workers

   def initialize(*args)
-    if args.
-
+    if args.first.is_a?(Hash)
+      @config = args.first
     else
       warn "Calling Delayed::Pool.new directly is deprecated. Use `Delayed::CLI.new.run()` instead."
     end
     @workers = {}
-    @config = { workers: worker_configs }
   end

   def run
@@ -66,16 +65,31 @@ class Pool
   def spawn_all_workers
     ActiveRecord::Base.connection_handler.clear_all_connections!

+    if @config[:work_queue] == 'parent_process'
+      @work_queue = WorkQueue::ParentProcess.new
+      spawn_work_queue
+    end
+
     @config[:workers].each do |worker_config|
       (worker_config[:workers] || 1).times { spawn_worker(worker_config) }
     end
   end

+  def spawn_work_queue
+    parent_pid = Process.pid
+    pid = fork_with_reconnects do
+      $0 = "delayed_jobs_work_queue#{Settings.pool_procname_suffix}"
+      @work_queue.server(parent_pid: parent_pid).run
+    end
+    workers[pid] = :work_queue
+  end
+
   def spawn_worker(worker_config)
     if worker_config[:periodic]
       return # backwards compat
     else
       worker_config[:parent_pid] = Process.pid
+      worker_config[:work_queue] = @work_queue.client if @work_queue
       worker = Delayed::Worker.new(worker_config)
     end

@@ -125,8 +139,12 @@ class Pool
       child = Process.wait
       if workers.include?(child)
         worker = workers.delete(child)
-
+        case worker
+        when :periodic_audit
           say "ran auditor: #{worker}"
+        when :work_queue
+          say "work queue exited, restarting", :info
+          spawn_work_queue
         else
           say "child exited: #{child}, restarting", :info
           # fork to handle unlocking (to prevent polluting the parent with worker objects)
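Note: the `spawn_all_workers` change above only activates the new work queue process when the pool config asks for it. A minimal sketch of driving that branch directly, assuming a booted Rails app; normally `Delayed::CLI#start` builds this hash from the worker config file, and the queue name and worker count here are illustrative only:

# Hypothetical config hash mirroring what CLI passes to Pool.new(@config).start
config = {
  work_queue: 'parent_process',                       # spawn a WorkQueue::ParentProcess server process
  workers: [{ queue: 'canvas_queue', workers: 2 }]    # each entry fans out via spawn_worker
}
Delayed::Pool.new(config).start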
data/lib/delayed/settings.rb
CHANGED
@@ -17,6 +17,7 @@ module Delayed
     :disable_periodic_jobs,
     :disable_automatic_orphan_unlocking,
     :last_ditch_logfile,
+    :parent_process_client_timeout,
   ]
   SETTINGS_WITH_ARGS = [ :num_strands ]

@@ -43,6 +44,7 @@ module Delayed
     self.fetch_batch_size = 5
     self.select_random_from_batch = false
     self.silence_periodic_log = false
+    self.parent_process_client_timeout = 10.0

     self.num_strands = ->(strand_name){ nil }
     self.default_job_options = ->{ Hash.new }
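Note: the new `parent_process_client_timeout` setting defaults to 10.0 seconds and bounds each Marshal read/write in the work queue server (see `client_timeout` in parent_process.rb below). A deployment could override it like any other setting; the 5.0 value here is only an example:

Delayed::Settings.parent_process_client_timeout = 5.0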
data/lib/delayed/version.rb
CHANGED
data/lib/delayed/work_queue/in_process.rb
ADDED
@@ -0,0 +1,13 @@
+module Delayed
+module WorkQueue
+# The simplest possible implementation of a WorkQueue -- just turns around and
+# queries the queue inline.
+class InProcess
+  def get_and_lock_next_available(worker_name, queue_name, min_priority, max_priority)
+    Delayed::Worker.lifecycle.run_callbacks(:work_queue_pop, self) do
+      Delayed::Job.get_and_lock_next_available(worker_name, queue_name, min_priority, max_priority)
+    end
+  end
+end
+end
+end
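Note: both work queue implementations wrap the actual query in the new `:work_queue_pop` lifecycle event, and the specs later in this diff exercise it the same way. A sketch of tapping the event from application code; the timing/logging here is illustrative, not part of the gem:

Delayed::Worker.lifecycle.around(:work_queue_pop) do |work_queue, &block|
  started = Time.now
  job = block.call(work_queue)                       # run the actual pop
  Rails.logger.debug("work_queue_pop via #{work_queue.class} took #{Time.now - started}s") if defined?(Rails)
  job                                                # return the popped job (or nil) unchanged
end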
data/lib/delayed/work_queue/parent_process.rb
ADDED
@@ -0,0 +1,180 @@
+require 'socket'
+require 'tempfile'
+require 'timeout'
+
+module Delayed
+module WorkQueue
+# ParentProcess is a WorkQueue implementation that spawns a separate worker
+# process for querying the queue. Each Worker child process sends requests to
+# the ParentProcess via IPC, and receives responses. This centralized queue
+# querying cuts down on db queries and lock contention, and allows the
+# possibility for other centralized logic such as notifications when all workers
+# are idle.
+#
+# The IPC implementation uses Unix stream sockets and Ruby's built-in Marshal
+# functionality. The ParentProcess creates a Unix socket on the filesystem in
+# the tmp directory, so that if a worker process dies and is restarted it can
+# reconnect to the socket.
+#
+# While Unix and IP sockets are API compatible, we take a lot of shortcuts
+# because we know it's just a local Unix socket. If we ever wanted to swap this
+# out for a TCP/IP socket and have the WorkQueue running on another host, we'd
+# want to be a lot more robust about partial reads/writes and timeouts.
+class ParentProcess
+  class ProtocolError < RuntimeError
+  end
+
+  def initialize
+    @path = self.class.generate_socket_path
+  end
+
+  def self.generate_socket_path
+    # We utilize Tempfile as a convenient way to get a socket filename in the
+    # writeable temp directory. However, since we destroy the normal file and
+    # write a unix socket file to the same location, we lose the hard uniqueness
+    # guarantees of Tempfile. This is OK for this use case, we only generate one
+    # Tempfile with this prefix.
+    tmp = Tempfile.new("canvas-jobs-#{Process.pid}-")
+    path = tmp.path
+    tmp.close!
+    path
+  end
+
+  def server(parent_pid: nil)
+    # The unix_server_socket method takes care of cleaning up any existing
+    # socket for us if the work queue process dies and is restarted.
+    listen_socket = Socket.unix_server_socket(@path)
+    Server.new(listen_socket, parent_pid: parent_pid)
+  end
+
+  def client
+    Client.new(Addrinfo.unix(@path))
+  end
+
+  class Client
+    attr_reader :addrinfo
+
+    def initialize(addrinfo)
+      @addrinfo = addrinfo
+    end
+
+    def get_and_lock_next_available(name, queue_name, min_priority, max_priority)
+      @socket ||= @addrinfo.connect
+      Marshal.dump([name, queue_name, min_priority, max_priority], @socket)
+      response = Marshal.load(@socket)
+      unless response.nil? || (response.is_a?(Delayed::Job) && response.locked_by == name)
+        raise(ProtocolError, "response is not a locked job: #{response.inspect}")
+      end
+      response
+    rescue SystemCallError, IOError
+      # The work queue process died. Return nil to signal the worker
+      # process should sleep as if no job was found, and then retry.
+      @socket = nil
+      nil
+    end
+  end
+
+  class Server
+    attr_reader :listen_socket
+
+    def initialize(listen_socket, parent_pid: nil)
+      @listen_socket = listen_socket
+      @parent_pid = parent_pid
+      @clients = {}
+    end
+
+    def connected_clients
+      @clients.size
+    end
+
+    def all_workers_idle?
+      !@clients.any? { |_, c| c.working }
+    end
+
+    def say(msg, level = :debug)
+      if defined?(Rails.logger) && Rails.logger
+        Rails.logger.send(level, "[#{Process.pid}]Q #{msg}")
+      else
+        puts(msg)
+      end
+    end
+
+    # run the server queue worker
+    # this method does not return, only exits or raises an exception
+    def run
+      say "Starting work queue process"
+
+      while !exit?
+        run_once
+      end
+
+    rescue => e
+      say "WorkQueue Server died: #{e.inspect}"
+      raise
+    end
+
+    def run_once
+      handles = @clients.keys + [@listen_socket]
+      readable, _, _ = IO.select(handles, nil, nil, 1)
+      if readable
+        readable.each { |s| handle_read(s) }
+      end
+    end
+
+    def handle_read(socket)
+      if socket == @listen_socket
+        handle_accept
+      else
+        handle_request(socket)
+      end
+    end
+
+    # Any error on the listen socket other than WaitReadable will bubble up
+    # and terminate the work queue process, to be restarted by the parent daemon.
+    def handle_accept
+      client, _addr = @listen_socket.accept_nonblock
+      if client
+        @clients[client] = ClientState.new(false)
+      end
+    rescue IO::WaitReadable
+      # ignore and just try accepting again next time through the loop
+    end
+
+    def handle_request(socket)
+      # There is an assumption here that the client will never send a partial
+      # request and then leave the socket open. Doing so would leave us hanging
+      # here forever. This is only a reasonable assumption because we control
+      # the client.
+      request = client_timeout { Marshal.load(socket) }
+      response = nil
+      Delayed::Worker.lifecycle.run_callbacks(:work_queue_pop, self) do
+        response = Delayed::Job.get_and_lock_next_available(*request)
+        @clients[socket].working = !response.nil?
+      end
+      client_timeout { Marshal.dump(response, socket) }
+    rescue SystemCallError, IOError, Timeout::Error
+      # this socket went away
+      begin
+        socket.close
+      rescue IOError
+      end
+      @clients.delete(socket)
+    end
+
+    def exit?
+      parent_exited?
+    end
+
+    def parent_exited?
+      @parent_pid && @parent_pid != Process.ppid
+    end
+
+    def client_timeout
+      Timeout.timeout(Settings.parent_process_client_timeout) { yield }
+    end
+
+    ClientState = Struct.new(:working)
+  end
+end
+end
+end
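Note: the comment block above describes the whole protocol, and it is small enough to see end to end: the client writes one Marshal'd four-element request down a Unix stream socket and reads back one Marshal'd response (a locked Delayed::Job or nil). A standalone sketch of that round trip outside the gem, with a made-up socket path and a hard-coded nil response standing in for the real Delayed::Job.get_and_lock_next_available call:

require 'socket'

path = "/tmp/example-work-queue.sock"   # stand-in for ParentProcess.generate_socket_path
server = Socket.unix_server_socket(path)

client = Thread.new do
  sock = Addrinfo.unix(path).connect
  Marshal.dump(["worker_name", "my_queue", nil, nil], sock)  # the request tuple
  Marshal.load(sock)                                         # the response: a job or nil
end

conn, _addr = server.accept
request = Marshal.load(conn)     # => ["worker_name", "my_queue", nil, nil]
Marshal.dump(nil, conn)          # "no job available"; the real server would lock and return a job
conn.close

p client.value                   # => nil
File.unlink(path)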
data/lib/delayed/worker.rb
CHANGED
@@ -6,7 +6,7 @@ require 'tmpdir'
 require 'set'

 class Worker
-  attr_reader :config, :
+  attr_reader :config, :queue_name, :min_priority, :max_priority, :work_queue

   # Callback to fire when a delayed job fails max_attempts times. If this
   # callback is defined, then the value of destroy_failed_jobs is ignored, and
@@ -32,11 +32,12 @@ class Worker
     @exit = false
     @config = options
     @parent_pid = options[:parent_pid]
-    @
+    @queue_name = options[:queue] || Settings.queue
     @min_priority = options[:min_priority]
     @max_priority = options[:max_priority]
     @max_job_count = options[:worker_max_job_count].to_i
     @max_memory_usage = options[:worker_max_memory_usage].to_i
+    @work_queue = options[:work_queue] || WorkQueue::InProcess.new
     @job_count = 0

     app = Rails.application
@@ -93,14 +94,9 @@ class Worker

   def run
     self.class.lifecycle.run_callbacks(:loop, self) do
-      job =
-
-
-        name,
-        queue,
-        min_priority,
-        max_priority)
-      end
+      job = self.class.lifecycle.run_callbacks(:pop, self) do
+        work_queue.get_and_lock_next_available(name, queue_name, min_priority, max_priority)
+      end

       if job
         configure_for_job(job) do
@@ -122,7 +118,7 @@ class Worker
          end
        end
      else
-        set_process_name("wait:#{Settings.worker_procname_prefix}#{@
+        set_process_name("wait:#{Settings.worker_procname_prefix}#{@queue_name}:#{min_priority || 0}:#{max_priority || 'max'}")
        sleep(Settings.sleep_delay + (rand * Settings.sleep_delay_stagger))
      end
    end
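Note: with this change the worker no longer talks to Delayed::Job directly; anything responding to get_and_lock_next_available(worker_name, queue_name, min_priority, max_priority) can be injected via options[:work_queue], and WorkQueue::InProcess is only the default. A hedged sketch of that duck type (the class name is made up, not part of the gem):

# Illustrative only: a work queue stub that always reports an empty queue,
# so a worker handed this object would just hit the sleep branch above.
class AlwaysEmptyWorkQueue
  def get_and_lock_next_available(worker_name, queue_name, min_priority, max_priority)
    nil
  end
end

# e.g. worker_config[:work_queue] = AlwaysEmptyWorkQueue.new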
data/lib/delayed_job.rb
CHANGED
data/spec/active_record_job_spec.rb
CHANGED
@@ -19,7 +19,7 @@ describe 'Delayed::Backed::ActiveRecord::Job' do
     allow(Delayed::Job::Failed).to receive(:create).and_raise(RuntimeError)
     job = "test".send_later_enqueue_args :reverse, no_delay: true
     job_id = job.id
-    proc { job.fail! }.should raise_error
+    proc { job.fail! }.should raise_error(RuntimeError)
     proc { Delayed::Job.find(job_id) }.should raise_error(ActiveRecord::RecordNotFound)
     Delayed::Job.count.should == 0
   end
@@ -77,11 +77,11 @@ describe 'Delayed::Backed::ActiveRecord::Job' do
   end

   it "should raise error when holding failed jobs" do
-    expect { Delayed::Job.bulk_update('hold', :flavor => 'failed', :query => @query) }.to raise_error
+    expect { Delayed::Job.bulk_update('hold', :flavor => 'failed', :query => @query) }.to raise_error(RuntimeError)
   end

   it "should raise error unholding failed jobs" do
-    expect { Delayed::Job.bulk_update('unhold', :flavor => 'failed', :query => @query) }.to raise_error
+    expect { Delayed::Job.bulk_update('unhold', :flavor => 'failed', :query => @query) }.to raise_error(RuntimeError)
   end
 end

data/spec/delayed/work_queue/in_process_spec.rb
ADDED
@@ -0,0 +1,31 @@
+require 'spec_helper'
+
+RSpec.describe Delayed::WorkQueue::InProcess do
+  before :all do
+    Delayed.select_backend(Delayed::Backend::ActiveRecord::Job)
+  end
+
+  after :all do
+    Delayed.send(:remove_const, :Job)
+  end
+
+  after :each do
+    Delayed::Worker.lifecycle.reset!
+  end
+
+  let(:subject) { described_class.new }
+  let(:args) { ["worker_name", "queue_name", 1, 2] }
+
+  it 'triggers the lifecycle event around the pop' do
+    called = false
+    Delayed::Worker.lifecycle.around(:work_queue_pop) do |queue, &cb|
+      expect(queue).to eq(subject)
+      expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*args).and_return(:job)
+      called = true
+      cb.call(queue)
+    end
+    job = subject.get_and_lock_next_available(*args)
+    expect(job).to eq(:job)
+    expect(called).to eq(true)
+  end
+end
data/spec/delayed/work_queue/parent_process_spec.rb
ADDED
@@ -0,0 +1,159 @@
+require 'spec_helper'
+
+RSpec.describe Delayed::WorkQueue::ParentProcess do
+  before :all do
+    Delayed.select_backend(Delayed::Backend::ActiveRecord::Job)
+  end
+
+  after :all do
+    Delayed.send(:remove_const, :Job)
+  end
+
+  after :each do
+    Delayed::Worker.lifecycle.reset!
+  end
+
+  let(:subject) { described_class.new }
+
+  it 'generates a server listening on a valid unix socket' do
+    server = subject.server
+    expect(server).to be_a(Delayed::WorkQueue::ParentProcess::Server)
+    expect(server.listen_socket.local_address.unix?).to be(true)
+    expect { server.listen_socket.accept_nonblock }.to raise_error(IO::WaitReadable)
+  end
+
+  it 'generates a client connected to the server unix socket' do
+    server = subject.server
+    client = subject.client
+    expect(client).to be_a(Delayed::WorkQueue::ParentProcess::Client)
+    expect(client.addrinfo.unix?).to be(true)
+    expect(client.addrinfo.unix_path).to eq(server.listen_socket.local_address.unix_path)
+  end
+
+  describe Delayed::WorkQueue::ParentProcess::Client do
+    let(:subject) { described_class.new(addrinfo) }
+    let(:addrinfo) { double('Addrinfo') }
+    let(:connection) { double('Socket') }
+    let(:args) { ["worker_name", "queue_name", 1, 2] }
+    let(:job) { Delayed::Job.new(locked_by: "worker_name") }
+
+    it 'marshals the given arguments to the server and returns the response' do
+      expect(addrinfo).to receive(:connect).once.and_return(connection)
+      expect(Marshal).to receive(:dump).with(args, connection).ordered
+      expect(Marshal).to receive(:load).with(connection).and_return(job).ordered
+      response = subject.get_and_lock_next_available(*args)
+      expect(response).to eq(job)
+    end
+
+    it 'returns nil and then reconnects on socket error' do
+      expect(addrinfo).to receive(:connect).once.and_return(connection)
+      expect(Marshal).to receive(:dump).and_raise(SystemCallError.new("failure"))
+      response = subject.get_and_lock_next_available(*args)
+      expect(response).to be_nil
+
+      expect(addrinfo).to receive(:connect).once.and_return(connection)
+      expect(Marshal).to receive(:dump).with(args, connection)
+      expect(Marshal).to receive(:load).with(connection).and_return(job)
+      response = subject.get_and_lock_next_available(*args)
+      expect(response).to eq(job)
+    end
+
+    it 'errors if the response is not a locked job' do
+      expect(addrinfo).to receive(:connect).once.and_return(connection)
+      expect(Marshal).to receive(:dump).with(args, connection)
+      expect(Marshal).to receive(:load).with(connection).and_return(:not_a_job)
+      expect { subject.get_and_lock_next_available(*args) }.to raise_error(Delayed::WorkQueue::ParentProcess::ProtocolError)
+    end
+
+    it 'errors if the response is a job not locked by this worker' do
+      expect(addrinfo).to receive(:connect).once.and_return(connection)
+      expect(Marshal).to receive(:dump).with(args, connection)
+      job.locked_by = "somebody_else"
+      expect(Marshal).to receive(:load).with(connection).and_return(job)
+      expect { subject.get_and_lock_next_available(*args) }.to raise_error(Delayed::WorkQueue::ParentProcess::ProtocolError)
+    end
+  end
+
+  describe Delayed::WorkQueue::ParentProcess::Server do
+    let(:subject) { described_class.new(listen_socket) }
+    let(:listen_socket) { Socket.unix_server_socket(Delayed::WorkQueue::ParentProcess.generate_socket_path) }
+    let(:args) { [1,2,3] }
+    let(:job) { :a_job }
+
+    it 'accepts new clients' do
+      client = Socket.unix(subject.listen_socket.local_address.unix_path)
+      expect { subject.run_once }.to change(subject, :connected_clients).by(1)
+    end
+
+    it 'queries the queue on client request' do
+      client = Socket.unix(subject.listen_socket.local_address.unix_path)
+      subject.run_once

+      expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*args).and_return(job)
+      Marshal.dump(args, client)
+      subject.run_once
+      expect(Marshal.load(client)).to eq(job)
+    end
+
+    it 'drops the client on i/o error' do
+      client = Socket.unix(subject.listen_socket.local_address.unix_path)
+      subject.run_once
+
+      Marshal.dump(args, client)
+
+      expect(Marshal).to receive(:load).and_raise(IOError.new("socket went away"))
+      expect { subject.run_once }.to change(subject, :connected_clients).by(-1)
+    end
+
+    it 'drops the client on timeout' do
+      client = Socket.unix(subject.listen_socket.local_address.unix_path)
+      subject.run_once
+
+      Marshal.dump(args, client)
+
+      expect(Marshal).to receive(:load).and_raise(Timeout::Error.new("socket timed out"))
+      expect(Timeout).to receive(:timeout).with(Delayed::Settings.parent_process_client_timeout).and_yield
+      expect { subject.run_once }.to change(subject, :connected_clients).by(-1)
+    end
+
+    it 'tracks when clients are idle' do
+      expect(subject.all_workers_idle?).to be(true)
+
+      client = Socket.unix(subject.listen_socket.local_address.unix_path)
+      subject.run_once
+      expect(subject.all_workers_idle?).to be(true)
+
+      expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*args).and_return(job)
+      Marshal.dump(args, client)
+      subject.run_once
+      expect(subject.all_workers_idle?).to be(false)
+
+      expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*args).and_return(nil)
+      Marshal.dump(args, client)
+      subject.run_once
+      expect(subject.all_workers_idle?).to be(true)
+    end
+
+    it 'triggers the lifecycle event around the pop' do
+      called = false
+      client = Socket.unix(subject.listen_socket.local_address.unix_path)
+      subject.run_once
+
+      Delayed::Worker.lifecycle.around(:work_queue_pop) do |queue, &cb|
+        expect(subject.all_workers_idle?).to be(true)
+        expect(queue).to eq(subject)
+        expect(Delayed::Job).to receive(:get_and_lock_next_available).with(*args).and_return(job)
+        called = true
+        res = cb.call(queue)
+        expect(subject.all_workers_idle?).to be(false)
+        res
+      end
+
+      Marshal.dump(args, client)
+      subject.run_once
+
+      expect(Marshal.load(client)).to eq(job)
+      expect(called).to eq(true)
+    end
+  end
+end
data/spec/shared/delayed_batch.rb
CHANGED
@@ -29,7 +29,7 @@ shared_examples_for 'Delayed::Batch' do
     }
     Delayed::Job.jobs_count(:current).should == 1
     job = Delayed::Job.find_available(1).first
-    expect{ job.invoke_job }.to raise_error
+    expect{ job.invoke_job }.to raise_error(RuntimeError)
   end

   it "should create valid jobs" do
data/spec/shared/performable_method.rb
CHANGED
@@ -1,12 +1,12 @@
 shared_examples_for 'Delayed::PerformableMethod' do
-
+
   it "should not ignore ActiveRecord::RecordNotFound errors because they are not always permanent" do
     story = Story.create :text => 'Once upon...'
     p = Delayed::PerformableMethod.new(story, :tell, [])
     story.destroy
-    lambda { YAML.load(p.to_yaml) }.should raise_error
+    lambda { YAML.load(p.to_yaml) }.should raise_error(Delayed::Backend::RecordNotFound)
   end
-
+
   it "should store the object using native YAML even if its an active record" do
     story = Story.create :text => 'Once upon...'
     p = Delayed::PerformableMethod.new(story, :tell, [])
@@ -16,7 +16,7 @@ shared_examples_for 'Delayed::PerformableMethod' do
     p.args.should == []
     p.perform.should == 'Once upon...'
   end
-
+
   it "should allow class methods to be called on ActiveRecord models" do
     Story.create!(:text => 'Once upon a...')
     p = Delayed::PerformableMethod.new(Story, :count, [])
@@ -32,7 +32,7 @@ shared_examples_for 'Delayed::PerformableMethod' do
     p = Delayed::PerformableMethod.new(MyReverser, :reverse, ["ohai"])
     lambda { p.send(:perform).should == "iaho" }.should_not raise_error
   end
-
+
   it "should store arguments as native YAML if they are active record objects" do
     story = Story.create :text => 'Once upon...'
     reader = StoryReader.new
data/spec/shared/shared_backend.rb
CHANGED
@@ -429,10 +429,10 @@ shared_examples_for 'a backend' do
     Delayed::Periodic.scheduled = {}
     expect { Delayed::Periodic.cron('my ChangedJob', '*/5 * * * * *') do
       Delayed::Job.enqueue(SimpleJob.new)
-    end }.to raise_error
+    end }.to raise_error(ArgumentError)
     end

-    expect { Delayed::Periodic.add_overrides({ 'my ChangedJob' => '*/10 * * * * * *' }) }.to raise_error
+    expect { Delayed::Periodic.add_overrides({ 'my ChangedJob' => '*/10 * * * * * *' }) }.to raise_error(ArgumentError)
   end
 end

@@ -451,12 +451,12 @@ shared_examples_for 'a backend' do

   it "should fail on job creation if an unsaved AR object is used" do
     story = Story.new :text => "Once upon..."
-    lambda { story.send_later(:text) }.should raise_error
+    lambda { story.send_later(:text) }.should raise_error(RuntimeError)

     reader = StoryReader.new
-    lambda { reader.send_later(:read, story) }.should raise_error
+    lambda { reader.send_later(:read, story) }.should raise_error(RuntimeError)

-    lambda { [story, 1, story, false].send_later(:first) }.should raise_error
+    lambda { [story, 1, story, false].send_later(:first) }.should raise_error(RuntimeError)
   end

   # the sort order of current_jobs and list_jobs depends on the back-end
data/spec/shared/worker.rb
CHANGED
@@ -285,14 +285,14 @@ shared_examples_for 'Delayed::Worker' do
     queue_name = "default_queue"
     Delayed::Settings.queue = queue_name
     worker = worker_create(:queue=>nil)
-    worker.
+    worker.queue_name.should == queue_name
   end

   it "should override default queue name if specified in initialize" do
     queue_name = "my_queue"
     Delayed::Settings.queue = "default_queue"
     worker = worker_create(:queue=>queue_name)
-    worker.
+    worker.queue_name.should == queue_name
   end
 end

metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: canvas-jobs
 version: !ruby/object:Gem::Version
-  version: 0.10.6
+  version: 0.11.0
 platform: ruby
 authors:
 - Tobias Luetke
@@ -9,7 +9,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-03-
+date: 2016-03-17 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: after_transaction_commit
@@ -157,14 +157,14 @@ dependencies:
     requirements:
     - - '='
       - !ruby/object:Gem::Version
-        version: 3.
+        version: 3.4.0
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - '='
      - !ruby/object:Gem::Version
-        version: 3.
+        version: 3.4.0
 - !ruby/object:Gem::Dependency
   name: test_after_commit
   requirement: !ruby/object:Gem::Requirement
@@ -313,6 +313,8 @@ files:
 - lib/delayed/settings.rb
 - lib/delayed/testing.rb
 - lib/delayed/version.rb
+- lib/delayed/work_queue/in_process.rb
+- lib/delayed/work_queue/parent_process.rb
 - lib/delayed/worker.rb
 - lib/delayed/yaml_extensions.rb
 - lib/delayed_job.rb
@@ -321,6 +323,8 @@ files:
 - spec/delayed/daemon_spec.rb
 - spec/delayed/server_spec.rb
 - spec/delayed/settings_spec.rb
+- spec/delayed/work_queue/in_process_spec.rb
+- spec/delayed/work_queue/parent_process_spec.rb
 - spec/delayed/worker_spec.rb
 - spec/gemfiles/32.gemfile
 - spec/gemfiles/40.gemfile
@@ -366,6 +370,8 @@ test_files:
 - spec/delayed/daemon_spec.rb
 - spec/delayed/server_spec.rb
 - spec/delayed/settings_spec.rb
+- spec/delayed/work_queue/in_process_spec.rb
+- spec/delayed/work_queue/parent_process_spec.rb
 - spec/delayed/worker_spec.rb
 - spec/gemfiles/32.gemfile
 - spec/gemfiles/40.gemfile