backburner-allq 1.0.17 → 1.0.18
- checksums.yaml +4 -4
- data/deploy.sh +1 -1
- data/lib/backburner/version.rb +1 -1
- data/lib/backburner/worker.rb +29 -28
- metadata +1 -1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a66b297949437303186f84d3ab4198416a72824de72142dd4727aa029365f119
+  data.tar.gz: b737ec4f3df6378167da3d5936c408ea6b1a92b05391c3c4ab9021160108492c
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 02ec21561843632c02a39742c7eef8591f94a7320af93bc77d566bc6e5d390c089fdeef1b56b5df3865a3430ad3e7a350b51d9faebfdb2fcebd4944905c4150e
+  data.tar.gz: 51ffee578303fd24e70278788153cc6265a356ddab6882c8bd62c7447c8434f7949b7d4deb35affd8a14d7d4f295d1494d48d89dc27e0eec9bf4c06ee69093b0
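These digests are the SHA256/SHA512 checksums of the two archives packed inside the .gem file. As a minimal sketch (not part of the gem itself), the recorded values can be recomputed after unpacking the release with `tar xf backburner-allq-1.0.18.gem`, which leaves metadata.gz and data.tar.gz in the current directory:

require "digest"

# Recompute the digests that checksums.yaml records for the unpacked archives.
%w[metadata.gz data.tar.gz].each do |name|
  puts "SHA256 #{name}: #{Digest::SHA256.file(name).hexdigest}"
  puts "SHA512 #{name}: #{Digest::SHA512.file(name).hexdigest}"
end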
data/deploy.sh
CHANGED
data/lib/backburner/version.rb
CHANGED
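The expanded diff for version.rb is not shown here, but given the release pair in the page title and the +1 -1 line count, the change is presumably the usual one-line version constant bump:

-  VERSION = "1.0.17"
+  VERSION = "1.0.18"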
data/lib/backburner/worker.rb
CHANGED
@@ -135,35 +135,36 @@ module Backburner
       end
 
       if job
-
-
-
-
-
-
-
-
-
-
-
-
+        begin
+          self.log_job_begin(job.name, job.args)
+          job.process
+          self.log_job_end(job.name)
+        rescue Backburner::Job::JobFormatInvalid => e
+          self.log_error self.exception_message(e)
+        rescue => e # Error occurred processing job
+          self.log_error self.exception_message(e) unless e.is_a?(Backburner::Job::RetryJob)
+
+          unless job
+            self.log_error "Error occurred before we were able to assign a job. Giving up without retrying!"
+            return
+          end
+
+          # NB: There's a slight chance here that the connection to allq has
+          # gone down between the time we reserved / processed the job and here.
+          num_retries = job.releases
+          max_job_retries = resolve_max_job_retries(job.job_class)
+          retry_status = "failed: attempt #{num_retries+1} of #{max_job_retries+1}"
+          retry_delay = resolve_retry_delay(job.job_class)
+          delay = resolve_retry_delay_proc(job.job_class).call(retry_delay, num_retries) rescue retry_delay
+          if num_retries + 1 > max_job_retries
+            job.bury
+          else
+            job.release(delay)
+          end
+          self.log_job_end(job.name, "#{retry_status}, retrying in #{delay}s") if job_started_at
+
+          handle_error(e, job.name, job.args, job)
         end
-
-        # NB: There's a slight chance here that the connection to allq has
-        # gone down between the time we reserved / processed the job and here.
-        num_retries = job.releases
-        max_job_retries = resolve_max_job_retries(job.job_class)
-        retry_status = "failed: attempt #{num_retries+1} of #{max_job_retries+1}"
-        retry_delay = resolve_retry_delay(job.job_class)
-        delay = resolve_retry_delay_proc(job.job_class).call(retry_delay, num_retries) rescue retry_delay
-        if num_retries + 1 > max_job_retries
-          job.bury
-        else
-          job.release(delay)
-        end
-        self.log_job_end(job.name, "#{retry_status}, retrying in #{delay}s") if job_started_at
-
-        handle_error(e, job.name, job.args, job)
       end
       job
     end
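The visible change in worker.rb is that 1.0.18 runs logging and job.process inside a begin/rescue within the `if job` branch: a nil job now logs "Giving up without retrying!" and returns early, and the retry bookkeeping (release counting, delay resolution, bury-vs-release) happens inside the generic rescue handler before handle_error is called. The snippet below isolates that bury-vs-release decision as a runnable sketch; the constants and the backoff proc are stand-ins for what resolve_max_job_retries, resolve_retry_delay, and resolve_retry_delay_proc would return for the job's class, not values taken from the gem.

# Sketch of the retry decision from the new rescue block, with stubbed inputs.
max_job_retries = 3                                   # stand-in for resolve_max_job_retries(job.job_class)
retry_delay     = 5                                   # stand-in for resolve_retry_delay(job.job_class)
retry_proc      = ->(delay, retries) { delay * (retries + 1) } # hypothetical delay proc

(0..4).each do |num_retries|                          # num_retries plays the role of job.releases
  delay = retry_proc.call(retry_delay, num_retries) rescue retry_delay
  if num_retries + 1 > max_job_retries
    puts "attempt #{num_retries + 1}: bury"           # job.bury — out of retries
  else
    puts "attempt #{num_retries + 1}: release, retry in #{delay}s" # job.release(delay)
  end
end

With max_job_retries = 3 this releases on the first three attempts and buries on attempts 4 and 5, mirroring how a job that keeps raising is eventually parked in the buried state instead of looping forever. Note the inline `rescue retry_delay` in the delay computation: if the delay proc itself raises, the worker falls back to the flat retry_delay.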