backburner-allq 1.0.13 → 1.0.19

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 01bd4bf1b3f1e5905844d23996a0529b08baa629cd46883dbb144feca42e527c
- data.tar.gz: c77846b06501f94b118fc9000a90d98b6fdfa7c7b91bd2af2e36b29fde35f556
+ metadata.gz: 961c84a21c68a6bbe287105030dcbcf73e50177d647321dc410786b417ba653c
+ data.tar.gz: bee092eb96d993ee4b34b4c1e9fa671cc92e4a3316179890be130dbe14cc70f1
  SHA512:
- metadata.gz: '086c4e08ff55f7c9754f668a97c9003d8d01118c39b527bfc871cf42abbac8a33df9f739ab0836f9f19cbf9f4a8058292170cc8317c18e273cef327129aab7f9'
- data.tar.gz: 1b3854c17f9c411d0d7584c94c2d3fe8e7b92ea5ea81d3f5ae3439fce27a8f1aad9f582922c009a15facb82a7c780c3123795f94254cf3ca0f44f3300e92d9e9
+ metadata.gz: a09fd305b986a90330434db34e13e961f4f6878bff406348fb246f03ac0a3e0b9b8216862128c62005e5b25a4338371cb341e57133aadaa1e3187066fee8698e
+ data.tar.gz: 12fd13a7430682a5021d16ad32c7f3be1f39767413feeb2a96a7335c465d3639dfbd47905f43249aae27d23030c27a284fce36d098f7b6f396ac789182330efe
data/Gemfile CHANGED
@@ -1,4 +1,4 @@
  source 'https://rubygems.org'
 
  # Specify your gem's dependencies in backburner.gemspec
- gemspec
+ gemspec
data/deploy.sh CHANGED
@@ -1,3 +1,3 @@
  echo "Did you update the version?"
  gem build backburner-allq.gemspec
- gem push backburner-allq-1.0.13.gem
+ gem push backburner-allq-1.0.19.gem
@@ -13,14 +13,16 @@ module Backburner
 
  def watch
  Thread.new do
- ran = false
- job = @allq_wrapper.get(@tube_name)
- if job.body
- perform(job)
- ran = true
+ loop do
+ ran = false
+ job = @allq_wrapper.get(@tube_name)
+ if job.body
+ perform(job)
+ ran = true
+ end
+ # Wait if nothing returned
+ sleep(rand() * 3) unless ran
  end
- # Wait if nothing returned
- sleep(rand() * 3) unless ran
  end
  end
  end
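
The rewritten watcher above now polls in an endless loop and backs off with a short random sleep whenever the tube comes back empty, instead of fetching a single job and letting the thread finish. A minimal, self-contained sketch of that jittered-polling pattern follows; FakeClient and FakeJob are illustrative stand-ins for the allq wrapper, not part of the gem, and only a get call returning an object with a body is assumed.

    # Sketch of the poll-with-jitter loop: process what arrives, otherwise
    # pause 0-3 s so an idle worker does not spin against the queue server.
    FakeJob = Struct.new(:body)

    class FakeClient
      def get(_tube_name)
        FakeJob.new(rand < 0.5 ? "payload" : nil) # sometimes empty
      end
    end

    client = FakeClient.new
    worker = Thread.new do
      loop do
        ran = false
        job = client.get("default")
        if job && job.body
          puts "processing #{job.body}"   # stand-in for perform(job)
          ran = true
        end
        sleep(rand * 3) unless ran        # back off only when nothing ran
      end
    end
    worker.join(5) # let the demo run briefly, then exit
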
@@ -123,6 +125,10 @@ module Backburner
  stats_hash.keys
  end
 
+ def tubes
+ tube_names
+ end
+
  def peek_buried(tube_name = 'default')
  job = nil
  job = @client.peek_get(tube_name, buried: true)
@@ -250,6 +256,11 @@ module Backburner
  result
  end
 
+ def stats(tube)
+ final_stats = stats
+ final_stats[tube]
+ end
+
  def stats
  raw_stats = @admin.stats_get
  final_stats = {}
@@ -27,7 +27,7 @@ module Backburner
  @default_priority = 5
  @respond_timeout = 120
  @on_error = nil
- @max_job_retries = 0
+ @max_job_retries = 1
  @retry_delay = 5
  @retry_delay_proc = lambda { |min_retry_delay, num_retries| min_retry_delay + (num_retries ** 3) }
  @default_queues = []
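
With the default @retry_delay of 5 and the cubic @retry_delay_proc shown above, release delays grow as 5, 6, 13, 32, ... seconds, and raising @max_job_retries from 0 to 1 means a failing job now gets one release (after roughly 5 s) before it is buried. The short sketch below simply evaluates that documented lambda; no queue objects are involved.

    # Evaluate the default retry-delay curve from the configuration above.
    retry_delay_proc = lambda { |min_retry_delay, num_retries| min_retry_delay + (num_retries ** 3) }

    (0..3).each do |num_retries|
      delay = retry_delay_proc.call(5, num_retries)
      puts "release ##{num_retries + 1} would wait #{delay}s"
    end
    # => 5s, 6s, 13s, 32s
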
@@ -38,6 +38,10 @@ module Backburner
  end
  end
 
+ def tubes
+ @allq_wrapper.tube_names if @allq_wrapper
+ end
+
  # Attempt to reconnect to allq. Note: the connection will not be watching
  # or using the tubes it was before it was reconnected (as it's actually a
  # completely new connection)
@@ -26,8 +26,8 @@ module Backburner
  @name = body["class"] || body[:class]
  @args = body["args"] || body[:args]
  rescue => ex # Job was not valid format
- self.bury
- raise JobFormatInvalid, "Job body could not be parsed: #{ex.inspect}"
+ # self.bury
+ # raise JobFormatInvalid, "Job body could not be parsed: #{ex.inspect}"
  end
 
  # Sets the delegator object to the underlying beaneater job
@@ -60,12 +60,12 @@ module Backburner
 
  def bury
  @hooks.invoke_hook_events(job_name, :on_bury, *args)
- task.bury
+ @task.bury
  end
 
  def retry(count, delay)
  @hooks.invoke_hook_events(job_name, :on_retry, count, delay, *args)
- task.release(delay: delay)
+ @task.release(delay: delay)
  end
 
  # Returns the class for the job handler
@@ -1,3 +1,3 @@
  module Backburner
- VERSION = "1.0.13"
+ VERSION = "1.0.19"
  end
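
For consumers of the gem, picking up this release is a matter of allowing the new patch level in the Gemfile; the constraint below is one hedged example, not a requirement of the gem.

    # Gemfile (example constraint for picking up this release)
    source 'https://rubygems.org'

    gem 'backburner-allq', '>= 1.0.19', '< 1.1'
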
@@ -130,39 +130,50 @@ module Backburner
  begin
  job = reserve_job(conn)
  rescue Exception => e
+ self.log_error "Sleeping"
+ self.log_error "Exception: #{e.full_message}"
  sleep(rand*3)
  return
  end
 
- self.log_job_begin(job.name, job.args)
- job.process
- self.log_job_end(job.name)
-
- rescue Backburner::Job::JobFormatInvalid => e
- self.log_error self.exception_message(e)
- rescue => e # Error occurred processing job
- self.log_error self.exception_message(e) unless e.is_a?(Backburner::Job::RetryJob)
-
- unless job
- self.log_error "Error occurred before we were able to assign a job. Giving up without retrying!"
- return
- end
-
- # NB: There's a slight chance here that the connection to allq has
- # gone down between the time we reserved / processed the job and here.
- num_retries = job.releases
- max_job_retries = resolve_max_job_retries(job.job_class)
- retry_status = "failed: attempt #{num_retries+1} of #{max_job_retries+1}"
- retry_delay = resolve_retry_delay(job.job_class)
- delay = resolve_retry_delay_proc(job.job_class).call(retry_delay, num_retries) rescue retry_delay
- if num_retries + 1 > max_job_retries
- job.bury
+ if job && job.body
+ begin
+ self.log_job_begin(job.name, job.args)
+ job.process
+ self.log_job_end(job.name)
+ rescue Backburner::Job::JobFormatInvalid => e
+ self.log_error self.exception_message(e)
+ rescue => e # Error occurred processing job
+ self.log_error self.exception_message(e) unless e.is_a?(Backburner::Job::RetryJob)
+
+ unless job
+ self.log_error "Error occurred before we were able to assign a job. Giving up without retrying!"
+ return
+ end
+
+ # NB: There's a slight chance here that the connection to allq has
+ # gone down between the time we reserved / processed the job and here.
+ num_retries = job.releases
+ max_job_retries = resolve_max_job_retries(job.job_class)
+ retry_status = "failed: attempt #{num_retries+1} of #{max_job_retries+1}"
+ retry_delay = resolve_retry_delay(job.job_class)
+ delay = resolve_retry_delay_proc(job.job_class).call(retry_delay, num_retries) rescue retry_delay
+ puts "num_retries = #{num_retries}"
+ puts "max_job_retries = #{max_job_retries}"
+
+ if num_retries + 1 > max_job_retries
+ job.bury
+ else
+ job.release(delay)
+ end
+ self.log_job_end(job.name, "#{retry_status}, retrying in #{delay}s") if job_started_at
+
+ handle_error(e, job.name, job.args, job)
+ end
  else
- job.release(delay)
+ sleep(rand*3)
  end
- self.log_job_end(job.name, "#{retry_status}, retrying in #{delay}s") if job_started_at
-
- handle_error(e, job.name, job.args, job)
+ job
  end
 
 
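
The reworked branch above buries a job once num_retries + 1 > max_job_retries, so with the new default of one retry a job gets two attempts in total before burial. The sketch below walks that comparison with plain numbers; no job or connection objects are involved.

    # Decision table for the bury-vs-release branch, assuming the new
    # default max_job_retries = 1 from the configuration change above.
    max_job_retries = 1

    (0..2).each do |num_retries|  # releases already recorded on the job
      action = (num_retries + 1 > max_job_retries) ? "bury" : "release and retry"
      puts "attempt #{num_retries + 1}: #{action}"
    end
    # attempt 1: release and retry
    # attempt 2: bury
    # attempt 3: bury
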
@@ -175,7 +186,9 @@ module Backburner
 
  # Reserve a job from the watched queues
  def reserve_job(conn, reserve_timeout = Backburner.configuration.reserve_timeout)
- Backburner::Job.new(conn.get(@tube_names.sample))
+ job = conn.get(@tube_names.sample)
+ return nil if job.nil? || job.body == nil?
+ Backburner::Job.new(job)
  end
 
  # Returns a list of all tubes known within the system
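
reserve_job now hands back nil instead of wrapping an empty reservation, which is what lets work_one_job treat "nothing to do" as an ordinary return value. Below is a hedged sketch of that guard, written with a plain nil check on the body and a hypothetical method name; it assumes conn.get returns nil or a body-less object when the sampled tube is empty.

    # Sketch: only wrap the raw reservation when it actually carries a body,
    # otherwise signal "nothing reserved" by returning nil to the caller.
    def reserve_job_sketch(conn, tube_names)
      raw = conn.get(tube_names.sample)
      return nil if raw.nil? || raw.body.nil?
      Backburner::Job.new(raw)
    end
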
@@ -11,7 +11,7 @@ module Backburner
  def prepare
  self.tube_names.map! { |name| expand_tube_name(name) }.uniq!
  log_info "Working #{tube_names.size} queues: [ #{tube_names.join(', ')} ]"
- self.connection.tubes.watch!(*self.tube_names)
+ # self.connection.tubes.watch!(*self.tube_names)
  end
 
  # Starts processing new jobs indefinitely.
@@ -11,7 +11,7 @@ module Backburner
  def prepare
  self.tube_names.map! { |name| expand_tube_name(name) }.uniq!
  log_info "Working #{tube_names.size} queues: [ #{tube_names.join(', ')} ]"
- self.connection.tubes.watch!(*self.tube_names)
+ # self.connection.tubes.watch!(*self.tube_names)
  end
 
  # Starts processing new jobs indefinitely.
@@ -49,8 +49,8 @@ module Backburner
  @thread_pools.each do |tube_name, pool|
  pool.max_length.times do
  # Create a new connection and set it up to listen on this tube name
- connection = new_connection.tap{ |conn| conn.tubes.watch!(tube_name) }
- connection.on_reconnect = lambda { |conn| conn.tubes.watch!(tube_name) }
+ # connection = new_connection.tap{ |conn| conn.tubes.watch!(tube_name) }
+ # connection.on_reconnect = lambda { |conn| conn.tubes.watch!(tube_name) }
 
  # Make it work jobs using its own connection per thread
  pool.post(connection) do |memo_connection|
@@ -181,6 +181,8 @@ module Backburner
 
  @runs = 0
 
+ puts "Threads number = #{@threads_number}"
+
  if @threads_number == 1
  watch_tube(name)
  run_while_can
@@ -205,9 +207,15 @@ module Backburner
 
  # Run work_one_job while we can
  def run_while_can(conn = connection)
+ puts "Run while can"
  while @garbage_after.nil? or @garbage_after > @runs
  @runs += 1 # FIXME: Likely race condition
- work_one_job(conn)
+ ran_job = work_one_job(conn)
+ # Wait a second if we didn't find a job
+ unless ran_job
+ puts "sleeping"
+ sleep(rand() * 3)
+ end
  end
  end
 
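
Because work_one_job now returns the reserved job (or nil when nothing was there), run_while_can can use plain truthiness as its back-off signal. A condensed, illustrative sketch of that contract; fake_work stands in for work_one_job(conn) and is not part of the gem.

    # Truthy result means "keep going"; nil means "pause before polling again".
    fake_work = lambda { rand < 0.5 ? Object.new : nil } # stand-in for work_one_job(conn)

    5.times do
      ran_job = fake_work.call
      sleep(rand * 3) unless ran_job # same jittered pause the worker uses
    end
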
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: backburner-allq
  version: !ruby/object:Gem::Version
- version: 1.0.13
+ version: 1.0.19
  platform: ruby
  authors:
  - Jason Malcolm
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-01-23 00:00:00.000000000 Z
+ date: 2021-02-13 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: allq_rest