super_queue 0.3.3 → 0.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. data/lib/super_queue.rb +59 -33
  2. metadata +3 -3
data/lib/super_queue.rb CHANGED
@@ -20,12 +20,13 @@ class SuperQueue
20
20
  end
21
21
 
22
22
  def initialize(opts)
23
- AWS.eager_autoload! # for thread safety
24
23
  check_opts(opts)
25
24
  @buffer_size = opts[:buffer_size] || 100
26
25
  @queue_name = generate_queue_name(opts)
27
26
  @bucket_name = opts[:bucket_name] || queue_name
28
- @request_count = 0
27
+ @write_count = 0
28
+ @read_count = 0
29
+ @delete_count = 0
29
30
  initialize_aws(opts)
30
31
 
31
32
  @waiting = []
@@ -44,6 +45,8 @@ class SuperQueue
44
45
  raise @gc_error
45
46
  end
46
47
  end
48
+
49
+ fill_out_buffer_from_sqs_queue
47
50
  end
48
51
 
49
52
  def push(p)
@@ -78,8 +81,7 @@ class SuperQueue
78
81
  def length
79
82
  check_for_errors
80
83
  @mutex.synchronize {
81
- sqsl = sqs_length
82
- return sqsl + @in_buffer.size + @out_buffer.size
84
+ return sqs_length + @in_buffer.size + @out_buffer.size
83
85
  }
84
86
  end
85
87
 
@@ -89,7 +91,9 @@ class SuperQueue
89
91
  end
90
92
 
91
93
  def empty?
92
- self.length == 0
94
+ len = 0
95
+ 2.times { len += self.length; sleep(0.01) }
96
+ len == 0
93
97
  end
94
98
 
95
99
  def clear
@@ -116,7 +120,7 @@ class SuperQueue
116
120
 
117
121
  def sqs_requests
118
122
  check_for_errors
119
- @request_count
123
+ @write_count + @read_count + @delete_count
120
124
  end
121
125
 
122
126
  alias enq push
@@ -163,19 +167,27 @@ class SuperQueue
163
167
  end
164
168
 
165
169
  def open_s3_bucket
166
- @s3.buckets[@bucket_name].exists? ? @s3.buckets[@bucket_name] : @s3.buckets.create(@bucket_name)
170
+ retryable(:tries => 3) do
171
+ @s3.buckets[@bucket_name].exists? ? @s3.buckets[@bucket_name] : @s3.buckets.create(@bucket_name)
172
+ end
167
173
  end
168
174
 
169
175
  def find_queue_by_name
176
+ retries = 0
170
177
  begin
171
178
  @sqs.queues.named(queue_name)
172
179
  rescue AWS::SQS::Errors::NonExistentQueue
173
180
  return nil
181
+ rescue NoMethodError => e
182
+ sleep 1
183
+ retries += 1
184
+ retry if retries < 5
185
+ raise e
174
186
  end
175
187
  end
176
188
 
177
189
  def new_sqs_queue(opts)
178
- @request_count += 1
190
+ @read_count += 1
179
191
  if opts[:visibility_timeout]
180
192
  @sqs.queues.create(queue_name, { :visibility_timeout => opts[:visibility_timeout] })
181
193
  else
@@ -194,8 +206,10 @@ class SuperQueue
194
206
 
195
207
  def send_messages_to_queue(batches)
196
208
  batches.each do |b|
197
- @request_count += 1
198
- @sqs_queue.batch_send(b) if b.any?
209
+ @write_count += 1
210
+ retryable(:tries => 5) do
211
+ @sqs_queue.batch_send(b)
212
+ end if b.any?
199
213
  end
200
214
  end
201
215
 
@@ -204,18 +218,17 @@ class SuperQueue
204
218
  number_of_batches = number_of_messages_to_receive / 10
205
219
  number_of_batches += 1 if number_of_messages_to_receive % 10
206
220
  number_of_batches.times do
207
- batch = @sqs_queue.receive_messages(:limit => 10).compact
208
- batch.each do |message|
209
- messages << message
210
- end
211
- @request_count += 1
221
+ retryable(:retries => 5) { messages += @sqs_queue.receive_messages(:limit => 10).compact }
222
+ @read_count += 1
212
223
  end
213
- messages
224
+ messages.compact
214
225
  end
215
226
 
216
227
  def send_payload_to_s3(encoded_message)
217
228
  key = "#{queue_name}/#{Digest::MD5.hexdigest(encoded_message)}"
218
- return S3Pointer.new(key) if @bucket.objects[key].exists?
229
+ key_exists = false
230
+ retryable(:tries => 5) { key_exists = @bucket.objects[key].exists? }
231
+ return S3Pointer.new(key) if key_exists
219
232
  retryable(:tries => 5) { @bucket.objects[key].write(encoded_message, :reduced_redundancy => true) }
220
233
  S3Pointer.new(key)
221
234
  end
@@ -239,24 +252,32 @@ class SuperQueue
239
252
  end
240
253
 
241
254
  def sqs_length
242
- n = @sqs_queue.approximate_number_of_messages
255
+ n = 0
256
+ retryable(:retries => 5) { n = @sqs_queue.approximate_number_of_messages }
257
+ @read_count += 1
243
258
  return n.is_a?(Integer) ? n : 0
244
259
  end
245
260
 
246
261
  def delete_aws_resources
247
- @request_count += 1
262
+ @delete_count += 1
248
263
  @sqs_queue.delete
249
- @bucket.delete!
264
+ begin
265
+ @bucket.clear!
266
+ sleep 1
267
+ end until @bucket.empty?
268
+ @bucket.delete
250
269
  end
251
270
 
252
271
  def clear_deletion_queue
253
- while !@deletion_queue.empty?
254
- sqs_handles = @deletion_queue[0..9].map { |m| m[:sqs_handle] }.compact
255
- s3_keys = @deletion_queue[0..9].map { |m| m[:s3_key] }.compact
256
- 10.times { @deletion_queue.shift }
257
- @sqs_queue.batch_delete(sqs_handles) if sqs_handles.any?
258
- s3_keys.each { |key| @bucket.objects[key].delete }
259
- @request_count += 1
272
+ retryable(:tries => 4) do
273
+ while !@deletion_queue.empty?
274
+ sqs_handles = @deletion_queue[0..9].map { |m| m[:sqs_handle].is_a?(AWS::SQS::ReceivedMessage) ? m[:sqs_handle] : nil }.compact
275
+ s3_keys = @deletion_queue[0..9].map { |m| m[:s3_key] }.compact
276
+ 10.times { @deletion_queue.shift }
277
+ @sqs_queue.batch_delete(sqs_handles) if sqs_handles.any?
278
+ s3_keys.each { |key| @bucket.objects[key].delete }
279
+ @delete_count += 1
280
+ end
260
281
  end
261
282
  end
262
283
 
@@ -266,7 +287,7 @@ class SuperQueue
266
287
  def fill_out_buffer_from_sqs_queue
267
288
  return false if sqs_length == 0
268
289
  nil_count = 0
269
- while (@out_buffer.size < @buffer_size) && (nil_count < 2)
290
+ while (@out_buffer.size < @buffer_size) && (nil_count < 5)
270
291
  messages = get_messages_from_queue(@buffer_size - @out_buffer.size)
271
292
  if messages.any?
272
293
  messages.each do |message|
@@ -276,14 +297,18 @@ class SuperQueue
276
297
  :payload => obj,
277
298
  :sqs_handle => message)
278
299
  else
279
- p = fetch_payload_from_s3(obj)
280
- @out_buffer.push(
281
- :payload => p,
282
- :sqs_handle => message,
283
- :s3_key => obj.s3_key) if p
300
+ if p = fetch_payload_from_s3(obj)
301
+ @out_buffer.push(
302
+ :payload => p,
303
+ :sqs_handle => message,
304
+ :s3_key => obj.s3_key)
305
+ else
306
+ @deletion_buffer.push(:sqs_handle => message, :s3_key => obj.s3_key)
307
+ end
284
308
  end
285
309
  end
286
310
  nil_count = 0
311
+ sleep 0.01
287
312
  else
288
313
  nil_count += 1
289
314
  end
@@ -414,6 +439,7 @@ class SuperQueue
414
439
  @mutex.synchronize { fill_deletion_queue_from_buffer } if @deletion_buffer.any?
415
440
  Thread.pass
416
441
  @mutex.synchronize { clear_deletion_queue } if @deletion_queue.any?
442
+ sleep 1
417
443
  end
418
444
  end
419
445
  end
metadata CHANGED
@@ -1,15 +1,15 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: super_queue
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.3.3
5
4
  prerelease:
5
+ version: 0.3.4
6
6
  platform: ruby
7
7
  authors:
8
8
  - Jon Stokes
9
9
  autorequire:
10
10
  bindir: bin
11
11
  cert_chain: []
12
- date: 2013-02-28 00:00:00.000000000 Z
12
+ date: 2013-11-01 00:00:00.000000000 Z
13
13
  dependencies:
14
14
  - !ruby/object:Gem::Dependency
15
15
  name: aws-sdk
@@ -56,7 +56,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
56
56
  none: false
57
57
  requirements: []
58
58
  rubyforge_project:
59
- rubygems_version: 1.8.25
59
+ rubygems_version: 1.8.24
60
60
  signing_key:
61
61
  specification_version: 3
62
62
  summary: A thread-safe, SQS- and S3-backed queue structure for ruby that works just like a normal queue, except it's essentially infinite and can use very little memory.