super_queue 0.3.1 → 0.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/lib/super_queue.rb +61 -56
- metadata +3 -3
data/lib/super_queue.rb
CHANGED
@@ -23,8 +23,8 @@ class SuperQueue
     AWS.eager_autoload! # for thread safety
     check_opts(opts)
     @buffer_size = opts[:buffer_size] || 100
-    @use_s3 = opts[:use_s3]
     @queue_name = generate_queue_name(opts)
+    @bucket_name = opts[:bucket_name] || queue_name
     @request_count = 0
     initialize_aws(opts)
 
@@ -64,14 +64,10 @@ class SuperQueue
     check_for_errors
     @mutex.synchronize {
       loop do
-        if @out_buffer.empty?
-
-
-
-          raise ThreadError, "queue empty" if non_block
-          @waiting.push Thread.current
-          @mutex.sleep
-        end
+        if @out_buffer.empty? && !(fill_out_buffer_from_sqs_queue || fill_out_buffer_from_in_buffer)
+          raise ThreadError, "queue empty" if non_block
+          @waiting.push Thread.current
+          @mutex.sleep
         else
           return pop_out_buffer
         end
@@ -167,7 +163,7 @@ class SuperQueue
   end
 
   def open_s3_bucket
-    @s3.buckets[
+    @s3.buckets[@bucket_name].exists? ? @s3.buckets[@bucket_name] : @s3.buckets.create(@bucket_name)
   end
 
   def find_queue_by_name
@@ -196,24 +192,7 @@ class SuperQueue
     raise "Couldn't create queue #{queue_name}, or delete existing queue by this name." if q_url.nil?
   end
 
-  def send_messages_to_queue
-    number_of_batches = @in_buffer.size / 10
-    number_of_batches += 1 if @in_buffer.size % 10
-    batches = []
-    number_of_batches.times do
-      batch = []
-      10.times do
-        next if @in_buffer.empty?
-        p = @in_buffer.shift
-        unless should_send_to_s3?(p)
-          batch << encode(p)
-        else
-          batch << encode(send_payload_to_s3(p))
-        end
-      end
-      batches << batch
-    end
-
+  def send_messages_to_queue(batches)
     batches.each do |b|
       @request_count += 1
       @sqs_queue.batch_send(b) if b.any?
@@ -227,38 +206,25 @@ class SuperQueue
     number_of_batches.times do
       batch = @sqs_queue.receive_messages(:limit => 10).compact
       batch.each do |message|
-
-        unless obj.is_a?(SuperQueue::S3Pointer)
-          messages << {
-            :payload => obj,
-            :sqs_handle => message
-          }
-        else
-          p = fetch_payload_from_s3(obj)
-          messages << {
-            :payload => p,
-            :sqs_handle => message,
-            :s3_key => obj.s3_key } if p
-        end
+        messages << message
       end
       @request_count += 1
     end
     messages
   end
 
-  def send_payload_to_s3(
-
-
-
-
-    S3Pointer.new(digest)
+  def send_payload_to_s3(encoded_message)
+    key = "#{queue_name}/#{Digest::MD5.hexdigest(encoded_message)}"
+    return S3Pointer.new(key) if @bucket.objects[key].exists?
+    retryable(:tries => 5) { @bucket.objects[key].write(encoded_message, :reduced_redundancy => true) }
+    S3Pointer.new(key)
   end
 
   def fetch_payload_from_s3(pointer)
     payload = nil
     retries = 0
     begin
-      payload =
+      payload = decode(@bucket.objects[pointer.s3_key].read)
     rescue AWS::S3::Errors::NoSuchKey
       return nil
     rescue
@@ -268,8 +234,8 @@ class SuperQueue
     payload
   end
 
-  def should_send_to_s3?(
-
+  def should_send_to_s3?(encoded_message)
+    encoded_message.bytesize > 64000
   end
 
   def sqs_length
@@ -302,11 +268,24 @@ class SuperQueue
     nil_count = 0
     while (@out_buffer.size < @buffer_size) && (nil_count < 2)
       messages = get_messages_from_queue(@buffer_size - @out_buffer.size)
-      if messages.
-
-
-
+      if messages.any?
+        messages.each do |message|
+          obj = decode(message.body)
+          unless obj.is_a?(SuperQueue::S3Pointer)
+            @out_buffer.push(
+              :payload => obj,
+              :sqs_handle => message)
+          else
+            p = fetch_payload_from_s3(obj)
+            @out_buffer.push(
+              :payload => p,
+              :sqs_handle => message,
+              :s3_key => obj.s3_key) if p
+          end
+        end
         nil_count = 0
+      else
+        nil_count += 1
       end
     end
     !@out_buffer.empty?
@@ -332,9 +311,28 @@ class SuperQueue
   end
 
   def clear_in_buffer
+    batches = []
+    message_stash = []
     while !@in_buffer.empty? do
-
+      batch = message_stash
+      message_stash = []
+      message_count = batch.size
+      batch_too_big = false
+      while !@in_buffer.empty? && !batch_too_big && (message_count < 10) do
+        encoded_message = encode(@in_buffer.shift)
+        message = should_send_to_s3?(encoded_message) ? encode(send_payload_to_s3(encoded_message)) : encoded_message
+        if (batch_bytesize(batch) + message.bytesize) < 64000
+          batch << message
+          batch_too_big == false
+          message_count += 1
+        else
+          message_stash << message
+          batch_too_big = true
+        end
+      end
+      batches << batch
     end
+    send_messages_to_queue(batches)
   end
 
   #
@@ -380,6 +378,14 @@ class SuperQueue
     yield
   end
 
+  def batch_bytesize(batch)
+    sum = 0
+    batch.each do |string|
+      sum += string.bytesize
+    end
+    sum
+  end
+
   #
   # Virtul attributes and convenience methods
   #
@@ -402,7 +408,6 @@ class SuperQueue
   #
   # Maintence thread-related methods
   #
-
   def collect_garbage
     loop do
       #This also needs a condition to clear the del queue if there are any handles where the invisibility is about to expire
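
For context, a minimal usage sketch of what 0.3.3 changes for callers. Only :buffer_size, :bucket_name, the ~64 KB offload threshold, and the Queue-like pop behavior are taken from the diff above; the credential and :name option keys, and the push method name, are assumptions for illustration, not confirmed by this diff.

require "super_queue"

# Sketch only: option keys marked "assumed" do not appear in this diff.
q = SuperQueue.new(
  :aws_access_key_id     => ENV["AWS_ACCESS_KEY_ID"],     # assumed option name
  :aws_secret_access_key => ENV["AWS_SECRET_ACCESS_KEY"], # assumed option name
  :name                  => "crawl-jobs",                 # assumed option name
  :buffer_size           => 100,                          # default per the diff
  :bucket_name           => "crawl-jobs-overflow"         # new in 0.3.3; defaults to the queue name
)

q.push("small payload")  # encoded size <= 64000 bytes: batched straight to SQS
q.push("x" * 100_000)    # encoded size > 64000 bytes: written to S3, an S3Pointer is enqueued instead

q.pop                    # pops from the out buffer; S3Pointer payloads are rehydrated via fetch_payload_from_s3
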
metadata
CHANGED
@@ -1,15 +1,15 @@
 --- !ruby/object:Gem::Specification
 name: super_queue
 version: !ruby/object:Gem::Version
+  version: 0.3.3
   prerelease:
-  version: 0.3.1
 platform: ruby
 authors:
 - Jon Stokes
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2013-02-
+date: 2013-02-28 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: aws-sdk
@@ -56,7 +56,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   none: false
 requirements: []
 rubyforge_project:
-rubygems_version: 1.8.
+rubygems_version: 1.8.25
 signing_key:
 specification_version: 3
 summary: A thread-safe, SQS- and S3-backed queue structure for ruby that works just like a normal queue, except it's essentially infinite and can use very little memory.