resque-concurrent-restriction 0.6.0 → 0.6.1
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 15ae520632319f854b8b8975c4dfd83338a4e5c0
+  data.tar.gz: 44be3699fa06a36fd8626012133b3821957b67b4
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 64a4aa1348e13b2d440c0e9ff00ab4af6ac0e6a530fd7073d3253efc57da70e8f1f5ef41b9ade01c51c32b28e7c7a0200b104879ea5fbaad962b8ccddcc1d0e5
+  data.tar.gz: 6d39969cee8167c013429ed0c4dd68716e5e7ee931a5ba41f102663d38cccc909fd819831678e87531bd31b44865d00ddcfe396953cb38b9a7c6c451e8acfc18
lib/resque/plugins/concurrent_restriction/concurrent_restriction.rb
CHANGED
@@ -1,540 +1,539 @@
(the 0.6.0 contents, 540 lines, appear only as truncated fragments in the source diff and are replaced in full by the 539 lines below)
# To configure resque concurrent restriction, add something like the
# following to an initializer (defaults shown):
#
# Resque::Plugins::ConcurrentRestriction.configure do |config|
#   # The lock timeout for the restriction queue lock
#   config.lock_timeout = 60
#   # How many times to try to get a lock before giving up
#   # Worker stays busy for: 2^tries * rand(100) * 0.001 (~30s-3000s)
#   config.lock_tries = 10
#   # Try this many times to reserve a job from a queue. Also, the maximum
#   # number of jobs to move to the restricted queue during this process
#   # before giving up
#   config.reserve_queued_job_attempts = 10
#   # Try to pick jobs off of the restricted queue before normal queues
#   config.restricted_before_queued = true
# end

require 'active_support'

module Resque
  module Plugins
    module ConcurrentRestriction
      include ::ActiveSupport::Inflector

      # Allows configuring via class accessors
      class << self
        # optional
        attr_accessor :lock_timeout, :lock_tries, :reserve_queued_job_attempts, :restricted_before_queued
      end

      # default values
      self.lock_timeout = 60
      self.lock_tries = 15
      self.reserve_queued_job_attempts = 1
      self.restricted_before_queued = false

      # Allows configuring via class accessors
      def self.configure
        yield self
      end

      # Redis Data Structures
      #
      # concurrent.lock.tracking_id => timestamp
      #   Maintains the distributed lock for the tracking_key to ensure
      #   atomic modification of other data structures
      #
      # concurrent.count.tracking_id => count
      #   The count of currently running jobs for the tracking_id
      #
      # concurrent.queue.queue_name.tracking_id => List[job1, job2, ...]
      #   The queue of items that is currently unable to run due to count being exceeded
      #
      # concurrent.queue_availability.tracking_key => Set[queue_name1, queue_name2, ...]
      #   Maintains the set of queues that currently have something
      #   runnable for each tracking_id
      #
      # concurrent.runnable[.queue_name] => Set[tracking_id1, tracking_id2, ...]
      #   Maintains the set of tracking_ids that have something
      #   runnable for each queue (globally without .queue_name postfix in key)
      #
      # The behavior has two points of entry:
      #
      # When the Resque::Worker is looking for a job to run from a restriction
      # queue, we use the queue_name to look up the set of tracking IDs that
      # are currently runnable for that queue.  If we get a tracking id, we
      # know that there is a restriction queue with something runnable in it,
      # and we then use that tracking_id and queue to look up and pop something
      # off of the restriction queue.
      #
      # When the Resque::Worker gets a job off of a normal resque queue, it uses
      # the count to see if that job is currently restricted.  If not, it runs it
      # as normal, but if it is restricted, then it sticks it on a restriction queue.
      #
      # In both cases, before a job is handed off to resque to be run, we increment
      # the count so we can keep tracking of how many are currently running.  When
      # the job finishes, we then decrement the count.

      # Used by the user in their job class to set the concurrency limit
      def concurrent(limit)
        @concurrent = limit
      end

      # Allows the user to specify the unique key that identifies a set
      # of jobs that share a concurrency limit.  Defaults to the job class name
      def concurrent_identifier(*args)
      end

      # Used to query what the limit the user has set
      def concurrent_limit
        @concurrent ||= 1
      end

      # The key used to acquire a lock so we can operate on multiple
      # redis structures (runnables set, running_count) atomically
      def lock_key(tracking_key)
        parts = tracking_key.split(".")
        "concurrent.lock.#{parts[2..-1].join('.')}"
      end

      # The redis key used to store the number of currently running
      # jobs for the restriction_identifier
      def running_count_key(tracking_key)
        parts = tracking_key.split(".")
        "concurrent.count.#{parts[2..-1].join('.')}"
      end

      # The key for the redis list where restricted jobs for the given resque queue are stored
      def restriction_queue_key(tracking_key, queue)
        parts = tracking_key.split(".")
        "concurrent.queue.#{queue}.#{parts[2..-1].join('.')}"
      end

      # The redis key used to store the aggregate number of jobs
      # in restriction queues by queue name
      def queue_count_key
        "concurrent.queue_counts"
      end

      def restriction_queue_availability_key(tracking_key)
        parts = tracking_key.split(".")
        "concurrent.queue_availability.#{parts[2..-1].join('.')}"
      end

      # The key that groups all jobs of the same restriction_identifier together
      # so that we can work on any of those jobs if they are runnable
      # Stored in runnables set, and used to build keys for each queue where jobs
      # for those queues are stored
      def tracking_key(*args)
        id = concurrent_identifier(*args)
        id = ".#{id}" if id && id.strip.size > 0
        "concurrent.tracking.#{self.to_s}#{id}"
      end

      def tracking_class(tracking_key)
        constantize(tracking_key.split(".")[2])
      end

      # The key to the redis set where we keep a list of runnable tracking_keys
      def runnables_key(queue=nil)
        key = ".#{queue}" if queue
        "concurrent.runnable#{key}"
      end

      # Encodes the job into the restriction queue
      def encode(job)
        item = {:queue => job.queue, :payload => job.payload}
        Resque.encode(item)
      end

      # Decodes the job from the restriction queue
      def decode(str)
        item = Resque.decode(str)
        Resque::Job.new(item['queue'], item['payload']) if item
      end

      # The restriction queues that have data for each tracking key
      # Adds/Removes the queue to the list of queues for that tracking key
      # so we can quickly tell in next_runnable_job if a runnable job exists on a
      # specific restriction queue
      def update_queues_available(tracking_key, queue, action)
        availability_key = restriction_queue_availability_key(tracking_key)
        case action
          when :add then Resque.redis.send(:sadd, availability_key, queue)
          when :remove then Resque.redis.send(:srem, availability_key, queue)
          else raise "Invalid action to ConcurrentRestriction.track_queue"
        end
      end

      def queues_available(tracking_key)
        availability_key = restriction_queue_availability_key(tracking_key)
        Resque.redis.smembers(availability_key)
      end

      # Pushes the job to the restriction queue
      def push_to_restriction_queue(job, location=:back)
        tracking_key = tracking_key(*job.args)

        case location
          when :back then Resque.redis.rpush(restriction_queue_key(tracking_key, job.queue), encode(job))
          when :front then Resque.redis.lpush(restriction_queue_key(tracking_key, job.queue), encode(job))
          else raise "Invalid location to ConcurrentRestriction.push_to_restriction_queue"
        end

        increment_queue_count(job.queue)
        update_queues_available(tracking_key, job.queue, :add)
        mark_runnable(tracking_key, false)
      end

      # Pops a job from the restriction queue
      def pop_from_restriction_queue(tracking_key, queue)
        queue_key = restriction_queue_key(tracking_key, queue)
        str = Resque.redis.lpop(queue_key)
        post_pop_size = Resque.redis.llen(queue_key)

        if post_pop_size == 0
          update_queues_available(tracking_key, queue, :remove)
          clear_runnable(tracking_key, queue)
        end

        decrement_queue_count(queue)

        # increment by one to indicate that we are running
        increment_running_count(tracking_key) if str

        decode(str)
      end

      # Grabs the raw data (undecoded) from the restriction queue
      def restriction_queue_raw(tracking_key, queue)
        Array(Resque.redis.lrange(restriction_queue_key(tracking_key, queue), 0, -1))
      end

      # Grabs the contents of the restriction queue (decoded)
      def restriction_queue(tracking_key, queue)
        restriction_queue_raw(tracking_key, queue).collect {|s| decode(s) }
      end

      # Returns the number of jobs currently running
      def running_count(tracking_key)
        Resque.redis.get(running_count_key(tracking_key)).to_i
      end

      # Returns the number of jobs currently running
      def set_running_count(tracking_key, value)
        count_key = running_count_key(tracking_key)
        Resque.redis.set(count_key, value)
        restricted = (value > concurrent_limit)
        mark_runnable(tracking_key, !restricted)
        return restricted
      end

      def restricted?(tracking_key)
        count_key = running_count_key(tracking_key)
        value = Resque.redis.get(count_key).to_i
        restricted = (value >= concurrent_limit)
        return restricted
      end

      # The value in redis is the number of jobs currently running
      # If we increment past that, we are restricted.  Incrementing is only done
      # after the job is cleared for execution due to checking the runnable
      # state, and post increment we setup runnable for future jobs based on
      # the new "restricted" value
      def increment_running_count(tracking_key)
        count_key = running_count_key(tracking_key)
        value = Resque.redis.incr(count_key)
        restricted = (value >= concurrent_limit)
        mark_runnable(tracking_key, !restricted)
        return restricted
      end

      def decrement_running_count(tracking_key)
        count_key = running_count_key(tracking_key)
        value = Resque.redis.decr(count_key)
        Resque.redis.set(count_key, 0) if value < 0
        restricted = (value >= concurrent_limit)
        mark_runnable(tracking_key, !restricted)
        return restricted
      end

      def increment_queue_count(queue, by=1)
        value = Resque.redis.hincrby(queue_count_key, queue, by)
        return value
      end

      def decrement_queue_count(queue, by=1)
        value = Resque.redis.hincrby(queue_count_key, queue, -by)
        return value
      end

      def queue_counts
        value = Resque.redis.hgetall(queue_count_key)
        value = Hash[*value.collect {|k, v| [k, v.to_i] }.flatten]
        return value
      end

      def set_queue_count(queue, count)
        Resque.redis.hset(queue_count_key, queue, count)
      end

      def runnable?(tracking_key, queue)
        Resque.redis.sismember(runnables_key(queue), tracking_key)
      end

      def get_next_runnable(queue)
        Resque.redis.srandmember(runnables_key(queue))
      end

      # Returns the list of tracking_keys that have jobs waiting to run (are not over the concurrency limit)
      def runnables(queue=nil)
        Resque.redis.smembers(runnables_key(queue))
      end

      # Keeps track of which jobs are currently runnable, that is the
      # tracking_key should have jobs on some restriction queue and
      # also have less than concurrency_limit jobs running
      #
      def mark_runnable(tracking_key, runnable)
        queues = queues_available(tracking_key)
        queues.each do |queue|
          runnable_queues_key = runnables_key(queue)
          if runnable
            Resque.redis.sadd(runnable_queues_key, tracking_key)
          else
            Resque.redis.srem(runnable_queues_key, tracking_key)
          end
        end
        if runnable
          Resque.redis.sadd(runnables_key, tracking_key) if queues.size > 0
        else
          Resque.redis.srem(runnables_key, tracking_key)
        end
      end

      def clear_runnable(tracking_key, queue)
        Resque.redis.srem(runnables_key(queue), tracking_key)
        Resque.redis.srem(runnables_key, tracking_key)
      end

      # Acquires a lock using the given key and lock expiration time
      def acquire_lock(lock_key, lock_expiration)
        # acquire the lock to work on the restriction queue
        expiration_time = lock_expiration + 1
        acquired_lock = Resque.redis.setnx(lock_key, expiration_time)

        # If we didn't acquire the lock, check the expiration as described
        # at http://redis.io/commands/setnx
        if ! acquired_lock
          # If expiration time is in the future, then someone else beat us to getting the lock
          old_expiration_time = Resque.redis.get(lock_key)
          return false if old_expiration_time.to_i > Time.now.to_i

          # if expiration time was in the future when we set it, then someone beat us to it
          old_expiration_time = Resque.redis.getset(lock_key, expiration_time)
          return false if old_expiration_time.to_i > Time.now.to_i
        end

        # expire the lock eventually so we clean up keys - not needed to timeout
        # lock, just to keep redis clean for locks that aren't being used
        Resque.redis.expireat(lock_key, expiration_time + 300)

        return true
      end

      # Releases the lock acquired by #acquire_lock
      def release_lock(lock_key, lock_expiration)
        # Only delete the lock if the one we created hasn't expired
        expiration_time = lock_expiration + 1
        Resque.redis.del(lock_key) if expiration_time > Time.now.to_i
      end


      # Uses a lock to ensure that a sequence of redis operations happen atomically
      # We don't use watch/multi/exec as it doesn't work in a DistributedRedis setup
      def run_atomically(lock_key, tries=ConcurrentRestriction.lock_tries)
        acquired_lock = false
        exp_backoff = 1

        tries.times do
          lock_expiration = Time.now.to_i + ConcurrentRestriction.lock_timeout
          if acquire_lock(lock_key, lock_expiration)
            acquired_lock = true
            begin
              yield
            ensure
              release_lock(lock_key, lock_expiration)
            end
            break
          else
            sleep(rand(100) * 0.001 * exp_backoff)
            exp_backoff *= 2
          end
        end

        return acquired_lock
      end

      # Pushes the job to restriction queue if it is restricted
      # If the job is within the concurrency limit, thus needs to be run, we
      # keep the running count incremented so that other calls don't erroneously
      # see a lower value and run their job.  This count gets decremented by call
      # to release_restriction when job completes
      def stash_if_restricted(job)
        restricted = nil
        tracking_key = tracking_key(*job.args)
        lock_key = lock_key(tracking_key)

        did_run = run_atomically(lock_key) do

          restricted = restricted?(tracking_key)
          if restricted
            push_to_restriction_queue(job)
          else
            increment_running_count(tracking_key)
          end

        end

        # if run_atomically fails to acquire the lock, we need to put
        # the job back on the queue for processing later and act restricted
        # upstack so nothing gets run
        if !did_run
          restricted = true
          job.recreate
        end

        return restricted
      end

      # Returns the next job that is runnable
      def next_runnable_job(queue)
        tracking_key = get_next_runnable(queue)
        return nil unless tracking_key

        job = nil
        lock_key = lock_key(tracking_key)

        run_atomically(lock_key) do

          # since we don't have a lock when we get the runnable,
          # we need to check it again
          still_runnable = runnable?(tracking_key, queue)
          if still_runnable
            klazz = tracking_class(tracking_key)
            job = klazz.pop_from_restriction_queue(tracking_key, queue)
          end

        end

        return job

      end

      # Decrements the running_count - to be called at end of job
      def release_restriction(job)
        tracking_key = tracking_key(*job.args)
        lock_key = lock_key(tracking_key)

        run_atomically(lock_key) do

          # decrement the count after a job has run
          decrement_running_count(tracking_key)

        end
      end

      # Resets everything to be runnable
      def reset_restrictions

        counts_reset = 0
        count_keys = Resque.redis.keys("concurrent.count.*")
        if count_keys.size > 0
          count_keys.each_slice(10000) do |key_slice|
            counts_reset += Resque.redis.del(*key_slice)
          end
        end

        runnable_keys = Resque.redis.keys("concurrent.runnable*")
        if runnable_keys.size > 0
          runnable_keys.each_slice(10000) do |runnable_slice|
            Resque.redis.del(*runnable_slice)
          end
        end

        Resque.redis.del(queue_count_key)
        queues_enabled = 0
        queue_keys = Resque.redis.keys("concurrent.queue.*")
        queue_keys.each do |k|
          len = Resque.redis.llen(k)
          if len > 0
            parts = k.split(".")
            queue = parts[2]
            ident = parts[3..-1].join('.')
            tracking_key = "concurrent.tracking.#{ident}"

            increment_queue_count(queue, len)
            update_queues_available(tracking_key, queue, :add)
            mark_runnable(tracking_key, true)
            queues_enabled += 1
          end
        end

        return counts_reset, queues_enabled

      end

      def stats(extended=false)
        result = {}

        result[:queues] = queue_counts

        if extended
          ident_sizes = {}
          queue_keys = Resque.redis.keys("concurrent.queue.*")
          queue_keys.each do |k|
            parts = k.split(".")
            ident = parts[3..-1].join(".")
            queue_name = parts[2]
            size = Resque.redis.llen(k)
            ident_sizes[ident] ||= {}
            ident_sizes[ident][queue_name] ||= 0
            ident_sizes[ident][queue_name] += size
          end

          count_keys = Resque.redis.keys("concurrent.count.*")
          running_counts = {}
          count_keys.each do |k|
            parts = k.split(".")
            ident = parts[2..-1].join(".")
            ident_sizes[ident] ||= {}
            ident_sizes[ident]["running"] = Resque.redis.get(k).to_i
          end

          result[:identifiers] = ident_sizes
        else
          result[:identifiers] = {}
        end


        lock_keys = Resque.redis.keys("concurrent.lock.*")
        result[:lock_count] = lock_keys.size

        runnable_count = Resque.redis.scard(runnables_key)
        result[:runnable_count] = runnable_count

        return result

      end

    end

    # Allows users to subclass instead of extending in their job classes
    class ConcurrentRestrictionJob
      extend ConcurrentRestriction
    end

  end
end
lib/resque/plugins/concurrent_restriction/version.rb
CHANGED
@@ -1,7 +1,7 @@
-module Resque
-  module Plugins
-    module ConcurrentRestriction
-      VERSION = "0.6.0"
-    end
-  end
-end
+module Resque
+  module Plugins
+    module ConcurrentRestriction
+      VERSION = "0.6.1"
+    end
+  end
+end
resque-concurrent-restriction.gemspec
CHANGED
@@ -1,30 +1,30 @@
(the 0.6.0 gemspec, 30 lines, appears only as truncated fragments in the source diff; the new 30-line gemspec follows)
# -*- encoding: utf-8 -*-
$:.push File.expand_path("../lib", __FILE__)
require 'resque/plugins/concurrent_restriction/version'

Gem::Specification.new do |s|
  s.name = "resque-concurrent-restriction"
  s.version = Resque::Plugins::ConcurrentRestriction::VERSION
  s.platform = Gem::Platform::RUBY
  s.authors = ["Matt Conway"]
  s.email = ["matt@conwaysplace.com"]
  s.homepage = "http://github.com/wr0ngway/resque-concurrent-restriction"
  s.summary = %q{A resque plugin for limiting how many of a specific job can run concurrently}
  s.description = %q{A resque plugin for limiting how many of a specific job can run concurrently}

  s.rubyforge_project = "resque-concurrent-restriction"

  s.files = `git ls-files`.split("\n")
  s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n")
  s.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) }
  s.require_paths = ["lib"]

  s.add_dependency("resque", '~> 1.25')
  s.add_dependency("activesupport", '~> 3.2')

  s.add_development_dependency('rspec', '~> 2.5')
  s.add_development_dependency('awesome_print')

  # Needed for testing newer resque on ruby 1.8.7
  s.add_development_dependency('json')
end
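The activesupport dependency declared above (and added to the gem metadata below) supplies ActiveSupport::Inflector, whose constantize the module uses in tracking_class to map a tracking key back to its job class. A small, hypothetical illustration of that lookup (MyJob is a stand-in class defined only for this example):

# Illustrative sketch only: resolving a job class from a tracking key,
# mirroring what tracking_class does in the module above.
require 'active_support'
include ActiveSupport::Inflector

class MyJob; end

tracking_key = "concurrent.tracking.MyJob"  # as built by tracking_key(*args)
tracking_key.split(".")[2]                  # => "MyJob"
constantize(tracking_key.split(".")[2])     # => MyJob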
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: resque-concurrent-restriction
 version: !ruby/object:Gem::Version
-  version: 0.6.0
+  version: 0.6.1
 platform: ruby
 authors:
 - Matt Conway
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2016-02-
+date: 2016-02-26 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: resque
@@ -24,6 +24,20 @@ dependencies:
   - - "~>"
     - !ruby/object:Gem::Version
       version: '1.25'
+- !ruby/object:Gem::Dependency
+  name: activesupport
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.2'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '3.2'
 - !ruby/object:Gem::Dependency
   name: rspec
   requirement: !ruby/object:Gem::Requirement