balsamique 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/lib/balsamique.rb +486 -0
- metadata +67 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
1
|
+
---
|
2
|
+
SHA1:
|
3
|
+
metadata.gz: fa87508db8374e4941511cf4aea2322d1fdd7819
|
4
|
+
data.tar.gz: 5258fc17aa56b385a2643c27572f4ddad5e35e32
|
5
|
+
SHA512:
|
6
|
+
metadata.gz: c0f08994466deb01389d682aefa71ee7130f392fba1bab0a3020248aac69563cb4ececf832f9e45131fa397973b26b7a3e99fa13d7644741f390533f6adc858e
|
7
|
+
data.tar.gz: fb3bddda2ecb9449d9ae6f7d5ff6acaedd70ce28b3275af14f0ce748f5083c844a52672c3eb8fa1f0695e75a54a20c10db99a609349d92969a75d8f37e6f74e6
|
data/lib/balsamique.rb
ADDED
@@ -0,0 +1,486 @@
|
|
1
|
+
require 'digest/sha1'
|
2
|
+
require 'json'
|
3
|
+
require 'redis'
|
4
|
+
|
5
|
+
class Balsamique
|
6
|
+
# Creates a queue client on top of the given Redis connection.
# Every Redis key this instance touches is namespaced under +namespace+.
def initialize(redis, namespace = 'bQ')
  @redis = redis

  # Key prefixes: one sorted set per task queue, per-chunk stats hashes,
  # and per-topic environment hashes.
  @que_prefix = namespace + ':que:'
  @questats_prefix = namespace + ':questats:'
  @env_prefix = namespace + ':env:'

  # Fixed keys: job status hash, queue registry, retry counters,
  # failure details hash, failure-timestamp sorted set, unique-in-flight
  # bookkeeping, per-job task lists, and per-job args.
  @status = namespace + ':status'
  @queues = namespace + ':queues'
  @retries = namespace + ':retries'
  @failures = namespace + ':failures'
  @failz = namespace + ':failz'
  @unique = namespace + ':unique'
  @tasks = namespace + ':tasks'
  @args = namespace + ':args'
  # Reserved queue that job progress reports are pushed onto.
  @report_queue = @que_prefix + '_report'
end
|
23
|
+
|
24
|
+
REPORT_RETRY_DELAY = 60.0 # seconds
RETRY_DELAY = 600.0 # seconds

# The underlying Redis connection.
def redis
  @redis
end

# Runs a cached Lua script via EVALSHA, falling back to EVAL only when
# the server does not have the script cached (NOSCRIPT).  Any other
# CommandError (e.g. an error raised by the script itself) is re-raised
# instead of being masked by a pointless EVAL retry.
def redis_eval(cmd_sha, cmd, keys, argv)
  redis.evalsha(cmd_sha, keys, argv)
rescue Redis::CommandError => e
  raise unless e.message.include?('NOSCRIPT')
  puts "[INFO] Balsamique falling back to EVAL for #{cmd_sha}"
  redis.eval(cmd, keys, argv)
end
|
37
|
+
|
38
|
+
# Returns the name of the first task that has no recorded output yet
# (a one-element entry in the tasks array), or nil when none remains.
def self.next_task(tasks)
  pending = tasks.detect { |entry| entry.length == 1 }
  pending ? pending.first : nil
end
|
42
|
+
|
43
|
+
# Returns the name of the most recently completed task (the last entry
# that carries an output), or nil when no task has completed.
def self.current_task(tasks)
  completed = tasks.reverse_each.find { |entry| entry.length > 1 }
  completed ? completed.first : nil
end
|
47
|
+
|
48
|
+
# Removes +prefix+ from the front of +str+.  Returns the remainder
# (possibly ""), or nil when +str+ does not start with +prefix+.
def self.strip_prefix(str, prefix)
  str.start_with?(prefix) ? str[prefix.size..-1] : nil
end
|
54
|
+
|
55
|
+
# Time bucketing for queue statistics: timestamps are divided into
# STATS_SLICE-second slices, grouped STATS_CHUNK slices to a chunk.
STATS_SLICE = 10 # seconds
STATS_CHUNK = 90 # slices (= 900 seconds = 15 minutes)

# Splits a timestamp into [chunk index, slice index within the chunk].
def self.slice_timestamp(ts)
  slice = ts.to_i / STATS_SLICE
  slice.divmod(STATS_CHUNK)
end

# Compact base-36 encoding used in stats keys.
def self.enc36(i)
  i.to_s(36)
end

def self.dec36(s)
  s.to_i(36)
end

# Base-36 encoded [chunk, slice] pair for a timestamp.
def self.enc36_slice_timestamp(ts)
  slice_timestamp(ts).map { |part| enc36(part) }
end

# Inverse of slice_timestamp: rebuilds the slice's start timestamp.
def self.assemble_timestamp(chunk, slice)
  (chunk * STATS_CHUNK + slice) * STATS_SLICE
end

def self.dec36_assemble_timestamp(echunk, eslice)
  assemble_timestamp(dec36(echunk), dec36(eslice))
end
|
76
|
+
|
77
|
+
# Lua script ENQUEUE_JOB takes keys
# [tasks_h, args_h, jobstat_h, task1_z, queues_h, uniq_h]
# and args [tasks, args, run_at, uniq].
# uniq is optional.  If it's present, the script first checks to see
# if the key uniq is already set in the hash uniq_h.  If so, the
# negative of the integer value therein is returned and the script does
# nothing.  Otherwise, an integer id is written as that value, the
# tasks_h hash gets the value of tasks (JSON-encoded task list)
# written under the key id, the args_h hash gets the value args
# written under the key id, task1_z gets id zadded with score
# run_at.  Also, task1_z is written to jobstat_h under the key id.
# The value returned from the operation is the id.  A successful
# enqueueing is thus signaled by the return of the job id, while an
# enqueueing blocked by the uniq_in_flight constraint returns minus
# the blocking id.
# (Job ids are allocated from the counter stored under '' in uniq_h;
# queues_h also records the latest id,run_at enqueued per queue.)
ENQUEUE_JOB = <<EOF
local id = redis.call('hincrby', KEYS[6], '', 1)
if ARGV[4] then
local ukey = 'u:' .. ARGV[4]
local uniq = redis.call('hsetnx', KEYS[6], ukey, id)
if 0 == uniq then
return (- redis.call('hget', KEYS[6], ukey))
else
redis.call('hset', KEYS[6], id, ukey)
end
end
redis.call('hset', KEYS[1], id, ARGV[1])
redis.call('hset', KEYS[2], id, ARGV[2])
redis.call('hset', KEYS[3], id, KEYS[4] .. ',' .. ARGV[3])
redis.call('zadd', KEYS[4], ARGV[3], id)
redis.call('hset', KEYS[5], KEYS[4], id .. ',' .. ARGV[3])
return id
EOF
# SHA1 digest used to invoke the cached script via EVALSHA (see redis_eval).
ENQUEUE_JOB_SHA = Digest::SHA1.hexdigest(ENQUEUE_JOB)
|
111
|
+
|
112
|
+
# Enqueues a job described by +tasks+ (task list) and +args+ onto the
# queue of its first incomplete task, scheduled at +run_at+.  When
# +uniq_in_flight+ is given, at most one job with that key may be in
# flight.  Returns [enqueued?, job_id_string]; on a uniqueness conflict
# the id is that of the blocking job, and [false, nil] is returned when
# the task list has no runnable task.
def enqueue(tasks, args, uniq_in_flight = nil, run_at = Time.now.to_f)
  task = self.class.next_task(tasks)
  return false, nil unless task

  keys = [@tasks, @args, @status, @que_prefix + task.to_s, @queues, @unique]
  argv = [tasks.to_json, args.to_json, run_at]
  argv.push(uniq_in_flight) if uniq_in_flight

  id = redis_eval(ENQUEUE_JOB_SHA, ENQUEUE_JOB, keys, argv)
  return id > 0, id.abs.to_s
end
|
122
|
+
|
123
|
+
# Lua script DEQUEUE_TASK takes keys
# [args_h, tasks_h, questats_h, retries_h, task1_z, ...],
# and args [timestamp_f, retry_delay, tmod].
# It performs a conditional ZPOP on task1_z, where the
# condition is that the score of the first item is strictly less than
# timestamp_f.  If nothing is available to ZPOP, it tries task2_z, etc.
# If an id is returned from any ZPOP, it increments the retry count in
# retries_h and reschedules the task with exponential backoff
# (retry_delay * 2^retries).  Then it writes queue length and dequeue
# counters to questats_h (which expires after 6h), and returns
# [id, args_json, tasks_json, retries].
DEQUEUE_TASK = <<EOF
local ts = tonumber(ARGV[1])
local i = 5
while KEYS[i] do
local elem = redis.call('zrange', KEYS[i], 0, 0, 'withscores')
if elem[2] and tonumber(elem[2]) < ts then
local retries = redis.call('hincrby', KEYS[4], elem[1] .. ',' .. KEYS[i], 1)
local t_retry = ts + ARGV[2] * 2 ^ retries
redis.call('zadd', KEYS[i], t_retry, elem[1])
redis.call('hset', KEYS[3], KEYS[i] .. ',len,' .. ARGV[3],
redis.call('zcard', KEYS[i]))
redis.call('hincrby', KEYS[3], KEYS[i] .. ',dq,' .. ARGV[3], 1)
redis.call('expire', KEYS[3], 21600)
return({ elem[1],
redis.call('hget', KEYS[1], elem[1]),
redis.call('hget', KEYS[2], elem[1]), retries })
end
i = i + 1
end
EOF
DEQUEUE_TASK_SHA = Digest::SHA1.hexdigest(DEQUEUE_TASK)
|
154
|
+
|
155
|
+
# Pops the next due job from the first of +tasks+' queues that has one,
# atomically scheduling its retry +retry_delay+ * 2^retries seconds out.
# Returns { id:, args:, tasks:, retries: } or nil when nothing is due.
def dequeue(tasks, retry_delay = RETRY_DELAY, timestamp = Time.now.to_f)
  chunk, slice = self.class.enc36_slice_timestamp(timestamp)
  keys = [@args, @tasks, @questats_prefix + chunk, @retries] +
         tasks.map { |task| @que_prefix + task.to_s }
  popped = redis_eval(
    DEQUEUE_TASK_SHA, DEQUEUE_TASK, keys, [timestamp, retry_delay, slice])
  return nil unless popped

  id, args_json, tasks_json, retries = popped
  { id: id, args: JSON.parse(args_json), tasks: JSON.parse(tasks_json),
    retries: retries }
end
|
169
|
+
|
170
|
+
# Lua script SUCCEED_TASK takes keys
# [tasks_h, jobstat_h, retries_h, report_z, failz_z, failures_h,
#  cur_que_z, next_que_z?, queues_h?]
# and args [id, timestamp, tasks_json].  It verifies the job's current
# task matches cur_que_z, clears the task's retry counter and queue
# entry, prunes this task's failure records, stores the updated task
# list, pushes the job onto the report queue, and either advances the
# job to next_que_z or marks its status terminal ('_').
# Fix: the 'missing retry count' branch previously passed format args
# directly to redis.error_reply, which takes a single string, so the
# message was returned with literal '%s' placeholders; wrap it in
# string.format like the task mis-match branch.
SUCCEED_TASK = <<EOF
local id = ARGV[1]
local ts = ARGV[2]
local tasks = cjson.decode(redis.call('hget', KEYS[1], id))
local cur_task = ''
for _, task in ipairs(tasks) do
if not task[2] then cur_task = task[1]; break end
end
if (not (string.sub(KEYS[7], - string.len(cur_task)) == cur_task)) then
return redis.error_reply(
string.format('task mis-match %s %s %s', id, cur_task, KEYS[7]))
end
if (redis.call('hdel', KEYS[3], id .. ',' .. KEYS[7]) > 0) then
redis.call('zrem', KEYS[7], id)
else
return redis.error_reply(
string.format('missing retry count %s %s', id, KEYS[7]))
end
local status = redis.call('hget', KEYS[2], id)
local i = 0
for r in string.gmatch(status, "[^,]+") do
i = i + 1
if (i > 2 and i % 2 == 1) then
local rkey = id .. ',' .. KEYS[7] .. ',' .. r
redis.call('zrem', KEYS[5], rkey)
redis.call('hdel', KEYS[6], rkey)
end
end
redis.call('hset', KEYS[1], id, ARGV[3])
redis.call('hdel', KEYS[3], id .. ',' .. KEYS[4])
redis.call('zadd', KEYS[4], ts, id)
if (KEYS[8]) then
redis.call('hset', KEYS[2], id, KEYS[8] .. ',' .. ts)
redis.call('hdel', KEYS[3], id .. ',' .. KEYS[8])
redis.call('zadd', KEYS[8], ts, id)
redis.call('hset', KEYS[9], KEYS[8], id .. ',' .. ts)
else
redis.call('hset', KEYS[2], id, '_' .. ',' .. ts)
end
return id
EOF
SUCCEED_TASK_SHA = Digest::SHA1.hexdigest(SUCCEED_TASK)
|
211
|
+
|
212
|
+
# Records successful completion of job +id+'s current task, handing the
# job to its next task's queue (registering that queue) or marking it
# finished.  +tasks+ is the updated task list including the new output.
# Returns true when the script acknowledged the id.
def succeed(id, tasks, timestamp = Time.now.to_f)
  cur = self.class.current_task(tasks)
  nxt = self.class.next_task(tasks)
  keys = [@tasks, @status, @retries, @report_queue, @failz, @failures,
          @que_prefix + cur]
  if nxt
    keys << (@que_prefix + nxt)
    keys << @queues
  end
  argv = [id, timestamp, tasks.to_json]
  redis_eval(SUCCEED_TASK_SHA, SUCCEED_TASK, keys, argv) == id
end
|
222
|
+
|
223
|
+
# Lua script FAIL_TASK takes keys
# [tasks_h, jobstat_h, retries_h, failz_z, failures_h, report_z]
# and args [id, timestamp, queue_key, details_json].  It verifies that
# queue_key matches the job's current task, records the failure details
# under "<id>,<queue_key>,<retry#>" in failures_h and failz_z, appends
# "<retry#>,<timestamp>" to the job's status entry, and pushes the job
# onto the report queue (resetting its report retry counter).
FAIL_TASK = <<EOF
local id = ARGV[1]
local ts = ARGV[2]
local tasks = cjson.decode(redis.call('hget', KEYS[1], id))
local cur_task = ''
for _, task in ipairs(tasks) do
if not task[2] then cur_task = task[1]; break end
end
if (not (string.sub(ARGV[3], - string.len(cur_task)) == cur_task)) then
return redis.error_reply(
string.format('task mismatch %s %s %s', id, cur_task, ARGV[3]))
end
local rkey = id .. ',' .. ARGV[3]
local retries = tonumber(redis.call('hget', KEYS[3], rkey))
if (not retries) then
return redis.error_reply(
string.format('missing retry count %s %s', id, ARGV[3]))
end
rkey = rkey .. ',' .. retries
redis.call('zadd', KEYS[4], ts, rkey)
redis.call('hset', KEYS[5], rkey, ARGV[4])
local status = redis.call('hget', KEYS[2], id)
status = status .. ',' .. retries .. ',' .. ts
redis.call('hset', KEYS[2], id, status)
redis.call('hdel', KEYS[3], id .. ',' .. KEYS[6])
redis.call('zadd', KEYS[6], ts, id)
return id
EOF
FAIL_TASK_SHA = Digest::SHA1.hexdigest(FAIL_TASK)
|
252
|
+
|
253
|
+
# Records a failure of +task+ for job +id+ with JSON-able +details+.
# Returns true when the script acknowledged the id.
def fail(id, task, details, timestamp = Time.now.to_f)
  keys = [@tasks, @status, @retries, @failz, @failures, @report_queue]
  argv = [id, timestamp, "#{@que_prefix}#{task}", JSON.generate(details)]
  redis_eval(FAIL_TASK_SHA, FAIL_TASK, keys, argv) == id
end
|
258
|
+
|
259
|
+
# Joins failure-key => timestamp pairs (as returned by get_failz) with
# their stored details.  Returns {job_id => [{task:, retries:, ts:,
# details:}, ...]}; the hash's default block yields [] for unknown ids.
def get_failures(failz)
  out = Hash.new { Array.new }
  fkeys = failz.keys
  unless fkeys.empty?
    details_list = redis.hmget(@failures, fkeys)
    fkeys.each_with_index do |key, idx|
      id, queue, attempt = key.split(',')
      out[id] <<= {
        task: self.class.strip_prefix(queue, @que_prefix),
        retries: attempt.to_i, ts: failz[key],
        details: JSON.parse(details_list[idx]) }
    end
  end
  out
end
|
275
|
+
|
276
|
+
# Fetches failure keys with their timestamps from the failz sorted set.
# A negative +limit+ returns the most recent |limit| entries (scanning
# from +latest+ down), a positive one the oldest +limit+ entries.
# Returns {failure_key => timestamp}.
def get_failz(earliest = 0, latest = Time.now.to_f, limit = -100)
  rows =
    if limit < 0
      redis.zrevrangebyscore(@failz, latest, earliest,
                             limit: [0, -limit], with_scores: true)
    else
      redis.zrangebyscore(@failz, earliest, latest,
                          limit: [0, limit], with_scores: true)
    end
  rows.each_with_object({}) { |(member, score), acc| acc[member] = score }
end
|
289
|
+
|
290
|
+
# Convenience wrapper: fetches recent failure timestamps and joins them
# with their stored details.  Arguments are forwarded to get_failz
# (earliest, latest, limit).
def failures(*args)
  get_failures(get_failz(*args))
end
|
293
|
+
|
294
|
+
# Deletes a queue's sorted set and removes it from the queue registry
# in one MULTI.  Returns true when the registry entry actually existed
# (hdel replied 1).
def delete_queue(queue)
  queue_key = @que_prefix + queue.to_s
  redis.multi do |r|
    r.del(queue_key)
    r.hdel(@queues, queue_key)
  end.last == 1
end
|
301
|
+
|
302
|
+
# Names of all queues present in the registry hash, with the key
# prefix stripped.
def queues
  redis.hgetall(@queues).keys.map do |key|
    self.class.strip_prefix(key, @que_prefix)
  end
end
|
306
|
+
|
307
|
+
# Number of jobs currently scheduled on +queue+ (zcard of its sorted
# set; the || 0 guards against a nil reply).
def queue_length(queue)
  redis.zcard(@que_prefix + queue) || 0
end
|
310
|
+
|
311
|
+
def decode_job_status(status)
|
312
|
+
queue, ts, *retries = status.split(',')
|
313
|
+
ts = ts.to_f
|
314
|
+
timestamps = [ts]
|
315
|
+
while retries.size > 0
|
316
|
+
i = retries.shift.to_i
|
317
|
+
timestamps[i] = retries.shift.to_f
|
318
|
+
end
|
319
|
+
return queue, timestamps
|
320
|
+
end
|
321
|
+
|
322
|
+
# Removes every trace of job +id+: its queue entry, retry counter,
# failure records, args and task list, then its status and uniqueness
# bookkeeping.  Uses a compare-and-delete on the status entry: if the
# status changed while we were deleting (a racing worker moved the
# job), the whole removal is retried recursively.
def remove_job(id)
  status = redis.hget(@status, id)
  queue, timestamps = decode_job_status(status)
  redis.multi do |r|
    # Status entries that start with the queue prefix mean the job is
    # still parked on a live queue; terminal jobs start with '_'.
    if queue.start_with?(@que_prefix)
      r.zrem(queue, id)
      rkey = "#{id},#{queue}"
      r.hdel(@retries, rkey)
      # One failure record per recorded retry (timestamps after the
      # first), keyed "<id>,<queue>,<attempt>".
      rkeys = []
      timestamps.drop(1).each_with_index do |ts, i|
        rkeys << rkey + ",#{i + 1}"
      end
      if rkeys.size > 0
        r.hdel(@failures, rkeys)
        r.zrem(@failz, rkeys)
      end
    end
    r.hdel(@args, id)
    r.hdel(@tasks, id)
  end
  # Compare-and-delete: only drop the status (and the unique-in-flight
  # pair) if nothing raced us; otherwise start over from the new status.
  check_status = redis.hget(@status, id)
  return if check_status.nil?
  if check_status == status
    redis.hdel(@status, id)
    if (uid = redis.hget(@unique, id))
      redis.hdel(@unique, [id, uid])
    end
  else
    remove_job(id)
  end
end
|
353
|
+
|
354
|
+
# Looks up status entries for +ids+.  Returns {id => {task:, timestamps:}},
# omitting unknown ids; :task is nil for terminal ('_') statuses.
def job_status(*ids)
  statuses = redis.hmget(@status, *ids)
  ids.zip(statuses).each_with_object({}) do |(id, status), acc|
    next if status.nil?
    queue, timestamps = decode_job_status(status)
    acc[id] = { task: self.class.strip_prefix(queue, @que_prefix),
                timestamps: timestamps }
  end
end
|
366
|
+
|
367
|
+
# Augments a job_status result hash with failure details: for every
# status that records retry timestamps, looks up the corresponding
# failure records and stores them under :failures.  Mutates and
# returns +statuses+.
def fill_job_failures(statuses)
  failz = {}
  statuses.each do |id, status|
    next unless (task = status[:task])
    timestamps = status[:timestamps]
    # Only the timestamps after the first correspond to failures.
    next unless timestamps.size > 1
    queue = @que_prefix + task
    # Failure records are keyed "<id>,<queue>,<attempt>".
    timestamps.drop(1).each_with_index do |ts, i|
      failz["#{id},#{queue},#{i+1}"] = ts
    end
  end
  get_failures(failz).each do |id, failures|
    statuses[id][:failures] = failures
  end
  statuses
end
|
383
|
+
|
384
|
+
# Augments a job_status result hash with each job's :args and :tasks,
# fetched in one MULTI round trip and JSON-decoded (nil when the data
# is no longer stored).  Mutates +statuses+.
def fill_args_tasks(statuses)
  ids = statuses.keys
  args, tasks = redis.multi do |r|
    r.hmget(@args, ids)
    r.hmget(@tasks, ids)
  end
  ids.zip(args, tasks).each do |id, a, t|
    statuses[id][:args] = a && JSON.parse(a)
    statuses[id][:tasks] = t && JSON.parse(t)
  end
end
|
395
|
+
|
396
|
+
# Collects queue statistics for the last +chunks+ 15-minute chunks up
# to +latest+.  Returns {stat => {timestamp => {queue => value}}} where
# the stat names are those written by DEQUEUE_TASK ('len' and 'dq').
def queue_stats(chunks = 3, latest = Time.now.to_f)
  last_chunk, last_slice = self.class.slice_timestamp(latest)
  stats = {}
  (0..(chunks - 1)).each do |chunk_i|
    chunk_ts = self.class.enc36(last_chunk - chunk_i)
    questats_key = @questats_prefix + chunk_ts
    stats_chunk = redis.hgetall(questats_key)
    # NOTE(review): hgetall returns {} (never nil) for a missing key,
    # so this guard never triggers; iterating {} is a no-op anyway.
    next unless stats_chunk
    stats_chunk.each do |key, val|
      # Stats keys have the form "<queue_key>,<stat>,<slice36>".
      queue, stat, slice = key.split(',')
      queue = self.class.strip_prefix(queue, @que_prefix)
      timestamp = self.class.dec36_assemble_timestamp(chunk_ts, slice)
      stats[stat] = {} unless stats[stat]
      stats[stat][timestamp] = {} unless stats[stat][timestamp]
      stats[stat][timestamp][queue] = val.to_i
    end
  end
  stats
end
|
415
|
+
|
416
|
+
# (Re)schedules job +id+ on the report queue at +timestamp+, resetting
# its report retry counter.
def push_report(id, timestamp = Time.now.to_f)
  retry_key = "#{id},#{@report_queue}"
  redis.multi do |r|
    r.hdel(@retries, retry_key)
    r.zadd(@report_queue, timestamp, id)
  end
end
|
422
|
+
|
423
|
+
# Lua script REPORT_POP takes keys [report_z, retries_h] and args
# [timestamp, retry_delay].  If the lowest-scored id on the report
# queue is due (score strictly below timestamp), it increments that
# id's report retry counter, reschedules the id with exponential
# backoff, and returns the JSON array [id, original_score, retries];
# otherwise it returns nil.
REPORT_POP = <<EOF
local t_pop = tonumber(ARGV[1])
local elem = redis.call('zrange', KEYS[1], 0, 0, 'withscores')
local t_elem = tonumber(elem[2])
if (t_elem and t_elem < t_pop) then
local retries = redis.call('hincrby', KEYS[2], elem[1] .. ',' .. KEYS[1], 1)
local t_retry = t_pop + tonumber(ARGV[2]) * 2 ^ retries
redis.call('zadd', KEYS[1], t_retry, elem[1])
elem[3] = retries
elem[2] = t_elem
return cjson.encode(elem)
end
EOF
REPORT_POP_SHA = Digest::SHA1.hexdigest(REPORT_POP)
|
437
|
+
|
438
|
+
# Pops the next due id from the report queue, scheduling its redelivery
# with exponential backoff.  Returns [id, score, retries] or nil when
# nothing is due.
def pop_report(timestamp = Time.now.to_f)
  raw = redis_eval(
    REPORT_POP_SHA, REPORT_POP, [@report_queue, @retries],
    [timestamp, REPORT_RETRY_DELAY])
  raw.nil? ? nil : JSON.parse(raw)
end
|
444
|
+
|
445
|
+
# Lua script REPORT_COMPLETE takes keys [report_z, retries_h] and arg
# [id]: it drops the id's report retry counter and, when one existed,
# removes the id from the report queue.
REPORT_COMPLETE = <<EOF
if (redis.call('hdel', KEYS[2], ARGV[1] .. ',' .. KEYS[1]) > 0) then
redis.call('zrem', KEYS[1], ARGV[1])
end
EOF
REPORT_COMPLETE_SHA = Digest::SHA1.hexdigest(REPORT_COMPLETE)
# Marks a popped report as handled so it will not be redelivered.
def complete_report(id)
  redis_eval(REPORT_COMPLETE_SHA, REPORT_COMPLETE,
    [@report_queue, @retries], [id])
end
|
455
|
+
|
456
|
+
# Writes the key/value pairs of +h+ into the environment hash for
# +topic+.  Returns true on success; no-op (returns nil) for an empty
# hash.
def put_env(topic, h)
  return if h.empty?
  flat = h.flat_map { |k, v| [k, v] }
  hkey = @env_prefix + topic.to_s
  redis.hmset(hkey, *flat) == 'OK'
end
|
463
|
+
|
464
|
+
# Deletes environment data for +topic+: the whole hash when +keys+ is
# nil, otherwise just the given fields (no-op for an empty key list).
def rm_env(topic, keys = nil)
  hkey = @env_prefix + topic.to_s
  return redis.del(hkey) if keys.nil?
  redis.hdel(hkey, keys) unless keys.empty?
end
|
472
|
+
|
473
|
+
# Reads environment data for +topic+.  With +keys+ nil returns the
# whole hash; otherwise a hash of just the requested keys (missing
# entries map to nil).  An empty key list yields {}.
def get_env(topic, keys = nil)
  hkey = @env_prefix + topic.to_s
  return redis.hgetall(hkey) if keys.nil?
  return {} if keys.empty?
  keys.zip(redis.hmget(hkey, keys)).to_h
end
|
486
|
+
end
|
metadata
ADDED
@@ -0,0 +1,67 @@
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
2
|
+
name: balsamique
|
3
|
+
version: !ruby/object:Gem::Version
|
4
|
+
version: 0.1.0
|
5
|
+
platform: ruby
|
6
|
+
authors:
|
7
|
+
- DWNLD
|
8
|
+
autorequire:
|
9
|
+
bindir: bin
|
10
|
+
cert_chain: []
|
11
|
+
date: 2015-12-08 00:00:00.000000000 Z
|
12
|
+
dependencies:
|
13
|
+
- !ruby/object:Gem::Dependency
|
14
|
+
name: redis
|
15
|
+
requirement: !ruby/object:Gem::Requirement
|
16
|
+
requirements:
|
17
|
+
- - ">="
|
18
|
+
- !ruby/object:Gem::Version
|
19
|
+
version: '0'
|
20
|
+
type: :runtime
|
21
|
+
prerelease: false
|
22
|
+
version_requirements: !ruby/object:Gem::Requirement
|
23
|
+
requirements:
|
24
|
+
- - ">="
|
25
|
+
- !ruby/object:Gem::Version
|
26
|
+
version: '0'
|
27
|
+
description: |
|
28
|
+
Balsamique (pronounced "Balsami-QUEUE") is a Redis-backed Ruby library
|
29
|
+
which implements a job queue system. Balsamique jobs consist of
|
30
|
+
JSON-encoded args hashes, along with lists of tasks and their
|
31
|
+
successful outputs. Jobs can be enqueued to run at some time in the
|
32
|
+
future, and workers can also delay the running of subsequent tasks.
|
33
|
+
Retries are automatically scheduled at the time a worker checks out a
|
34
|
+
job, and cancelled only when the worker reports success. In contrast
|
35
|
+
to Resque, Balsamique uses Lua scripting in Redis extensively to make
|
36
|
+
job state transitions as atomic as possible.
|
37
|
+
email: keith@dwnld.me
|
38
|
+
executables: []
|
39
|
+
extensions: []
|
40
|
+
extra_rdoc_files: []
|
41
|
+
files:
|
42
|
+
- lib/balsamique.rb
|
43
|
+
homepage: https://github.com/dwnld/balsamique
|
44
|
+
licenses:
|
45
|
+
- MIT
|
46
|
+
metadata: {}
|
47
|
+
post_install_message:
|
48
|
+
rdoc_options: []
|
49
|
+
require_paths:
|
50
|
+
- lib
|
51
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
52
|
+
requirements:
|
53
|
+
- - ">="
|
54
|
+
- !ruby/object:Gem::Version
|
55
|
+
version: '0'
|
56
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
57
|
+
requirements:
|
58
|
+
- - ">="
|
59
|
+
- !ruby/object:Gem::Version
|
60
|
+
version: '0'
|
61
|
+
requirements: []
|
62
|
+
rubyforge_project:
|
63
|
+
rubygems_version: 2.4.3
|
64
|
+
signing_key:
|
65
|
+
specification_version: 4
|
66
|
+
summary: Redis-backed Job Queue System
|
67
|
+
test_files: []
|