glide-mq 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +190 -0
- package/dist/connection.d.ts +36 -0
- package/dist/connection.d.ts.map +1 -0
- package/dist/connection.js +100 -0
- package/dist/connection.js.map +1 -0
- package/dist/errors.d.ts +10 -0
- package/dist/errors.d.ts.map +1 -0
- package/dist/errors.js +25 -0
- package/dist/errors.js.map +1 -0
- package/dist/flow-producer.d.ts +36 -0
- package/dist/flow-producer.d.ts.map +1 -0
- package/dist/flow-producer.js +185 -0
- package/dist/flow-producer.js.map +1 -0
- package/dist/functions/index.d.ts +136 -0
- package/dist/functions/index.d.ts.map +1 -0
- package/dist/functions/index.js +1062 -0
- package/dist/functions/index.js.map +1 -0
- package/dist/graceful-shutdown.d.ts +17 -0
- package/dist/graceful-shutdown.d.ts.map +1 -0
- package/dist/graceful-shutdown.js +27 -0
- package/dist/graceful-shutdown.js.map +1 -0
- package/dist/index.d.ts +13 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +27 -0
- package/dist/index.js.map +1 -0
- package/dist/job.d.ts +106 -0
- package/dist/job.d.ts.map +1 -0
- package/dist/job.js +252 -0
- package/dist/job.js.map +1 -0
- package/dist/queue-events.d.ts +33 -0
- package/dist/queue-events.d.ts.map +1 -0
- package/dist/queue-events.js +138 -0
- package/dist/queue-events.js.map +1 -0
- package/dist/queue.d.ts +140 -0
- package/dist/queue.d.ts.map +1 -0
- package/dist/queue.js +483 -0
- package/dist/queue.js.map +1 -0
- package/dist/scheduler.d.ts +48 -0
- package/dist/scheduler.d.ts.map +1 -0
- package/dist/scheduler.js +140 -0
- package/dist/scheduler.js.map +1 -0
- package/dist/telemetry.d.ts +29 -0
- package/dist/telemetry.d.ts.map +1 -0
- package/dist/telemetry.js +90 -0
- package/dist/telemetry.js.map +1 -0
- package/dist/types.d.ts +125 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +3 -0
- package/dist/types.js.map +1 -0
- package/dist/utils.d.ts +65 -0
- package/dist/utils.d.ts.map +1 -0
- package/dist/utils.js +217 -0
- package/dist/utils.js.map +1 -0
- package/dist/worker.d.ts +138 -0
- package/dist/worker.d.ts.map +1 -0
- package/dist/worker.js +574 -0
- package/dist/worker.js.map +1 -0
- package/dist/workflows.d.ts +34 -0
- package/dist/workflows.d.ts.map +1 -0
- package/dist/workflows.js +117 -0
- package/dist/workflows.js.map +1 -0
- package/package.json +56 -0
package/dist/functions/index.js
@@ -0,0 +1,1062 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.LIBRARY_SOURCE = exports.CONSUMER_GROUP = exports.LIBRARY_VERSION = exports.LIBRARY_NAME = void 0;
exports.addJob = addJob;
exports.dedup = dedup;
exports.promote = promote;
exports.completeJob = completeJob;
exports.completeAndFetchNext = completeAndFetchNext;
exports.failJob = failJob;
exports.reclaimStalled = reclaimStalled;
exports.pause = pause;
exports.resume = resume;
exports.rateLimit = rateLimit;
exports.checkConcurrency = checkConcurrency;
exports.moveToActive = moveToActive;
exports.removeJob = removeJob;
exports.revokeJob = revokeJob;
exports.addFlow = addFlow;
exports.completeChild = completeChild;
exports.LIBRARY_NAME = 'glidemq';
exports.LIBRARY_VERSION = '5';
// Consumer group name used by workers
exports.CONSUMER_GROUP = 'workers';
// Embedded Lua library source (from glidemq.lua)
// Loaded once via FUNCTION LOAD, persistent across Valkey restarts.
exports.LIBRARY_SOURCE = `#!lua name=glidemq

local PRIORITY_SHIFT = 4398046511104

local function emitEvent(eventsKey, eventType, jobId, extraFields)
  local fields = {'event', eventType, 'jobId', tostring(jobId)}
  if extraFields then
    for i = 1, #extraFields, 2 do
      fields[#fields + 1] = extraFields[i]
      fields[#fields + 1] = extraFields[i + 1]
    end
  end
  redis.call('XADD', eventsKey, 'MAXLEN', '~', '1000', '*', unpack(fields))
end

redis.register_function('glidemq_version', function(keys, args)
  return '${exports.LIBRARY_VERSION}'
end)

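The scheduled ZSet packs priority and due time into a single score: `PRIORITY_SHIFT` is 2^42 (4398046511104), so `score = priority * 2^42 + dueTimeMs` keeps millisecond timestamps (currently around 1.7 * 10^12) in the low bits while higher priorities sort later. A minimal TypeScript sketch of the same encoding (names are illustrative, not part of the package); note that scores stay exact doubles only while priority stays below about 2^11, since 2^11 * 2^42 reaches Number.MAX_SAFE_INTEGER:

```ts
// Mirrors PRIORITY_SHIFT above: 4398046511104 === 2 ** 42.
const PRIORITY_SHIFT = 2 ** 42;

// How glidemq_addJob scores a delayed job in the scheduled ZSet.
function encodeScore(priority: number, dueTimeMs: number): number {
    return priority * PRIORITY_SHIFT + dueTimeMs;
}

// Inverse, useful when inspecting scheduled-set entries by hand.
function decodeScore(score: number): { priority: number; dueTimeMs: number } {
    const priority = Math.floor(score / PRIORITY_SHIFT);
    return { priority, dueTimeMs: score - priority * PRIORITY_SHIFT };
}
```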
redis.register_function('glidemq_addJob', function(keys, args)
  local idKey = keys[1]
  local streamKey = keys[2]
  local scheduledKey = keys[3]
  local eventsKey = keys[4]
  local jobName = args[1]
  local jobData = args[2]
  local jobOpts = args[3]
  local timestamp = tonumber(args[4])
  local delay = tonumber(args[5]) or 0
  local priority = tonumber(args[6]) or 0
  local parentId = args[7] or ''
  local maxAttempts = tonumber(args[8]) or 0
  local jobId = redis.call('INCR', idKey)
  local jobIdStr = tostring(jobId)
  local prefix = string.sub(idKey, 1, #idKey - 2)
  local jobKey = prefix .. 'job:' .. jobIdStr
  local hashFields = {
    'id', jobIdStr,
    'name', jobName,
    'data', jobData,
    'opts', jobOpts,
    'timestamp', tostring(timestamp),
    'attemptsMade', '0',
    'delay', tostring(delay),
    'priority', tostring(priority),
    'maxAttempts', tostring(maxAttempts)
  }
  if parentId ~= '' then
    hashFields[#hashFields + 1] = 'parentId'
    hashFields[#hashFields + 1] = parentId
  end
  if delay > 0 or priority > 0 then
    hashFields[#hashFields + 1] = 'state'
    hashFields[#hashFields + 1] = delay > 0 and 'delayed' or 'prioritized'
  else
    hashFields[#hashFields + 1] = 'state'
    hashFields[#hashFields + 1] = 'waiting'
  end
  redis.call('HSET', jobKey, unpack(hashFields))
  if delay > 0 then
    local score = priority * PRIORITY_SHIFT + (timestamp + delay)
    redis.call('ZADD', scheduledKey, score, jobIdStr)
  elseif priority > 0 then
    local score = priority * PRIORITY_SHIFT
    redis.call('ZADD', scheduledKey, score, jobIdStr)
  else
    redis.call('XADD', streamKey, '*', 'jobId', jobIdStr)
  end
  emitEvent(eventsKey, 'added', jobIdStr, {'name', jobName})
  return jobIdStr
end)

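glidemq_addJob recovers the queue prefix by trimming the trailing 'id' from the id key (`string.sub(idKey, 1, #idKey - 2)`), so all per-queue keys are assumed to share one prefix; the 9- and 6-character trims in glidemq_promote and glidemq_reclaimStalled below likewise match 'scheduled' and 'stream'. A hypothetical key builder consistent with that arithmetic (the package's real key shape lives in another file; this is an assumption for illustration):

```ts
// Hypothetical key layout consistent with the Lua prefix arithmetic: the id key
// must be exactly `${prefix}id` so that dropping its last two characters yields
// the prefix used for `${prefix}job:<jobId>`.
function makeKeys(prefix: string) {
    return {
        id: `${prefix}id`,                          // INCR counter for job IDs
        stream: `${prefix}stream`,                  // XADD target for waiting jobs
        scheduled: `${prefix}scheduled`,            // ZSet of delayed/prioritized jobs
        events: `${prefix}events`,                  // event stream (MAXLEN ~1000)
        job: (id: string) => `${prefix}job:${id}`,  // per-job hash
    };
}
```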
redis.register_function('glidemq_promote', function(keys, args)
  local scheduledKey = keys[1]
  local streamKey = keys[2]
  local eventsKey = keys[3]
  local now = tonumber(args[1])
  local members = redis.call('ZRANGEBYSCORE', scheduledKey, '0', tostring(now))
  local count = 0
  for i = 1, #members do
    local jobId = members[i]
    redis.call('XADD', streamKey, '*', 'jobId', jobId)
    redis.call('ZREM', scheduledKey, jobId)
    local prefix = string.sub(scheduledKey, 1, #scheduledKey - 9)
    local jobKey = prefix .. 'job:' .. jobId
    redis.call('HSET', jobKey, 'state', 'waiting')
    emitEvent(eventsKey, 'promoted', jobId, nil)
    count = count + 1
  end
  return count
end)

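Promotion is pull-based: nothing fires when a delay expires, so some scheduler has to call glidemq_promote periodically. A minimal polling sketch using the `promote` wrapper defined further down in this file (the interval choice is illustrative, not the package's scheduler):

```ts
import { promote } from './functions'; // import path illustrative

// Sketch: a scheduler tick that moves due jobs from the scheduled ZSet
// into the stream, making them visible to XREADGROUP consumers.
function startPromoteLoop(client: any, k: any, intervalMs = 1000): () => void {
    const timer = setInterval(() => {
        promote(client, k, Date.now()).catch(() => { /* log in real code */ });
    }, intervalMs);
    return () => clearInterval(timer); // call to stop the loop
}
```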
redis.register_function('glidemq_complete', function(keys, args)
  local streamKey = keys[1]
  local completedKey = keys[2]
  local eventsKey = keys[3]
  local jobKey = keys[4]
  local jobId = args[1]
  local entryId = args[2]
  local returnvalue = args[3]
  local timestamp = tonumber(args[4])
  local group = args[5]
  local removeMode = args[6] or '0'
  local removeCount = tonumber(args[7]) or 0
  local removeAge = tonumber(args[8]) or 0
  local depsMember = args[9] or ''
  local parentId = args[10] or ''
  redis.call('XACK', streamKey, group, entryId)
  redis.call('XDEL', streamKey, entryId)
  redis.call('ZADD', completedKey, timestamp, jobId)
  redis.call('HSET', jobKey,
    'state', 'completed',
    'returnvalue', returnvalue,
    'finishedOn', tostring(timestamp)
  )
  emitEvent(eventsKey, 'completed', jobId, {'returnvalue', returnvalue})
  local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))
  if removeMode == 'true' then
    redis.call('ZREM', completedKey, jobId)
    redis.call('DEL', jobKey)
  elseif removeMode == 'count' and removeCount > 0 then
    local total = redis.call('ZCARD', completedKey)
    if total > removeCount then
      local excess = redis.call('ZRANGE', completedKey, 0, total - removeCount - 1)
      for i = 1, #excess do
        local oldId = excess[i]
        redis.call('DEL', prefix .. 'job:' .. oldId)
        redis.call('ZREM', completedKey, oldId)
      end
    end
  elseif removeMode == 'age_count' then
    if removeAge > 0 then
      local cutoff = timestamp - (removeAge * 1000)
      local old = redis.call('ZRANGEBYSCORE', completedKey, '0', tostring(cutoff))
      for i = 1, #old do
        local oldId = old[i]
        redis.call('DEL', prefix .. 'job:' .. oldId)
        redis.call('ZREM', completedKey, oldId)
      end
    end
    if removeCount > 0 then
      local total = redis.call('ZCARD', completedKey)
      if total > removeCount then
        local excess = redis.call('ZRANGE', completedKey, 0, total - removeCount - 1)
        for i = 1, #excess do
          local oldId = excess[i]
          redis.call('DEL', prefix .. 'job:' .. oldId)
          redis.call('ZREM', completedKey, oldId)
        end
      end
    end
  end
  if depsMember ~= '' and parentId ~= '' and #keys >= 8 then
    local parentDepsKey = keys[5]
    local parentJobKey = keys[6]
    local parentStreamKey = keys[7]
    local parentEventsKey = keys[8]
    local doneCount = redis.call('HINCRBY', parentJobKey, 'depsCompleted', 1)
    local totalDeps = redis.call('SCARD', parentDepsKey)
    local remaining = totalDeps - doneCount
    if remaining <= 0 then
      redis.call('HSET', parentJobKey, 'state', 'waiting')
      redis.call('XADD', parentStreamKey, '*', 'jobId', parentId)
      emitEvent(parentEventsKey, 'active', parentId, nil)
    end
  end
  return 1
end)

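When keys 5–8 and the two trailing args are supplied, glidemq_complete folds flow bookkeeping into the same call: it increments `depsCompleted` on the parent hash and re-queues the parent once the counter reaches the deps set's cardinality. A hedged sketch of a worker finishing a child job through the `completeJob` wrapper defined below (the parentKeys/depsMember shapes follow that wrapper's parameters; import path and option values are illustrative):

```ts
import { completeJob } from './functions'; // import path illustrative

// Sketch: complete a child job that belongs to a flow. depsMember follows
// glidemq_addFlow's convention: `${childQueuePrefix}:${childJobId}`.
async function finishChild(client: any, k: any, parentKeys: any, parentId: string,
    childQueuePrefix: string, jobId: string, entryId: string, result: unknown) {
    await completeJob(client, k, jobId, entryId, JSON.stringify(result), Date.now(),
        'workers',                  // CONSUMER_GROUP default
        { age: 3600, count: 1000 }, // removeOnComplete: age in seconds, then count cap
        { parentId, depsMember: `${childQueuePrefix}:${jobId}`, parentKeys });
}
```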
redis.register_function('glidemq_completeAndFetchNext', function(keys, args)
  local streamKey = keys[1]
  local completedKey = keys[2]
  local eventsKey = keys[3]
  local jobKey = keys[4]
  local jobId = args[1]
  local entryId = args[2]
  local returnvalue = args[3]
  local timestamp = tonumber(args[4])
  local group = args[5]
  local consumer = args[6]
  local removeMode = args[7] or '0'
  local removeCount = tonumber(args[8]) or 0
  local removeAge = tonumber(args[9]) or 0
  local depsMember = args[10] or ''
  local parentId = args[11] or ''

  -- Phase 1: Complete current job (same as glidemq_complete)
  redis.call('XACK', streamKey, group, entryId)
  redis.call('XDEL', streamKey, entryId)
  redis.call('ZADD', completedKey, timestamp, jobId)
  redis.call('HSET', jobKey,
    'state', 'completed',
    'returnvalue', returnvalue,
    'finishedOn', tostring(timestamp)
  )
  emitEvent(eventsKey, 'completed', jobId, {'returnvalue', returnvalue})
  local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))

  -- Retention cleanup
  if removeMode == 'true' then
    redis.call('ZREM', completedKey, jobId)
    redis.call('DEL', jobKey)
  elseif removeMode == 'count' and removeCount > 0 then
    local total = redis.call('ZCARD', completedKey)
    if total > removeCount then
      local excess = redis.call('ZRANGE', completedKey, 0, total - removeCount - 1)
      for i = 1, #excess do
        redis.call('DEL', prefix .. 'job:' .. excess[i])
        redis.call('ZREM', completedKey, excess[i])
      end
    end
  end

  -- Parent deps
  if depsMember ~= '' and parentId ~= '' and #keys >= 8 then
    local parentDepsKey = keys[5]
    local parentJobKey = keys[6]
    local parentStreamKey = keys[7]
    local parentEventsKey = keys[8]
    local doneCount = redis.call('HINCRBY', parentJobKey, 'depsCompleted', 1)
    local totalDeps = redis.call('SCARD', parentDepsKey)
    if totalDeps - doneCount <= 0 then
      redis.call('HSET', parentJobKey, 'state', 'waiting')
      redis.call('XADD', parentStreamKey, '*', 'jobId', parentId)
      emitEvent(parentEventsKey, 'active', parentId, nil)
    end
  end

  -- Phase 2: Fetch next job (non-blocking XREADGROUP)
  local nextEntries = redis.call('XREADGROUP', 'GROUP', group, consumer, 'COUNT', 1, 'STREAMS', streamKey, '>')
  if not nextEntries or #nextEntries == 0 then
    return cjson.encode({completed = jobId, next = false})
  end
  local streamData = nextEntries[1]
  local entries = streamData[2]
  if not entries or #entries == 0 then
    return cjson.encode({completed = jobId, next = false})
  end
  local nextEntry = entries[1]
  local nextEntryId = nextEntry[1]
  local nextFields = nextEntry[2]
  local nextJobId = nil
  for i = 1, #nextFields, 2 do
    if nextFields[i] == 'jobId' then
      nextJobId = nextFields[i + 1]
      break
    end
  end
  if not nextJobId then
    return cjson.encode({completed = jobId, next = false})
  end

  -- Phase 3: Activate next job (same as moveToActive)
  local nextJobKey = prefix .. 'job:' .. nextJobId
  local nextExists = redis.call('EXISTS', nextJobKey)
  if nextExists == 0 then
    return cjson.encode({completed = jobId, next = false, nextEntryId = nextEntryId})
  end
  local revoked = redis.call('HGET', nextJobKey, 'revoked')
  if revoked == '1' then
    return cjson.encode({completed = jobId, next = 'REVOKED', nextJobId = nextJobId, nextEntryId = nextEntryId})
  end
  redis.call('HSET', nextJobKey, 'state', 'active', 'processedOn', tostring(timestamp), 'lastActive', tostring(timestamp))
  local nextHash = redis.call('HGETALL', nextJobKey)
  return cjson.encode({completed = jobId, next = nextHash, nextJobId = nextJobId, nextEntryId = nextEntryId})
end)

redis.register_function('glidemq_fail', function(keys, args)
  local streamKey = keys[1]
  local failedKey = keys[2]
  local scheduledKey = keys[3]
  local eventsKey = keys[4]
  local jobKey = keys[5]
  local jobId = args[1]
  local entryId = args[2]
  local failedReason = args[3]
  local timestamp = tonumber(args[4])
  local maxAttempts = tonumber(args[5]) or 0
  local backoffDelay = tonumber(args[6]) or 0
  local group = args[7]
  local removeMode = args[8] or '0'
  local removeCount = tonumber(args[9]) or 0
  local removeAge = tonumber(args[10]) or 0
  redis.call('XACK', streamKey, group, entryId)
  redis.call('XDEL', streamKey, entryId)
  local attemptsMade = redis.call('HINCRBY', jobKey, 'attemptsMade', 1)
  if maxAttempts > 0 and attemptsMade < maxAttempts then
    local retryAt = timestamp + backoffDelay
    local priority = tonumber(redis.call('HGET', jobKey, 'priority')) or 0
    local score = priority * PRIORITY_SHIFT + retryAt
    redis.call('ZADD', scheduledKey, score, jobId)
    redis.call('HSET', jobKey,
      'state', 'delayed',
      'failedReason', failedReason,
      'processedOn', tostring(timestamp)
    )
    emitEvent(eventsKey, 'retrying', jobId, {
      'failedReason', failedReason,
      'attemptsMade', tostring(attemptsMade),
      'delay', tostring(backoffDelay)
    })
    return 'retrying'
  else
    redis.call('ZADD', failedKey, timestamp, jobId)
    redis.call('HSET', jobKey,
      'state', 'failed',
      'failedReason', failedReason,
      'finishedOn', tostring(timestamp),
      'processedOn', tostring(timestamp)
    )
    emitEvent(eventsKey, 'failed', jobId, {'failedReason', failedReason})
    local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))
    if removeMode == 'true' then
      redis.call('ZREM', failedKey, jobId)
      redis.call('DEL', jobKey)
    elseif removeMode == 'count' and removeCount > 0 then
      local total = redis.call('ZCARD', failedKey)
      if total > removeCount then
        local excess = redis.call('ZRANGE', failedKey, 0, total - removeCount - 1)
        for i = 1, #excess do
          local oldId = excess[i]
          redis.call('DEL', prefix .. 'job:' .. oldId)
          redis.call('ZREM', failedKey, oldId)
        end
      end
    elseif removeMode == 'age_count' then
      if removeAge > 0 then
        local cutoff = timestamp - (removeAge * 1000)
        local old = redis.call('ZRANGEBYSCORE', failedKey, '0', tostring(cutoff))
        for i = 1, #old do
          local oldId = old[i]
          redis.call('DEL', prefix .. 'job:' .. oldId)
          redis.call('ZREM', failedKey, oldId)
        end
      end
      if removeCount > 0 then
        local total = redis.call('ZCARD', failedKey)
        if total > removeCount then
          local excess = redis.call('ZRANGE', failedKey, 0, total - removeCount - 1)
          for i = 1, #excess do
            local oldId = excess[i]
            redis.call('DEL', prefix .. 'job:' .. oldId)
            redis.call('ZREM', failedKey, oldId)
          end
        end
      end
    end
    return 'failed'
  end
end)

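glidemq_fail takes the backoff delay as a precomputed argument, so the retry curve lives caller-side; the Lua function only increments attemptsMade and reschedules at timestamp + delay. A sketch of an exponential policy feeding the `failJob` wrapper defined below (the formula is an assumption for illustration, not glide-mq's actual policy):

```ts
import { failJob } from './functions'; // import path illustrative

// Illustrative exponential backoff: 1s, 2s, 4s, ... capped at 60s.
function backoffDelayMs(attemptsMade: number, baseMs = 1000, capMs = 60_000): number {
    return Math.min(baseMs * 2 ** attemptsMade, capMs);
}

async function reportFailure(client: any, k: any, jobId: string, entryId: string,
    err: Error, attemptsMade: number, maxAttempts: number) {
    // Resolves to 'retrying' (rescheduled) or 'failed' (moved to the failed ZSet).
    return failJob(client, k, jobId, entryId, err.message, Date.now(),
        maxAttempts, backoffDelayMs(attemptsMade));
}
```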
redis.register_function('glidemq_reclaimStalled', function(keys, args)
  local streamKey = keys[1]
  local eventsKey = keys[2]
  local group = args[1]
  local consumer = args[2]
  local minIdleMs = tonumber(args[3])
  local maxStalledCount = tonumber(args[4]) or 1
  local timestamp = tonumber(args[5])
  local failedKey = args[6]
  local result = redis.call('XAUTOCLAIM', streamKey, group, consumer, minIdleMs, '0-0')
  local entries = result[2]
  if not entries or #entries == 0 then
    return 0
  end
  local prefix = string.sub(streamKey, 1, #streamKey - 6)
  local count = 0
  for i = 1, #entries do
    local entry = entries[i]
    local entryId = entry[1]
    local fields = entry[2]
    local jobId = nil
    if type(fields) == 'table' then
      for j = 1, #fields, 2 do
        if fields[j] == 'jobId' then
          jobId = fields[j + 1]
          break
        end
      end
    end
    if jobId then
      local jobKey = prefix .. 'job:' .. jobId
      local lastActive = tonumber(redis.call('HGET', jobKey, 'lastActive'))
      if lastActive and (timestamp - lastActive) < minIdleMs then
        count = count + 1
      else
        local stalledCount = redis.call('HINCRBY', jobKey, 'stalledCount', 1)
        if stalledCount > maxStalledCount then
          redis.call('XACK', streamKey, group, entryId)
          redis.call('XDEL', streamKey, entryId)
          redis.call('ZADD', failedKey, timestamp, jobId)
          redis.call('HSET', jobKey,
            'state', 'failed',
            'failedReason', 'job stalled more than maxStalledCount',
            'finishedOn', tostring(timestamp)
          )
          emitEvent(eventsKey, 'failed', jobId, {
            'failedReason', 'job stalled more than maxStalledCount'
          })
        else
          redis.call('HSET', jobKey, 'state', 'active')
          emitEvent(eventsKey, 'stalled', jobId, nil)
        end
        count = count + 1
      end
    end
  end
  return count
end)

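Workers would run this sweep on a timer: entries claimed via XAUTOCLAIM whose lastActive heartbeat is fresher than minIdleMs are left alone, the rest are re-marked active or, past maxStalledCount, moved to failed. A minimal polling sketch via the `reclaimStalled` wrapper defined below (interval and thresholds are illustrative):

```ts
import { reclaimStalled } from './functions'; // import path illustrative

// Sketch: periodic stalled-job sweep. Reclaims entries idle >= 30s,
// allowing each job to stall at most once before it is failed.
function startStalledSweep(client: any, k: any, consumer: string): () => void {
    const timer = setInterval(() => {
        reclaimStalled(client, k, consumer, 30_000, 1, Date.now())
            .catch(() => { /* log in real code */ });
    }, 15_000);
    return () => clearInterval(timer);
}
```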
redis.register_function('glidemq_pause', function(keys, args)
  local metaKey = keys[1]
  local eventsKey = keys[2]
  redis.call('HSET', metaKey, 'paused', '1')
  emitEvent(eventsKey, 'paused', '0', nil)
  return 1
end)

redis.register_function('glidemq_resume', function(keys, args)
  local metaKey = keys[1]
  local eventsKey = keys[2]
  redis.call('HSET', metaKey, 'paused', '0')
  emitEvent(eventsKey, 'resumed', '0', nil)
  return 1
end)

redis.register_function('glidemq_dedup', function(keys, args)
  local dedupKey = keys[1]
  local idKey = keys[2]
  local streamKey = keys[3]
  local scheduledKey = keys[4]
  local eventsKey = keys[5]
  local dedupId = args[1]
  local ttlMs = tonumber(args[2]) or 0
  local mode = args[3]
  local jobName = args[4]
  local jobData = args[5]
  local jobOpts = args[6]
  local timestamp = tonumber(args[7])
  local delay = tonumber(args[8]) or 0
  local priority = tonumber(args[9]) or 0
  local parentId = args[10] or ''
  local maxAttempts = tonumber(args[11]) or 0
  local prefix = string.sub(idKey, 1, #idKey - 2)
  local existing = redis.call('HGET', dedupKey, dedupId)
  if mode == 'simple' then
    if existing then
      local sep = string.find(existing, ':')
      if sep then
        local existingJobId = string.sub(existing, 1, sep - 1)
        local jobKey = prefix .. 'job:' .. existingJobId
        local state = redis.call('HGET', jobKey, 'state')
        if state and state ~= 'completed' and state ~= 'failed' then
          return 'skipped'
        end
      end
    end
  elseif mode == 'throttle' then
    if existing and ttlMs > 0 then
      local sep = string.find(existing, ':')
      if sep then
        local storedTs = tonumber(string.sub(existing, sep + 1))
        if storedTs and (timestamp - storedTs) < ttlMs then
          return 'skipped'
        end
      end
    end
  elseif mode == 'debounce' then
    if existing then
      local sep = string.find(existing, ':')
      if sep then
        local existingJobId = string.sub(existing, 1, sep - 1)
        local jobKey = prefix .. 'job:' .. existingJobId
        local state = redis.call('HGET', jobKey, 'state')
        if state == 'delayed' or state == 'prioritized' then
          redis.call('ZREM', scheduledKey, existingJobId)
          redis.call('DEL', jobKey)
          emitEvent(eventsKey, 'removed', existingJobId, nil)
        elseif state and state ~= 'completed' and state ~= 'failed' then
          return 'skipped'
        end
      end
    end
  end
  local jobId = redis.call('INCR', idKey)
  local jobIdStr = tostring(jobId)
  local jobKey = prefix .. 'job:' .. jobIdStr
  local hashFields = {
    'id', jobIdStr,
    'name', jobName,
    'data', jobData,
    'opts', jobOpts,
    'timestamp', tostring(timestamp),
    'attemptsMade', '0',
    'delay', tostring(delay),
    'priority', tostring(priority),
    'maxAttempts', tostring(maxAttempts)
  }
  if parentId ~= '' then
    hashFields[#hashFields + 1] = 'parentId'
    hashFields[#hashFields + 1] = parentId
  end
  if delay > 0 or priority > 0 then
    hashFields[#hashFields + 1] = 'state'
    hashFields[#hashFields + 1] = delay > 0 and 'delayed' or 'prioritized'
  else
    hashFields[#hashFields + 1] = 'state'
    hashFields[#hashFields + 1] = 'waiting'
  end
  redis.call('HSET', jobKey, unpack(hashFields))
  if delay > 0 then
    local score = priority * PRIORITY_SHIFT + (timestamp + delay)
    redis.call('ZADD', scheduledKey, score, jobIdStr)
  elseif priority > 0 then
    local score = priority * PRIORITY_SHIFT
    redis.call('ZADD', scheduledKey, score, jobIdStr)
  else
    redis.call('XADD', streamKey, '*', 'jobId', jobIdStr)
  end
  redis.call('HSET', dedupKey, dedupId, jobIdStr .. ':' .. tostring(timestamp))
  emitEvent(eventsKey, 'added', jobIdStr, {'name', jobName})
  return jobIdStr
end)

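The three modes differ only in when the stored `jobId:timestamp` entry blocks a new add: 'simple' skips while the previous job is still unfinished, 'throttle' skips while the stored timestamp is younger than ttlMs, and 'debounce' deletes a still-scheduled previous job and replaces it. A usage sketch of the `dedup` wrapper defined below (names and values are illustrative):

```ts
import { dedup } from './functions'; // import path illustrative

// Sketch: 'throttle' mode admits at most one job per dedupId per ttlMs.
async function enqueueThrottled(client: any, k: any) {
    const outcome = await dedup(client, k, 'sync:user:42', 5_000, 'throttle',
        'sync', JSON.stringify({ userId: 42 }), '{}', Date.now(),
        0 /* delay */, 0 /* priority */, '' /* parentId */, 3 /* maxAttempts */);
    return outcome === 'skipped' ? null : String(outcome); // job ID when added
}
```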
redis.register_function('glidemq_rateLimit', function(keys, args)
  local rateKey = keys[1]
  local metaKey = keys[2]
  local maxPerWindow = tonumber(args[1])
  local windowDuration = tonumber(args[2])
  local now = tonumber(args[3])
  local windowStart = tonumber(redis.call('HGET', rateKey, 'windowStart')) or 0
  local count = tonumber(redis.call('HGET', rateKey, 'count')) or 0
  if now - windowStart >= windowDuration then
    redis.call('HSET', rateKey, 'windowStart', tostring(now), 'count', '1')
    return 0
  end
  if count >= maxPerWindow then
    local delayMs = windowDuration - (now - windowStart)
    return delayMs
  end
  redis.call('HSET', rateKey, 'count', tostring(count + 1))
  return 0
end)

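Because the counter resets whenever a full window has elapsed, this is a fixed-window limiter: the return value is 0 when the job may run, otherwise the milliseconds left in the current window. A worker-side sketch using the `rateLimit` wrapper defined below (the 100-per-10s limit is illustrative):

```ts
import { rateLimit } from './functions'; // import path illustrative

// Sketch: gate job pickup on the fixed-window limiter, sleeping out the remainder.
async function waitForRateLimit(client: any, k: any) {
    const waitMs = Number(await rateLimit(client, k, 100, 10_000, Date.now()));
    if (waitMs > 0) {
        await new Promise((resolve) => setTimeout(resolve, waitMs));
    }
}
```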
redis.register_function('glidemq_checkConcurrency', function(keys, args)
  local metaKey = keys[1]
  local streamKey = keys[2]
  local group = args[1]
  local gc = tonumber(redis.call('HGET', metaKey, 'globalConcurrency')) or 0
  if gc <= 0 then
    return -1
  end
  local pending = redis.call('XPENDING', streamKey, group)
  local pendingCount = tonumber(pending[1]) or 0
  local remaining = gc - pendingCount
  if remaining <= 0 then
    return 0
  end
  return remaining
end)

redis.register_function('glidemq_moveToActive', function(keys, args)
  local jobKey = keys[1]
  local timestamp = args[1]
  local exists = redis.call('EXISTS', jobKey)
  if exists == 0 then
    return ''
  end
  local revoked = redis.call('HGET', jobKey, 'revoked')
  if revoked == '1' then
    return 'REVOKED'
  end
  redis.call('HSET', jobKey, 'state', 'active', 'processedOn', timestamp, 'lastActive', timestamp)
  local fields = redis.call('HGETALL', jobKey)
  return cjson.encode(fields)
end)

redis.register_function('glidemq_addFlow', function(keys, args)
  local parentIdKey = keys[1]
  local parentStreamKey = keys[2]
  local parentScheduledKey = keys[3]
  local parentEventsKey = keys[4]
  local parentName = args[1]
  local parentData = args[2]
  local parentOpts = args[3]
  local timestamp = tonumber(args[4])
  local parentDelay = tonumber(args[5]) or 0
  local parentPriority = tonumber(args[6]) or 0
  local parentMaxAttempts = tonumber(args[7]) or 0
  local numChildren = tonumber(args[8])
  local parentJobId = redis.call('INCR', parentIdKey)
  local parentJobIdStr = tostring(parentJobId)
  local parentPrefix = string.sub(parentIdKey, 1, #parentIdKey - 2)
  local parentJobKey = parentPrefix .. 'job:' .. parentJobIdStr
  local depsKey = parentPrefix .. 'deps:' .. parentJobIdStr
  local parentHash = {
    'id', parentJobIdStr,
    'name', parentName,
    'data', parentData,
    'opts', parentOpts,
    'timestamp', tostring(timestamp),
    'attemptsMade', '0',
    'delay', tostring(parentDelay),
    'priority', tostring(parentPriority),
    'maxAttempts', tostring(parentMaxAttempts),
    'state', 'waiting-children'
  }
  redis.call('HSET', parentJobKey, unpack(parentHash))
  local childIds = {}
  local childArgOffset = 8
  local childKeyOffset = 4
  for i = 1, numChildren do
    local base = childArgOffset + (i - 1) * 8
    local childName = args[base + 1]
    local childData = args[base + 2]
    local childOpts = args[base + 3]
    local childDelay = tonumber(args[base + 4]) or 0
    local childPriority = tonumber(args[base + 5]) or 0
    local childMaxAttempts = tonumber(args[base + 6]) or 0
    local childQueuePrefix = args[base + 7]
    local childParentQueue = args[base + 8]
    local ckBase = childKeyOffset + (i - 1) * 4
    local childIdKey = keys[ckBase + 1]
    local childStreamKey = keys[ckBase + 2]
    local childScheduledKey = keys[ckBase + 3]
    local childEventsKey = keys[ckBase + 4]
    local childJobId = redis.call('INCR', childIdKey)
    local childJobIdStr = tostring(childJobId)
    local childPrefix = string.sub(childIdKey, 1, #childIdKey - 2)
    local childJobKey = childPrefix .. 'job:' .. childJobIdStr
    local childHash = {
      'id', childJobIdStr,
      'name', childName,
      'data', childData,
      'opts', childOpts,
      'timestamp', tostring(timestamp),
      'attemptsMade', '0',
      'delay', tostring(childDelay),
      'priority', tostring(childPriority),
      'maxAttempts', tostring(childMaxAttempts),
      'parentId', parentJobIdStr,
      'parentQueue', childParentQueue
    }
    if childDelay > 0 or childPriority > 0 then
      childHash[#childHash + 1] = 'state'
      childHash[#childHash + 1] = childDelay > 0 and 'delayed' or 'prioritized'
    else
      childHash[#childHash + 1] = 'state'
      childHash[#childHash + 1] = 'waiting'
    end
    redis.call('HSET', childJobKey, unpack(childHash))
    local depsMember = childQueuePrefix .. ':' .. childJobIdStr
    redis.call('SADD', depsKey, depsMember)
    if childDelay > 0 then
      local score = childPriority * PRIORITY_SHIFT + (timestamp + childDelay)
      redis.call('ZADD', childScheduledKey, score, childJobIdStr)
    elseif childPriority > 0 then
      local score = childPriority * PRIORITY_SHIFT
      redis.call('ZADD', childScheduledKey, score, childJobIdStr)
    else
      redis.call('XADD', childStreamKey, '*', 'jobId', childJobIdStr)
    end
    emitEvent(childEventsKey, 'added', childJobIdStr, {'name', childName})
    childIds[#childIds + 1] = childJobIdStr
  end
  local extraDepsOffset = childArgOffset + numChildren * 8
  local numExtraDeps = tonumber(args[extraDepsOffset + 1]) or 0
  for i = 1, numExtraDeps do
    local extraMember = args[extraDepsOffset + 1 + i]
    redis.call('SADD', depsKey, extraMember)
  end
  emitEvent(parentEventsKey, 'added', parentJobIdStr, {'name', parentName})
  local result = {parentJobIdStr}
  for i = 1, #childIds do
    result[#result + 1] = childIds[i]
  end
  return cjson.encode(result)
end)

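The call layout is positional, matching the `base` and `ckBase` offsets above: four keys per queue (parent first, then each child), and eight ARGV slots for the parent followed by eight per child and the extra-deps tail. Spelled out for two children:

```ts
// Positional layout assembled by the addFlow wrapper later in this file,
// spelled out for numChildren = 2:
//
// KEYS: [parent.id, parent.stream, parent.scheduled, parent.events,   // keys 1..4
//        child1.id, child1.stream, child1.scheduled, child1.events,   // ckBase=4, keys 5..8
//        child2.id, child2.stream, child2.scheduled, child2.events]   // keys 9..12
//
// ARGV: [parentName, parentData, parentOpts, timestamp,               // args 1..8
//        parentDelay, parentPriority, parentMaxAttempts, numChildren,
//        c1.name, c1.data, c1.opts, c1.delay, c1.priority,            // base=8, 8 slots/child
//        c1.maxAttempts, c1.queuePrefix, c1.parentQueue,
//        c2.name, /* ...same eight slots... */ c2.parentQueue,
//        numExtraDeps, extraDep1, extraDep2 /* ... */]
```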
redis.register_function('glidemq_completeChild', function(keys, args)
  local depsKey = keys[1]
  local parentJobKey = keys[2]
  local parentStreamKey = keys[3]
  local parentEventsKey = keys[4]
  local depsMember = args[1]
  local parentId = args[2]
  local doneCount = redis.call('HINCRBY', parentJobKey, 'depsCompleted', 1)
  local totalDeps = redis.call('SCARD', depsKey)
  local remaining = totalDeps - doneCount
  if remaining <= 0 then
    redis.call('HSET', parentJobKey, 'state', 'waiting')
    redis.call('XADD', parentStreamKey, '*', 'jobId', parentId)
    emitEvent(parentEventsKey, 'active', parentId, nil)
  end
  return remaining
end)

redis.register_function('glidemq_removeJob', function(keys, args)
  local jobKey = keys[1]
  local streamKey = keys[2]
  local scheduledKey = keys[3]
  local completedKey = keys[4]
  local failedKey = keys[5]
  local eventsKey = keys[6]
  local logKey = keys[7]
  local jobId = args[1]
  local exists = redis.call('EXISTS', jobKey)
  if exists == 0 then
    return 0
  end
  redis.call('ZREM', scheduledKey, jobId)
  redis.call('ZREM', completedKey, jobId)
  redis.call('ZREM', failedKey, jobId)
  redis.call('DEL', jobKey)
  redis.call('DEL', logKey)
  emitEvent(eventsKey, 'removed', jobId, nil)
  return 1
end)

redis.register_function('glidemq_revoke', function(keys, args)
  local jobKey = keys[1]
  local streamKey = keys[2]
  local scheduledKey = keys[3]
  local failedKey = keys[4]
  local eventsKey = keys[5]
  local jobId = args[1]
  local timestamp = tonumber(args[2])
  local group = args[3]
  local exists = redis.call('EXISTS', jobKey)
  if exists == 0 then
    return 'not_found'
  end
  redis.call('HSET', jobKey, 'revoked', '1')
  local state = redis.call('HGET', jobKey, 'state')
  if state == 'waiting' or state == 'delayed' or state == 'prioritized' then
    redis.call('ZREM', scheduledKey, jobId)
    local entries = redis.call('XRANGE', streamKey, '-', '+')
    for i = 1, #entries do
      local entryId = entries[i][1]
      local fields = entries[i][2]
      for j = 1, #fields, 2 do
        if fields[j] == 'jobId' and fields[j+1] == jobId then
          redis.call('XACK', streamKey, group, entryId)
          redis.call('XDEL', streamKey, entryId)
          break
        end
      end
    end
    redis.call('ZADD', failedKey, timestamp, jobId)
    redis.call('HSET', jobKey,
      'state', 'failed',
      'failedReason', 'revoked',
      'finishedOn', tostring(timestamp)
    )
    emitEvent(eventsKey, 'revoked', jobId, nil)
    return 'revoked'
  end
  emitEvent(eventsKey, 'revoked', jobId, nil)
  return 'flagged'
end)
`;
// ---- Typed FCALL wrappers ----
/**
 * Add a job to the queue atomically.
 * Returns the new job ID (string).
 */
async function addJob(client, k, jobName, data, opts, timestamp, delay, priority, parentId, maxAttempts) {
    const result = await client.fcall('glidemq_addJob', [k.id, k.stream, k.scheduled, k.events], [
        jobName,
        data,
        opts,
        timestamp.toString(),
        delay.toString(),
        priority.toString(),
        parentId,
        maxAttempts.toString(),
    ]);
    return result;
}
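All wrappers take a keys object `k`; judging from the calls in this file it exposes at least id, stream, scheduled, events, meta, rate, completed, failed, dedup, plus job(id), deps(id) and log(id) builders. A hedged usage sketch (data and opts are passed pre-serialized; values are illustrative):

```ts
import { addJob } from './functions'; // import path illustrative

// Sketch: enqueue an 'email' job, delayed 5s, priority 2, up to 3 attempts.
async function enqueueEmail(client: any, k: any) {
    const jobId = await addJob(client, k, 'email',
        JSON.stringify({ to: 'a@example.com' }), // data (pre-serialized)
        '{}',                                    // opts (pre-serialized)
        Date.now(), 5_000, 2, '' /* no parent */, 3);
    return String(jobId);
}
```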
/**
 * Add a job with deduplication. Checks the dedup hash and either skips or adds the job.
 * Returns "skipped" if deduplicated, otherwise the new job ID (string).
 */
async function dedup(client, k, dedupId, ttlMs, mode, jobName, data, opts, timestamp, delay, priority, parentId, maxAttempts) {
    const result = await client.fcall('glidemq_dedup', [k.dedup, k.id, k.stream, k.scheduled, k.events], [
        dedupId,
        ttlMs.toString(),
        mode,
        jobName,
        data,
        opts,
        timestamp.toString(),
        delay.toString(),
        priority.toString(),
        parentId,
        maxAttempts.toString(),
    ]);
    return result;
}
/**
 * Promote delayed/prioritized jobs whose score <= now from scheduled ZSet to stream.
 * Returns the number of jobs promoted.
 */
async function promote(client, k, timestamp) {
    const result = await client.fcall('glidemq_promote', [k.scheduled, k.stream, k.events], [timestamp.toString()]);
    return result;
}
/**
 * Encode a removeOnComplete/removeOnFail option into Lua args.
 */
function encodeRetention(opt) {
    if (opt === true) {
        return { mode: 'true', count: 0, age: 0 };
    }
    if (typeof opt === 'number') {
        return { mode: 'count', count: opt, age: 0 };
    }
    if (opt && typeof opt === 'object') {
        return { mode: 'age_count', count: opt.count ?? 0, age: opt.age ?? 0 };
    }
    return { mode: '0', count: 0, age: 0 };
}
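encodeRetention maps the three accepted option shapes onto the (mode, count, age) triple the Lua retention branches switch on; concretely:

```ts
encodeRetention(true);                      // { mode: 'true', count: 0, age: 0 }           -> delete right away
encodeRetention(500);                       // { mode: 'count', count: 500, age: 0 }        -> keep the newest 500
encodeRetention({ age: 3600, count: 100 }); // { mode: 'age_count', count: 100, age: 3600 } -> prune by age (seconds), then cap count
encodeRetention(undefined);                 // { mode: '0', count: 0, age: 0 }              -> keep everything
```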
/**
 * Complete a job: XACK, move to completed ZSet, update job hash, emit event.
 * Optionally applies retention cleanup based on removeOnComplete.
 * If the job has a parent (depsMember and parentId provided), also handles
 * the completeChild logic inline: increments the parent's depsCompleted counter
 * and re-queues the parent when all children are done.
 */
async function completeJob(client, k, jobId, entryId, returnvalue, timestamp, group = exports.CONSUMER_GROUP, removeOnComplete, parentInfo) {
    const { mode, count, age } = encodeRetention(removeOnComplete);
    const keys = [k.stream, k.completed, k.events, k.job(jobId)];
    const args = [
        jobId,
        entryId,
        returnvalue,
        timestamp.toString(),
        group,
        mode,
        count.toString(),
        age.toString(),
    ];
    if (parentInfo) {
        const pk = parentInfo.parentKeys;
        keys.push(pk.deps(parentInfo.parentId), pk.job(parentInfo.parentId), pk.stream, pk.events);
        args.push(parentInfo.depsMember, parentInfo.parentId);
    }
    else {
        args.push('', '');
    }
    return client.fcall('glidemq_complete', keys, args);
}
async function completeAndFetchNext(client, k, jobId, entryId, returnvalue, timestamp, group, consumer, removeOnComplete, parentInfo) {
    const { mode, count, age } = encodeRetention(removeOnComplete);
    const keys = [k.stream, k.completed, k.events, k.job(jobId)];
    const args = [
        jobId, entryId, returnvalue, timestamp.toString(),
        group, consumer,
        mode, count.toString(), age.toString(),
    ];
    if (parentInfo) {
        const pk = parentInfo.parentKeys;
        keys.push(pk.deps(parentInfo.parentId), pk.job(parentInfo.parentId), pk.stream, pk.events);
        args.push(parentInfo.depsMember, parentInfo.parentId);
    }
    else {
        args.push('', '');
    }
    const raw = await client.fcall('glidemq_completeAndFetchNext', keys, args);
    const parsed = JSON.parse(String(raw));
    if (!parsed.next || parsed.next === false) {
        return { completed: parsed.completed, next: false };
    }
    if (parsed.next === 'REVOKED') {
        return { completed: parsed.completed, next: 'REVOKED', nextJobId: parsed.nextJobId, nextEntryId: parsed.nextEntryId };
    }
    // Parse the HGETALL array into a hash map
    const arr = parsed.next;
    const hash = {};
    for (let i = 0; i < arr.length; i += 2) {
        hash[String(arr[i])] = String(arr[i + 1]);
    }
    return { completed: parsed.completed, next: hash, nextJobId: parsed.nextJobId, nextEntryId: parsed.nextEntryId };
}
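completeAndFetchNext lets a worker chain completion and the next claim in one FCALL, avoiding a separate XREADGROUP round trip per job. A sketch of the resulting hot loop (`handler` stands in for the user's job processor; import path and group name follow this file's defaults):

```ts
import { completeAndFetchNext } from './functions'; // import path illustrative

type Claimed = { jobId: string; entryId: string; hash: Record<string, string> };

// Sketch of a worker hot path: each iteration completes the current job and
// atomically claims + activates the next one from the stream.
async function drain(client: any, k: any, consumer: string, first: Claimed,
    handler: (hash: Record<string, string>) => Promise<unknown>) {
    let current: Claimed | undefined = first;
    while (current) {
        const returnvalue = JSON.stringify(await handler(current.hash));
        const res = await completeAndFetchNext(client, k, current.jobId, current.entryId,
            returnvalue, Date.now(), 'workers', consumer, true /* removeOnComplete */);
        if (res.next === false || res.next === 'REVOKED')
            return; // drained (or next job revoked): caller falls back to a blocking read
        current = { jobId: res.nextJobId, entryId: res.nextEntryId, hash: res.next };
    }
}
```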
/**
 * Fail a job: XACK, retry with backoff if attempts remain, else move to failed ZSet.
 * Optionally applies retention cleanup based on removeOnFail.
 * Returns "failed" or "retrying".
 */
async function failJob(client, k, jobId, entryId, failedReason, timestamp, maxAttempts, backoffDelay, group = exports.CONSUMER_GROUP, removeOnFail) {
    const { mode, count, age } = encodeRetention(removeOnFail);
    const result = await client.fcall('glidemq_fail', [k.stream, k.failed, k.scheduled, k.events, k.job(jobId)], [
        jobId,
        entryId,
        failedReason,
        timestamp.toString(),
        maxAttempts.toString(),
        backoffDelay.toString(),
        group,
        mode,
        count.toString(),
        age.toString(),
    ]);
    return result;
}
/**
 * Reclaim stalled jobs via XAUTOCLAIM. Jobs exceeding maxStalledCount are moved to failed.
 * Returns the number of jobs reclaimed.
 */
async function reclaimStalled(client, k, consumer, minIdleMs, maxStalledCount, timestamp, group = exports.CONSUMER_GROUP) {
    const result = await client.fcall('glidemq_reclaimStalled', [k.stream, k.events], [
        group,
        consumer,
        minIdleMs.toString(),
        maxStalledCount.toString(),
        timestamp.toString(),
        k.failed,
    ]);
    return result;
}
/**
 * Pause a queue: sets paused=1 in meta hash, emits event.
 */
async function pause(client, k) {
    await client.fcall('glidemq_pause', [k.meta, k.events], []);
}
/**
 * Resume a queue: sets paused=0 in meta hash, emits event.
 */
async function resume(client, k) {
    await client.fcall('glidemq_resume', [k.meta, k.events], []);
}
/**
 * Check and enforce rate limiting using a fixed-window counter
 * (the window resets once windowDuration has elapsed since windowStart).
 * Returns 0 if the job is allowed, or a positive number of ms to wait.
 */
async function rateLimit(client, k, maxPerWindow, windowDuration, timestamp) {
    const result = await client.fcall('glidemq_rateLimit', [k.rate, k.meta], [
        maxPerWindow.toString(),
        windowDuration.toString(),
        timestamp.toString(),
    ]);
    return result;
}
/**
 * Check global concurrency: returns -1 if no limit is set, 0 if blocked
 * (pending >= globalConcurrency), or a positive number indicating remaining
 * capacity (globalConcurrency - pending).
 */
async function checkConcurrency(client, k, group = exports.CONSUMER_GROUP) {
    const result = await client.fcall('glidemq_checkConcurrency', [k.meta, k.stream], [group]);
    return result;
}
/**
 * Move a job to active state in a single round trip.
 * Reads the full job hash, checks revoked flag, sets state=active + processedOn + lastActive.
 * Returns:
 * - null if job hash doesn't exist
 * - 'REVOKED' if the job's revoked flag is set
 * - Record<string, string> with all job fields otherwise
 *
 * Replaces: HGETALL + revoked check + HSET lastActive (3 round trips -> 1)
 */
async function moveToActive(client, k, jobId, timestamp) {
    const result = await client.fcall('glidemq_moveToActive', [k.job(jobId)], [timestamp.toString()]);
    const str = String(result);
    if (str === '' || str === 'null')
        return null;
    if (str === 'REVOKED')
        return 'REVOKED';
    // Parse the cjson.encode output: [field1, value1, field2, value2, ...]
    const arr = JSON.parse(str);
    const hash = {};
    for (let i = 0; i < arr.length; i += 2) {
        hash[String(arr[i])] = String(arr[i + 1]);
    }
    return hash;
}
/**
 * Remove a job from all data structures (hash, stream, scheduled, completed, failed).
 * Returns 1 if removed, 0 if not found.
 */
async function removeJob(client, k, jobId) {
    const result = await client.fcall('glidemq_removeJob', [k.job(jobId), k.stream, k.scheduled, k.completed, k.failed, k.events, k.log(jobId)], [jobId]);
    return result;
}
/**
 * Revoke a job. Sets 'revoked' flag on the job hash.
 * If the job is waiting/delayed/prioritized, removes from stream/scheduled and moves to failed.
 * If the job is active (being processed), just sets the flag - worker checks it cooperatively.
 * Returns 'revoked' (moved to failed), 'flagged' (flag set, job is active), or 'not_found'.
 */
async function revokeJob(client, k, jobId, timestamp, group = exports.CONSUMER_GROUP) {
    const result = await client.fcall('glidemq_revoke', [k.job(jobId), k.stream, k.scheduled, k.failed, k.events], [jobId, timestamp.toString(), group]);
    return result;
}
/**
 * Atomically create a parent job (waiting-children) and its child jobs.
 * Returns a JSON array: [parentId, childId1, childId2, ...].
 */
async function addFlow(client, parentKeys, parentName, parentData, parentOpts, timestamp, parentDelay, parentPriority, parentMaxAttempts, children, extraDeps = []) {
    const keys = [
        parentKeys.id,
        parentKeys.stream,
        parentKeys.scheduled,
        parentKeys.events,
    ];
    const args = [
        parentName,
        parentData,
        parentOpts,
        timestamp.toString(),
        parentDelay.toString(),
        parentPriority.toString(),
        parentMaxAttempts.toString(),
        children.length.toString(),
    ];
    for (const child of children) {
        keys.push(child.keys.id, child.keys.stream, child.keys.scheduled, child.keys.events);
        args.push(child.name, child.data, child.opts, child.delay.toString(), child.priority.toString(), child.maxAttempts.toString(), child.queuePrefix, child.parentQueueName);
    }
    // Extra deps: pre-existing sub-flow children to add to deps set atomically
    args.push(extraDeps.length.toString());
    for (const dep of extraDeps) {
        args.push(dep);
    }
    const result = await client.fcall('glidemq_addFlow', keys, args);
    return JSON.parse(result);
}
/**
 * Mark a child as complete against the parent's deps set (the Lua function counts
 * completions via the parent's depsCompleted field rather than removing set members).
 * Re-queues the parent when all children are done.
 * Returns the number of remaining children (<= 0 means the parent was re-queued).
 */
async function completeChild(client, parentKeys, parentId, depsMember) {
    const result = await client.fcall('glidemq_completeChild', [parentKeys.deps(parentId), parentKeys.job(parentId), parentKeys.stream, parentKeys.events], [depsMember, parentId]);
    return result;
}
//# sourceMappingURL=index.js.map
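The header comment says the library is loaded once via FUNCTION LOAD and survives Valkey restarts; a hedged sketch of what that bootstrap could look like, using glidemq_version to decide whether a reload is needed. `functionLoad` here stands in for whatever the Valkey client actually exposes for FUNCTION LOAD; it and the replace-option shape are assumptions, not this package's connection code (which lives in dist/connection.js):

```ts
import { LIBRARY_SOURCE, LIBRARY_VERSION } from './functions'; // import path illustrative

// Hypothetical bootstrap: load the Lua library unless the loaded version matches.
async function ensureLibrary(client: any): Promise<void> {
    try {
        const v = await client.fcall('glidemq_version', [], []);
        if (String(v) === LIBRARY_VERSION) return; // already loaded and current
    } catch {
        // library missing or stale: fall through and (re)load
    }
    await client.functionLoad(LIBRARY_SOURCE, { replace: true }); // REPLACE semantics assumed
}
```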