glide-mq 0.5.0 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env +1 -0
- package/README.md +1 -1
- package/dist/connection.d.ts +12 -0
- package/dist/connection.d.ts.map +1 -1
- package/dist/connection.js +34 -0
- package/dist/connection.js.map +1 -1
- package/dist/flow-producer.d.ts +1 -0
- package/dist/flow-producer.d.ts.map +1 -1
- package/dist/flow-producer.js +41 -4
- package/dist/flow-producer.js.map +1 -1
- package/dist/functions/index.d.ts +15 -5
- package/dist/functions/index.d.ts.map +1 -1
- package/dist/functions/index.js +619 -28
- package/dist/functions/index.js.map +1 -1
- package/dist/index.d.ts +2 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +3 -1
- package/dist/index.js.map +1 -1
- package/dist/job.d.ts +1 -0
- package/dist/job.d.ts.map +1 -1
- package/dist/job.js +2 -0
- package/dist/job.js.map +1 -1
- package/dist/queue-events.d.ts.map +1 -1
- package/dist/queue-events.js +6 -0
- package/dist/queue-events.js.map +1 -1
- package/dist/queue.d.ts +21 -1
- package/dist/queue.d.ts.map +1 -1
- package/dist/queue.js +155 -20
- package/dist/queue.js.map +1 -1
- package/dist/scheduler.d.ts +10 -0
- package/dist/scheduler.d.ts.map +1 -1
- package/dist/scheduler.js +11 -0
- package/dist/scheduler.js.map +1 -1
- package/dist/types.d.ts +47 -2
- package/dist/types.d.ts.map +1 -1
- package/dist/utils.d.ts +1 -0
- package/dist/utils.d.ts.map +1 -1
- package/dist/utils.js +1 -0
- package/dist/utils.js.map +1 -1
- package/dist/worker.d.ts +6 -0
- package/dist/worker.d.ts.map +1 -1
- package/dist/worker.js +105 -22
- package/dist/worker.js.map +1 -1
- package/package.json +3 -2
package/dist/functions/index.js
CHANGED
@@ -13,6 +13,7 @@ exports.resume = resume;
 exports.rateLimit = rateLimit;
 exports.checkConcurrency = checkConcurrency;
 exports.moveToActive = moveToActive;
+exports.promoteRateLimited = promoteRateLimited;
 exports.deferActive = deferActive;
 exports.removeJob = removeJob;
 exports.revokeJob = revokeJob;
@@ -20,7 +21,7 @@ exports.searchByName = searchByName;
 exports.addFlow = addFlow;
 exports.completeChild = completeChild;
 exports.LIBRARY_NAME = 'glidemq';
-exports.LIBRARY_VERSION = '
+exports.LIBRARY_VERSION = '19';
 // Consumer group name used by workers
 exports.CONSUMER_GROUP = 'workers';
 // Embedded Lua library source (from glidemq.lua)
@@ -76,19 +77,130 @@ local function markOrderingDone(jobKey, jobId)
 end
 end
 
-local function releaseGroupSlotAndPromote(jobKey, jobId)
+-- Refill token bucket using remainder accumulator for precision.
+-- tbRefillRate is in millitokens/second. Returns current millitokens after refill.
+-- Side effect: updates tbTokens, tbLastRefill, tbRefillRemainder on the group hash.
+local function tbRefill(groupHashKey, g, now)
+  local tbCapacity = tonumber(g.tbCapacity) or 0
+  if tbCapacity <= 0 then return 0 end
+  local tbTokens = tonumber(g.tbTokens) or tbCapacity
+  local tbRefillRate = tonumber(g.tbRefillRate) or 0
+  local tbLastRefill = tonumber(g.tbLastRefill) or now
+  local tbRefillRemainder = tonumber(g.tbRefillRemainder) or 0
+  local elapsed = now - tbLastRefill
+  if elapsed <= 0 or tbRefillRate <= 0 then return tbTokens end
+  -- Cap elapsed to prevent overflow in long-idle buckets
+  local maxElapsed = math.ceil(tbCapacity * 1000 / tbRefillRate)
+  if elapsed > maxElapsed then elapsed = maxElapsed end
+  local raw = elapsed * tbRefillRate + tbRefillRemainder
+  local added = math.floor(raw / 1000)
+  local newRemainder = raw % 1000
+  local newTokens = math.min(tbCapacity, tbTokens + added)
+  redis.call('HSET', groupHashKey,
+    'tbTokens', tostring(newTokens),
+    'tbLastRefill', tostring(now),
+    'tbRefillRemainder', tostring(newRemainder))
+  return newTokens
+end
+
+local function releaseGroupSlotAndPromote(jobKey, jobId, now)
 local gk = redis.call('HGET', jobKey, 'groupKey')
 if not gk or gk == '' then return end
 local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))
 local groupHashKey = prefix .. 'group:' .. gk
-local cur = tonumber(redis.call('HGET', groupHashKey, 'active')) or 0
+-- Load all group fields in one call
+local gFields = redis.call('HGETALL', groupHashKey)
+local g = {}
+for gf = 1, #gFields, 2 do g[gFields[gf]] = gFields[gf + 1] end
+local cur = tonumber(g.active) or 0
+local newActive = (cur > 0) and (cur - 1) or 0
 if cur > 0 then
-redis.call('HSET', groupHashKey, 'active', tostring(cur - 1))
+redis.call('HSET', groupHashKey, 'active', tostring(newActive))
 end
 local waitListKey = prefix .. 'groupq:' .. gk
-local
-if
-
+local waitLen = redis.call('LLEN', waitListKey)
+if waitLen == 0 then return end
+-- Concurrency gate: if still at or above max after decrement, do not promote
+local maxConc = tonumber(g.maxConcurrency) or 0
+if maxConc > 0 and newActive >= maxConc then return end
+-- Rate limit gate (skip if now is nil or 0 for safe fallback)
+-- Only blocks promotion; does NOT increment rateCount. moveToActive handles counting.
+local rateMax = tonumber(g.rateMax) or 0
+local rateRemaining = 0
+local ts = tonumber(now) or 0
+if ts > 0 and rateMax > 0 then
+  local rateDuration = tonumber(g.rateDuration) or 0
+  if rateDuration > 0 then
+    local rateWindowStart = tonumber(g.rateWindowStart) or 0
+    local rateCount = tonumber(g.rateCount) or 0
+    if ts - rateWindowStart < rateDuration then
+      if rateCount >= rateMax then
+        -- Window active and at capacity: do not promote, register for scheduler
+        local rateLimitedKey = prefix .. 'ratelimited'
+        redis.call('ZADD', rateLimitedKey, rateWindowStart + rateDuration, gk)
+        return
+      end
+      rateRemaining = rateMax - rateCount
+    end
+  end
+end
+-- Token bucket gate: check head job cost before promoting
+local tbCap = tonumber(g.tbCapacity) or 0
+if ts > 0 and tbCap > 0 then
+  local tbTokensCur = tbRefill(groupHashKey, g, ts)
+  -- Peek at head job, skipping tombstones and DLQ'd jobs (up to 10 iterations)
+  local tbCheckPasses = 0
+  local tbOk = false
+  while tbCheckPasses < 10 do
+    tbCheckPasses = tbCheckPasses + 1
+    local headJobId = redis.call('LINDEX', waitListKey, 0)
+    if not headJobId then break end
+    local headJobKey = prefix .. 'job:' .. headJobId
+    -- Tombstone guard: job hash deleted - pop and check next
+    if redis.call('EXISTS', headJobKey) == 0 then
+      redis.call('LPOP', waitListKey)
+    else
+      local headCost = tonumber(redis.call('HGET', headJobKey, 'cost')) or 1000
+      -- DLQ guard: cost > capacity - pop, fail, check next
+      if headCost > tbCap then
+        redis.call('LPOP', waitListKey)
+        redis.call('ZADD', prefix .. 'failed', ts, headJobId)
+        redis.call('HSET', headJobKey,
+          'state', 'failed',
+          'failedReason', 'cost exceeds token bucket capacity',
+          'finishedOn', tostring(ts))
+        emitEvent(prefix .. 'events', 'failed', headJobId, {'failedReason', 'cost exceeds token bucket capacity'})
+      elseif tbTokensCur < headCost then
+        -- Not enough tokens: register delay and skip promotion
+        local tbRateVal = tonumber(g.tbRefillRate) or 0
+        if tbRateVal <= 0 then break end
+        local tbDelayMs = math.ceil((headCost - tbTokensCur) * 1000 / tbRateVal)
+        local rateLimitedKey = prefix .. 'ratelimited'
+        redis.call('ZADD', rateLimitedKey, ts + tbDelayMs, gk)
+        return
+      else
+        tbOk = true
+        break
+      end
+    end
+  end
+  if not tbOk and tbCheckPasses >= 10 then return end
+end
+-- Calculate how many slots are available for promotion
+local available = 1
+if maxConc > 0 then
+  available = maxConc - newActive
+else
+  available = math.min(waitLen, 1000)
+end
+-- Cap by rate limit remaining if a window is active
+if rateRemaining > 0 then
+  available = math.min(available, rateRemaining)
+end
+local streamKey = prefix .. 'stream'
+for p = 1, available do
+  local nextJobId = redis.call('LPOP', waitListKey)
+  if not nextJobId then break end
 redis.call('XADD', streamKey, '*', 'jobId', nextJobId)
 local nextJobKey = prefix .. 'job:' .. nextJobId
 redis.call('HSET', nextJobKey, 'state', 'waiting')
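
The new `tbRefill` helper does its accounting in integer millitokens so repeated small refills never drift: elapsed milliseconds times a millitokens-per-second rate leaves a sub-token remainder that is carried in `tbRefillRemainder` instead of discarded. For example, at `tbRefillRate = 2500` (2.5 tokens/s), 333 ms elapsed gives `raw = 832500`, i.e. 832 millitokens added with 500 carried forward. A minimal TypeScript sketch of the same arithmetic, for illustration only (not part of the package):

```ts
// Sketch of the Lua tbRefill arithmetic. All quantities are integers:
// tokens/capacity in millitokens, refillRate in millitokens/second.
interface BucketState {
  tokens: number;     // current millitokens
  lastRefill: number; // ms timestamp of last refill
  remainder: number;  // carried sub-millitoken remainder (0..999)
}

function refill(b: BucketState, capacity: number, refillRate: number, now: number): BucketState {
  let elapsed = now - b.lastRefill; // ms
  if (elapsed <= 0 || refillRate <= 0) return b;
  // Cap elapsed so a long-idle bucket cannot overflow the multiplication.
  const maxElapsed = Math.ceil((capacity * 1000) / refillRate);
  elapsed = Math.min(elapsed, maxElapsed);
  const raw = elapsed * refillRate + b.remainder;
  return {
    tokens: Math.min(capacity, b.tokens + Math.floor(raw / 1000)),
    lastRefill: now,
    remainder: raw % 1000,
  };
}
```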
@@ -133,6 +245,48 @@ local function extractGroupConcurrencyFromOpts(optsJson)
 return tonumber(conc) or 0
 end
 
+local function extractGroupRateLimitFromOpts(optsJson)
+  if not optsJson or optsJson == '' then
+    return 0, 0
+  end
+  local ok, decoded = pcall(cjson.decode, optsJson)
+  if not ok or type(decoded) ~= 'table' then
+    return 0, 0
+  end
+  local ordering = decoded['ordering']
+  if type(ordering) ~= 'table' then
+    return 0, 0
+  end
+  local rl = ordering['rateLimit']
+  if type(rl) ~= 'table' then
+    return 0, 0
+  end
+  local max = tonumber(rl['max']) or 0
+  local duration = tonumber(rl['duration']) or 0
+  return max, duration
+end
+
+local function extractTokenBucketFromOpts(optsJson)
+  if not optsJson or optsJson == '' then return 0, 0 end
+  local ok, decoded = pcall(cjson.decode, optsJson)
+  if not ok or type(decoded) ~= 'table' then return 0, 0 end
+  local ordering = decoded['ordering']
+  if type(ordering) ~= 'table' then return 0, 0 end
+  local tb = ordering['tokenBucket']
+  if type(tb) ~= 'table' then return 0, 0 end
+  local capacity = tonumber(tb['capacity']) or 0
+  local refillRate = tonumber(tb['refillRate']) or 0
+  return math.floor(capacity * 1000), math.floor(refillRate * 1000)
+end
+
+local function extractCostFromOpts(optsJson)
+  if not optsJson or optsJson == '' then return 0 end
+  local ok, decoded = pcall(cjson.decode, optsJson)
+  if not ok or type(decoded) ~= 'table' then return 0 end
+  local cost = tonumber(decoded['cost']) or 0
+  return math.floor(cost * 1000)
+end
+
 redis.register_function('glidemq_version', function(keys, args)
 return '${exports.LIBRARY_VERSION}'
 end)
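
Per these extractors, the per-job `opts` JSON carries the new knobs under `ordering` plus a top-level `cost`, and bucket capacity, refill rate, and cost are floored into integer millitokens (x1000), so `refillRate: 2.5` tokens/second becomes 2500 millitokens/second. A plausible opts payload matching what the Lua parses (field names come from the extractors above; the ordering `key` field is an assumption, since the ordering-key extractor is not shown in this hunk):

```ts
// Shape implied by extractGroupRateLimitFromOpts / extractTokenBucketFromOpts /
// extractCostFromOpts. Fractional values are floored to millitokens (x1000).
const opts = {
  ordering: {
    key: 'tenant-42', // assumed field name
    rateLimit: { max: 10, duration: 60_000 },      // at most 10 jobs per 60 s window
    tokenBucket: { capacity: 5, refillRate: 2.5 }, // -> 5000 millitokens, 2500 mtok/s
  },
  cost: 2, // job consumes 2000 millitokens when it runs
};
```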
@@ -152,11 +306,16 @@ redis.register_function('glidemq_addJob', function(keys, args)
 local maxAttempts = tonumber(args[8]) or 0
 local orderingKey = args[9] or ''
 local groupConcurrency = tonumber(args[10]) or 0
+local groupRateMax = tonumber(args[11]) or 0
+local groupRateDuration = tonumber(args[12]) or 0
+local tbCapacity = tonumber(args[13]) or 0
+local tbRefillRate = tonumber(args[14]) or 0
+local jobCost = tonumber(args[15]) or 0
 local jobId = redis.call('INCR', idKey)
 local jobIdStr = tostring(jobId)
 local prefix = string.sub(idKey, 1, #idKey - 2)
 local jobKey = prefix .. 'job:' .. jobIdStr
-local useGroupConcurrency = (orderingKey ~= '' and groupConcurrency > 1)
+local useGroupConcurrency = (orderingKey ~= '' and (groupConcurrency > 1 or groupRateMax > 0 or tbCapacity > 0))
 local orderingSeq = 0
 if orderingKey ~= '' and not useGroupConcurrency then
 local orderingMetaKey = prefix .. 'ordering'
@@ -168,6 +327,57 @@ redis.register_function('glidemq_addJob', function(keys, args)
 if curMax ~= groupConcurrency then
 redis.call('HSET', groupHashKey, 'maxConcurrency', tostring(groupConcurrency))
 end
+-- When rate limit or token bucket forces group path but concurrency is 0 or 1, ensure maxConcurrency >= 1
+if curMax == 0 and groupConcurrency <= 1 then
+  redis.call('HSET', groupHashKey, 'maxConcurrency', '1')
+end
+-- Upsert rate limit fields on group hash
+if groupRateMax > 0 then
+  local curRateMax = tonumber(redis.call('HGET', groupHashKey, 'rateMax')) or 0
+  if curRateMax ~= groupRateMax then
+    redis.call('HSET', groupHashKey, 'rateMax', tostring(groupRateMax))
+  end
+  local curRateDuration = tonumber(redis.call('HGET', groupHashKey, 'rateDuration')) or 0
+  if curRateDuration ~= groupRateDuration then
+    redis.call('HSET', groupHashKey, 'rateDuration', tostring(groupRateDuration))
+  end
+else
+  -- Clear stale rate limit fields if group was previously rate-limited
+  local oldRateMax = tonumber(redis.call('HGET', groupHashKey, 'rateMax')) or 0
+  if oldRateMax > 0 then
+    redis.call('HDEL', groupHashKey, 'rateMax', 'rateDuration', 'rateWindowStart', 'rateCount')
+  end
+end
+-- Upsert token bucket fields on group hash
+if tbCapacity > 0 then
+  local curTbCap = tonumber(redis.call('HGET', groupHashKey, 'tbCapacity')) or 0
+  if curTbCap ~= tbCapacity then
+    redis.call('HSET', groupHashKey, 'tbCapacity', tostring(tbCapacity))
+  end
+  local curTbRate = tonumber(redis.call('HGET', groupHashKey, 'tbRefillRate')) or 0
+  if curTbRate ~= tbRefillRate then
+    redis.call('HSET', groupHashKey, 'tbRefillRate', tostring(tbRefillRate))
+  end
+  -- Initialize tokens on first setup
+  if curTbCap == 0 then
+    redis.call('HSET', groupHashKey,
+      'tbTokens', tostring(tbCapacity),
+      'tbLastRefill', tostring(timestamp),
+      'tbRefillRemainder', '0')
+  end
+  -- Validate cost <= capacity at enqueue
+  -- Validate cost (explicit or default 1000 millitokens) against capacity
+  local effectiveCost = (jobCost > 0) and jobCost or 1000
+  if effectiveCost > tbCapacity then
+    return 'ERR:COST_EXCEEDS_CAPACITY'
+  end
+else
+  -- Clear stale tb fields
+  local oldTbCap = tonumber(redis.call('HGET', groupHashKey, 'tbCapacity')) or 0
+  if oldTbCap > 0 then
+    redis.call('HDEL', groupHashKey, 'tbCapacity', 'tbRefillRate', 'tbTokens', 'tbLastRefill', 'tbRefillRemainder')
+  end
+end
 end
 local hashFields = {
 'id', jobIdStr,
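
Note the defaulting rule enforced here: a job with no explicit `cost` is charged 1000 millitokens (one whole token), so a bucket with `tbCapacity` below 1000 rejects every costless job at enqueue. A worked check mirroring the Lua, for illustration:

```ts
// Mirrors the Lua validation: jobCost of 0 means "use the 1-token default".
function validateCost(jobCost: number, tbCapacity: number): void {
  const effectiveCost = jobCost > 0 ? jobCost : 1000; // millitokens
  if (effectiveCost > tbCapacity) {
    throw new Error('ERR:COST_EXCEEDS_CAPACITY');
  }
}

validateCost(0, 500);     // throws: default 1000 > capacity 500
validateCost(2000, 5000); // ok: 2 tokens fit a 5-token bucket
```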
@@ -189,6 +399,10 @@ redis.register_function('glidemq_addJob', function(keys, args)
 hashFields[#hashFields + 1] = 'orderingSeq'
 hashFields[#hashFields + 1] = tostring(orderingSeq)
 end
+if jobCost > 0 then
+  hashFields[#hashFields + 1] = 'cost'
+  hashFields[#hashFields + 1] = tostring(jobCost)
+end
 if parentId ~= '' then
 hashFields[#hashFields + 1] = 'parentId'
 hashFields[#hashFields + 1] = parentId
@@ -280,7 +494,7 @@ redis.register_function('glidemq_complete', function(keys, args)
 'finishedOn', tostring(timestamp)
 )
 markOrderingDone(jobKey, jobId)
-releaseGroupSlotAndPromote(jobKey, jobId)
+releaseGroupSlotAndPromote(jobKey, jobId, timestamp)
 emitEvent(eventsKey, 'completed', jobId, {'returnvalue', returnvalue})
 local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))
 if removeMode == 'true' then
@@ -362,7 +576,7 @@ redis.register_function('glidemq_completeAndFetchNext', function(keys, args)
 'finishedOn', tostring(timestamp)
 )
 markOrderingDone(jobKey, jobId)
-releaseGroupSlotAndPromote(jobKey, jobId)
+releaseGroupSlotAndPromote(jobKey, jobId, timestamp)
 emitEvent(eventsKey, 'completed', jobId, {'returnvalue', returnvalue})
 local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))
 
@@ -433,8 +647,13 @@ redis.register_function('glidemq_completeAndFetchNext', function(keys, args)
 local nextGroupKey = redis.call('HGET', nextJobKey, 'groupKey')
 if nextGroupKey and nextGroupKey ~= '' then
 local nextGroupHashKey = prefix .. 'group:' .. nextGroupKey
-local nextMaxConc = tonumber(redis.call('HGET', nextGroupHashKey, 'maxConcurrency')) or 0
-local nextActive = tonumber(redis.call('HGET', nextGroupHashKey, 'active')) or 0
+-- Load all group fields in one call
+local nGrpFields = redis.call('HGETALL', nextGroupHashKey)
+local nGrp = {}
+for nf = 1, #nGrpFields, 2 do nGrp[nGrpFields[nf]] = nGrpFields[nf + 1] end
+local nextMaxConc = tonumber(nGrp.maxConcurrency) or 0
+local nextActive = tonumber(nGrp.active) or 0
+-- Concurrency gate first (avoids burning rate/token slots on parked jobs)
 if nextMaxConc > 0 and nextActive >= nextMaxConc then
 redis.call('XACK', streamKey, group, nextEntryId)
 redis.call('XDEL', streamKey, nextEntryId)
@@ -443,6 +662,73 @@ redis.register_function('glidemq_completeAndFetchNext', function(keys, args)
 redis.call('HSET', nextJobKey, 'state', 'group-waiting')
 return cjson.encode({completed = jobId, next = false})
 end
+-- Token bucket gate (read-only)
+local nextTbCapacity = tonumber(nGrp.tbCapacity) or 0
+local nextTbBlocked = false
+local nextTbDelay = 0
+local nextTbTokens = 0
+local nextJobCostVal = 0
+if nextTbCapacity > 0 then
+  nextTbTokens = tbRefill(nextGroupHashKey, nGrp, tonumber(timestamp))
+  nextJobCostVal = tonumber(redis.call('HGET', nextJobKey, 'cost')) or 1000
+  -- DLQ guard: cost > capacity
+  if nextJobCostVal > nextTbCapacity then
+    redis.call('XACK', streamKey, group, nextEntryId)
+    redis.call('XDEL', streamKey, nextEntryId)
+    redis.call('ZADD', prefix .. 'failed', tonumber(timestamp), nextJobId)
+    redis.call('HSET', nextJobKey,
+      'state', 'failed',
+      'failedReason', 'cost exceeds token bucket capacity',
+      'finishedOn', tostring(timestamp))
+    emitEvent(prefix .. 'events', 'failed', nextJobId, {'failedReason', 'cost exceeds token bucket capacity'})
+    return cjson.encode({completed = jobId, next = false})
+  end
+  if nextTbTokens < nextJobCostVal then
+    nextTbBlocked = true
+    local nextTbRefillRateVal = math.max(tonumber(nGrp.tbRefillRate) or 0, 1)
+    nextTbDelay = math.ceil((nextJobCostVal - nextTbTokens) * 1000 / nextTbRefillRateVal)
+  end
+end
+-- Sliding window gate (read-only)
+local nextRateMax = tonumber(nGrp.rateMax) or 0
+local nextRlBlocked = false
+local nextRlDelay = 0
+if nextRateMax > 0 then
+  local nextRateDuration = tonumber(nGrp.rateDuration) or 0
+  local nextRateWindowStart = tonumber(nGrp.rateWindowStart) or 0
+  local nextRateCount = tonumber(nGrp.rateCount) or 0
+  if nextRateDuration > 0 and timestamp - nextRateWindowStart < nextRateDuration and nextRateCount >= nextRateMax then
+    nextRlBlocked = true
+    nextRlDelay = (nextRateWindowStart + nextRateDuration) - timestamp
+  end
+end
+-- If ANY gate blocked: park + register
+if nextTbBlocked or nextRlBlocked then
+  redis.call('XACK', streamKey, group, nextEntryId)
+  redis.call('XDEL', streamKey, nextEntryId)
+  local nextWaitListKey = prefix .. 'groupq:' .. nextGroupKey
+  redis.call('RPUSH', nextWaitListKey, nextJobId)
+  redis.call('HSET', nextJobKey, 'state', 'group-waiting')
+  local nextMaxDelay = math.max(nextTbDelay, nextRlDelay)
+  local rateLimitedKey = prefix .. 'ratelimited'
+  redis.call('ZADD', rateLimitedKey, tonumber(timestamp) + nextMaxDelay, nextGroupKey)
+  return cjson.encode({completed = jobId, next = false})
+end
+-- All gates passed: mutate state
+if nextTbCapacity > 0 then
+  redis.call('HINCRBY', nextGroupHashKey, 'tbTokens', -nextJobCostVal)
+end
+if nextRateMax > 0 then
+  local nextRateDuration = tonumber(nGrp.rateDuration) or 0
+  if nextRateDuration > 0 then
+    local nextRateWindowStart = tonumber(nGrp.rateWindowStart) or 0
+    if timestamp - nextRateWindowStart >= nextRateDuration then
+      redis.call('HSET', nextGroupHashKey, 'rateWindowStart', tostring(timestamp), 'rateCount', '1')
+    else
+      redis.call('HINCRBY', nextGroupHashKey, 'rateCount', 1)
+    end
+  end
+end
 redis.call('HINCRBY', nextGroupHashKey, 'active', 1)
 end
 redis.call('HSET', nextJobKey, 'state', 'active', 'processedOn', tostring(timestamp), 'lastActive', tostring(timestamp))
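
This block follows a deliberate two-phase pattern: all gates (concurrency, token bucket, sliding window) are evaluated read-only first, and counters are mutated only after every gate passes, so a job blocked by one gate never burns budget in another. When more than one gate blocks, the group is parked once at the longest of the computed delays. A compressed TypeScript sketch of that control flow (names illustrative, not the dist code):

```ts
// Illustrative control flow of the gate sequence.
type GateResult =
  | { ok: true }
  | { ok: false; delayMs: number }; // park the job, re-check after delayMs

function admit(gates: GateResult[]): { admit: boolean; retryInMs: number } {
  const blocked = gates.filter((g): g is { ok: false; delayMs: number } => !g.ok);
  if (blocked.length === 0) return { admit: true, retryInMs: 0 };
  // Park once, scheduled at the LONGEST delay among blocking gates,
  // matching math.max(tbDelay, rlDelay) in the Lua.
  return { admit: false, retryInMs: Math.max(...blocked.map((g) => g.delayMs)) };
}
```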
@@ -479,7 +765,7 @@ redis.register_function('glidemq_fail', function(keys, args)
 'failedReason', failedReason,
 'processedOn', tostring(timestamp)
 )
-releaseGroupSlotAndPromote(jobKey, jobId)
+releaseGroupSlotAndPromote(jobKey, jobId, timestamp)
 emitEvent(eventsKey, 'retrying', jobId, {
 'failedReason', failedReason,
 'attemptsMade', tostring(attemptsMade),
@@ -495,7 +781,7 @@ redis.register_function('glidemq_fail', function(keys, args)
 'processedOn', tostring(timestamp)
 )
 markOrderingDone(jobKey, jobId)
-releaseGroupSlotAndPromote(jobKey, jobId)
+releaseGroupSlotAndPromote(jobKey, jobId, timestamp)
 emitEvent(eventsKey, 'failed', jobId, {'failedReason', failedReason})
 local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))
 if removeMode == 'true' then
@@ -583,7 +869,7 @@ redis.register_function('glidemq_reclaimStalled', function(keys, args)
 'finishedOn', tostring(timestamp)
 )
 markOrderingDone(jobKey, jobId)
-releaseGroupSlotAndPromote(jobKey, jobId)
+releaseGroupSlotAndPromote(jobKey, jobId, timestamp)
 emitEvent(eventsKey, 'failed', jobId, {
 'failedReason', 'job stalled more than maxStalledCount'
 })
@@ -633,6 +919,11 @@ redis.register_function('glidemq_dedup', function(keys, args)
 local maxAttempts = tonumber(args[11]) or 0
 local orderingKey = args[12] or ''
 local groupConcurrency = tonumber(args[13]) or 0
+local groupRateMax = tonumber(args[14]) or 0
+local groupRateDuration = tonumber(args[15]) or 0
+local tbCapacity = tonumber(args[16]) or 0
+local tbRefillRate = tonumber(args[17]) or 0
+local jobCost = tonumber(args[18]) or 0
 local prefix = string.sub(idKey, 1, #idKey - 2)
 local existing = redis.call('HGET', dedupKey, dedupId)
 if mode == 'simple' then
@@ -678,7 +969,7 @@ redis.register_function('glidemq_dedup', function(keys, args)
 local jobId = redis.call('INCR', idKey)
 local jobIdStr = tostring(jobId)
 local jobKey = prefix .. 'job:' .. jobIdStr
-local useGroupConcurrency = (orderingKey ~= '' and groupConcurrency > 1)
+local useGroupConcurrency = (orderingKey ~= '' and (groupConcurrency > 1 or groupRateMax > 0 or tbCapacity > 0))
 local orderingSeq = 0
 if orderingKey ~= '' and not useGroupConcurrency then
 local orderingMetaKey = prefix .. 'ordering'
@@ -690,6 +981,54 @@ redis.register_function('glidemq_dedup', function(keys, args)
 if curMax ~= groupConcurrency then
 redis.call('HSET', groupHashKey, 'maxConcurrency', tostring(groupConcurrency))
 end
+if curMax == 0 and groupConcurrency <= 1 then
+  redis.call('HSET', groupHashKey, 'maxConcurrency', '1')
+end
+if groupRateMax > 0 then
+  local curRateMax = tonumber(redis.call('HGET', groupHashKey, 'rateMax')) or 0
+  if curRateMax ~= groupRateMax then
+    redis.call('HSET', groupHashKey, 'rateMax', tostring(groupRateMax))
+  end
+  local curRateDuration = tonumber(redis.call('HGET', groupHashKey, 'rateDuration')) or 0
+  if curRateDuration ~= groupRateDuration then
+    redis.call('HSET', groupHashKey, 'rateDuration', tostring(groupRateDuration))
+  end
+else
+  local oldRateMax = tonumber(redis.call('HGET', groupHashKey, 'rateMax')) or 0
+  if oldRateMax > 0 then
+    redis.call('HDEL', groupHashKey, 'rateMax', 'rateDuration', 'rateWindowStart', 'rateCount')
+  end
+end
+-- Upsert token bucket fields on group hash
+if tbCapacity > 0 then
+  local curTbCap = tonumber(redis.call('HGET', groupHashKey, 'tbCapacity')) or 0
+  if curTbCap ~= tbCapacity then
+    redis.call('HSET', groupHashKey, 'tbCapacity', tostring(tbCapacity))
+  end
+  local curTbRate = tonumber(redis.call('HGET', groupHashKey, 'tbRefillRate')) or 0
+  if curTbRate ~= tbRefillRate then
+    redis.call('HSET', groupHashKey, 'tbRefillRate', tostring(tbRefillRate))
+  end
+  -- Initialize tokens on first setup
+  if curTbCap == 0 then
+    redis.call('HSET', groupHashKey,
+      'tbTokens', tostring(tbCapacity),
+      'tbLastRefill', tostring(timestamp),
+      'tbRefillRemainder', '0')
+  end
+  -- Validate cost <= capacity at enqueue
+  -- Validate cost (explicit or default 1000 millitokens) against capacity
+  local effectiveCost = (jobCost > 0) and jobCost or 1000
+  if effectiveCost > tbCapacity then
+    return 'ERR:COST_EXCEEDS_CAPACITY'
+  end
+else
+  -- Clear stale tb fields
+  local oldTbCap = tonumber(redis.call('HGET', groupHashKey, 'tbCapacity')) or 0
+  if oldTbCap > 0 then
+    redis.call('HDEL', groupHashKey, 'tbCapacity', 'tbRefillRate', 'tbTokens', 'tbLastRefill', 'tbRefillRemainder')
+  end
+end
 end
 local hashFields = {
 'id', jobIdStr,
@@ -711,6 +1050,10 @@ redis.register_function('glidemq_dedup', function(keys, args)
 hashFields[#hashFields + 1] = 'orderingSeq'
 hashFields[#hashFields + 1] = tostring(orderingSeq)
 end
+if jobCost > 0 then
+  hashFields[#hashFields + 1] = 'cost'
+  hashFields[#hashFields + 1] = tostring(jobCost)
+end
 if parentId ~= '' then
 hashFields[#hashFields + 1] = 'parentId'
 hashFields[#hashFields + 1] = parentId
@@ -743,6 +1086,12 @@ redis.register_function('glidemq_rateLimit', function(keys, args)
 local maxPerWindow = tonumber(args[1])
 local windowDuration = tonumber(args[2])
 local now = tonumber(args[3])
+-- Fallback: read rate limit config from meta if not provided inline
+if maxPerWindow <= 0 then
+  maxPerWindow = tonumber(redis.call('HGET', metaKey, 'rateLimitMax')) or 0
+  windowDuration = tonumber(redis.call('HGET', metaKey, 'rateLimitDuration')) or 0
+  if maxPerWindow <= 0 then return 0 end
+end
 local windowStart = tonumber(redis.call('HGET', rateKey, 'windowStart')) or 0
 local count = tonumber(redis.call('HGET', rateKey, 'count')) or 0
 if now - windowStart >= windowDuration then
@@ -757,6 +1106,87 @@ redis.register_function('glidemq_rateLimit', function(keys, args)
 return 0
 end)
 
+redis.register_function('glidemq_promoteRateLimited', function(keys, args)
+  local rateLimitedKey = keys[1]
+  local streamKey = keys[2]
+  local now = tonumber(args[1])
+  -- Derive prefix from the server-validated key instead of caller-supplied arg
+  local prefix = string.sub(rateLimitedKey, 1, #rateLimitedKey - #'ratelimited')
+  local expired = redis.call('ZRANGEBYSCORE', rateLimitedKey, '0', string.format('%.0f', now), 'LIMIT', 0, 100)
+  if not expired or #expired == 0 then return 0 end
+  local promoted = 0
+  for i = 1, #expired do
+    local gk = expired[i]
+    redis.call('ZREM', rateLimitedKey, gk)
+    local groupHashKey = prefix .. 'group:' .. gk
+    local waitListKey = prefix .. 'groupq:' .. gk
+    -- Load all group fields in one call for rate limit + token bucket checks
+    local prGrpFields = redis.call('HGETALL', groupHashKey)
+    local prGrp = {}
+    for pf = 1, #prGrpFields, 2 do prGrp[prGrpFields[pf]] = prGrpFields[pf + 1] end
+    local rateMax = tonumber(prGrp.rateMax) or 0
+    local maxConc = tonumber(prGrp.maxConcurrency) or 0
+    local active = tonumber(prGrp.active) or 0
+    -- Token bucket pre-check: peek head job cost before promoting
+    local prTbCap = tonumber(prGrp.tbCapacity) or 0
+    local tbCheckPassed = true
+    if prTbCap > 0 then
+      local prTbTokens = tbRefill(groupHashKey, prGrp, now)
+      local headJobId = redis.call('LINDEX', waitListKey, 0)
+      if headJobId then
+        local headJobKey = prefix .. 'job:' .. headJobId
+        -- Tombstone guard
+        if redis.call('EXISTS', headJobKey) == 0 then
+          redis.call('LPOP', waitListKey)
+          tbCheckPassed = false
+        end
+        if tbCheckPassed then
+          local headCost = tonumber(redis.call('HGET', headJobKey, 'cost')) or 1000
+          -- DLQ guard: cost > capacity
+          if headCost > prTbCap then
+            redis.call('LPOP', waitListKey)
+            redis.call('ZADD', prefix .. 'failed', now, headJobId)
+            redis.call('HSET', headJobKey,
+              'state', 'failed',
+              'failedReason', 'cost exceeds token bucket capacity',
+              'finishedOn', tostring(now))
+            emitEvent(prefix .. 'events', 'failed', headJobId, {'failedReason', 'cost exceeds token bucket capacity'})
+            tbCheckPassed = false
+          end
+          if tbCheckPassed and prTbTokens < headCost then
+            -- Not enough tokens: re-register with calculated delay
+            local prTbRate = math.max(tonumber(prGrp.tbRefillRate) or 0, 1)
+            local prTbDelay = math.ceil((headCost - prTbTokens) * 1000 / prTbRate)
+            redis.call('ZADD', rateLimitedKey, now + prTbDelay, gk)
+            tbCheckPassed = false
+          end
+        end
+      end
+    end
+    if tbCheckPassed then
+      -- Promote up to min(rateMax, available concurrency) jobs.
+      -- Do NOT touch rateCount/rateWindowStart here - moveToActive handles
+      -- window reset and counting when the worker picks up the promoted jobs.
+      local canPromote = 1000
+      if rateMax > 0 then
+        canPromote = math.min(canPromote, rateMax)
+      end
+      if maxConc > 0 then
+        canPromote = math.min(canPromote, math.max(0, maxConc - active))
+      end
+      for j = 1, canPromote do
+        local nextJobId = redis.call('LPOP', waitListKey)
+        if not nextJobId then break end
+        redis.call('XADD', streamKey, '*', 'jobId', nextJobId)
+        local nextJobKey = prefix .. 'job:' .. nextJobId
+        redis.call('HSET', nextJobKey, 'state', 'waiting')
+        promoted = promoted + 1
+      end
+    end
+  end
+  return promoted
+end)
+
 redis.register_function('glidemq_checkConcurrency', function(keys, args)
 local metaKey = keys[1]
 local streamKey = keys[2]
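
`glidemq_promoteRateLimited` is designed to be polled: a scheduler wakes up, promotes any groups whose ZSET score (the earliest retry time) has passed, and sleeps again. A minimal polling sketch using the `promoteRateLimited` JS wrapper added later in this diff (the 500 ms interval and the client/keys plumbing are assumptions, not package defaults):

```ts
// Minimal scheduler loop around the promoteRateLimited wrapper.
async function runPromotionLoop(client: any, k: any, stop: AbortSignal): Promise<void> {
  while (!stop.aborted) {
    const promoted = await promoteRateLimited(client, k, Date.now());
    if (promoted > 0) {
      console.log(`promoted ${promoted} rate-limited job(s)`);
    }
    await new Promise((resolve) => setTimeout(resolve, 500)); // assumed poll interval
  }
}
```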
@@ -793,8 +1223,13 @@ redis.register_function('glidemq_moveToActive', function(keys, args)
 if groupKey and groupKey ~= '' then
 local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))
 local groupHashKey = prefix .. 'group:' .. groupKey
-local maxConc = tonumber(redis.call('HGET', groupHashKey, 'maxConcurrency')) or 0
-local active = tonumber(redis.call('HGET', groupHashKey, 'active')) or 0
+-- Load all group fields in one call
+local grpFields = redis.call('HGETALL', groupHashKey)
+local grp = {}
+for f = 1, #grpFields, 2 do grp[grpFields[f]] = grpFields[f + 1] end
+local maxConc = tonumber(grp.maxConcurrency) or 0
+local active = tonumber(grp.active) or 0
+-- Concurrency gate (checked first to avoid burning rate/token slots on parked jobs)
 if maxConc > 0 and active >= maxConc then
 if streamKey ~= '' and entryId ~= '' and group ~= '' then
 redis.call('XACK', streamKey, group, entryId)
@@ -805,6 +1240,81 @@ redis.register_function('glidemq_moveToActive', function(keys, args)
 redis.call('HSET', jobKey, 'state', 'group-waiting')
 return 'GROUP_FULL'
 end
+-- Token bucket gate (read-only)
+local tbCapacity = tonumber(grp.tbCapacity) or 0
+local tbBlocked = false
+local tbDelay = 0
+local tbTokens = 0
+local jobCostVal = 0
+if tbCapacity > 0 then
+  tbTokens = tbRefill(groupHashKey, grp, tonumber(timestamp))
+  jobCostVal = tonumber(redis.call('HGET', jobKey, 'cost')) or 1000
+  -- DLQ guard: cost > capacity
+  if jobCostVal > tbCapacity then
+    if streamKey ~= '' and entryId ~= '' and group ~= '' then
+      redis.call('XACK', streamKey, group, entryId)
+      redis.call('XDEL', streamKey, entryId)
+    end
+    redis.call('ZADD', prefix .. 'failed', tonumber(timestamp), jobId)
+    redis.call('HSET', jobKey,
+      'state', 'failed',
+      'failedReason', 'cost exceeds token bucket capacity',
+      'finishedOn', timestamp)
+    emitEvent(prefix .. 'events', 'failed', jobId, {'failedReason', 'cost exceeds token bucket capacity'})
+    return 'ERR:COST_EXCEEDS_CAPACITY'
+  end
+  if tbTokens < jobCostVal then
+    tbBlocked = true
+    local tbRefillRateVal = tonumber(grp.tbRefillRate) or 0
+    if tbRefillRateVal <= 0 then tbRefillRateVal = 1 end
+    tbDelay = math.ceil((jobCostVal - tbTokens) * 1000 / tbRefillRateVal)
+  end
+end
+-- Sliding window gate (read-only)
+local rateMax = tonumber(grp.rateMax) or 0
+local rlBlocked = false
+local rlDelay = 0
+if rateMax > 0 then
+  local rateDuration = tonumber(grp.rateDuration) or 0
+  local rateWindowStart = tonumber(grp.rateWindowStart) or 0
+  local rateCount = tonumber(grp.rateCount) or 0
+  local now = tonumber(timestamp)
+  if rateDuration > 0 and now - rateWindowStart < rateDuration and rateCount >= rateMax then
+    rlBlocked = true
+    rlDelay = (rateWindowStart + rateDuration) - now
+  end
+end
+-- If ANY gate blocked: park + register
+if tbBlocked or rlBlocked then
+  if streamKey ~= '' and entryId ~= '' and group ~= '' then
+    redis.call('XACK', streamKey, group, entryId)
+    redis.call('XDEL', streamKey, entryId)
+  end
+  local waitListKey = prefix .. 'groupq:' .. groupKey
+  redis.call('RPUSH', waitListKey, jobId)
+  redis.call('HSET', jobKey, 'state', 'group-waiting')
+  local maxDelay = math.max(tbDelay, rlDelay)
+  local rateLimitedKey = prefix .. 'ratelimited'
+  redis.call('ZADD', rateLimitedKey, tonumber(timestamp) + maxDelay, groupKey)
+  if tbBlocked then return 'GROUP_TOKEN_LIMITED' end
+  return 'GROUP_RATE_LIMITED'
+end
+-- All gates passed: mutate state
+if tbCapacity > 0 then
+  redis.call('HINCRBY', groupHashKey, 'tbTokens', -jobCostVal)
+end
+if rateMax > 0 then
+  local rateDuration = tonumber(grp.rateDuration) or 0
+  if rateDuration > 0 then
+    local rateWindowStart = tonumber(grp.rateWindowStart) or 0
+    local now = tonumber(timestamp)
+    if now - rateWindowStart >= rateDuration then
+      redis.call('HSET', groupHashKey, 'rateWindowStart', tostring(now), 'rateCount', '1')
+    else
+      redis.call('HINCRBY', groupHashKey, 'rateCount', 1)
+    end
+  end
+end
 redis.call('HINCRBY', groupHashKey, 'active', 1)
 end
 redis.call('HSET', jobKey, 'state', 'active', 'processedOn', timestamp, 'lastActive', timestamp)
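
With the new gates, `moveToActive` can return two more park sentinels besides `GROUP_FULL`, plus a hard-failure code. Worker code that already treats `GROUP_FULL` as "skip this entry, the group queue owns the job" should treat the new codes the same way; a hedged dispatch sketch (handler plumbing assumed):

```ts
// Sketch of worker-side dispatch over moveToActive results.
async function handleEntry(
  client: any, k: any, jobId: string,
  streamKey: string, entryId: string, group: string,
  process: (job: Record<string, string>) => Promise<void>,
) {
  const res = await moveToActive(client, k, jobId, Date.now(), streamKey, entryId, group);
  switch (res) {
    case null:                        // job hash gone (tombstone)
    case 'REVOKED':
    case 'GROUP_FULL':                // parked by the concurrency gate
    case 'GROUP_RATE_LIMITED':        // parked by the sliding-window gate
    case 'GROUP_TOKEN_LIMITED':       // parked by the token-bucket gate
    case 'ERR:COST_EXCEEDS_CAPACITY': // failed outright: cost can never fit
      return;                         // the Lua already parked/failed the job
    default:
      await process(res);             // full job hash: run it
  }
}
```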
@@ -849,8 +1359,12 @@ redis.register_function('glidemq_addFlow', function(keys, args)
 local depsKey = parentPrefix .. 'deps:' .. parentJobIdStr
 local parentOrderingKey = extractOrderingKeyFromOpts(parentOpts)
 local parentGroupConc = extractGroupConcurrencyFromOpts(parentOpts)
+local parentRateMax, parentRateDuration = extractGroupRateLimitFromOpts(parentOpts)
+local parentTbCapacity, parentTbRefillRate = extractTokenBucketFromOpts(parentOpts)
+local parentCost = extractCostFromOpts(parentOpts)
+local parentUseGroup = (parentOrderingKey ~= '' and (parentGroupConc > 1 or parentRateMax > 0 or parentTbCapacity > 0))
 local parentOrderingSeq = 0
-if parentOrderingKey ~= '' and
+if parentOrderingKey ~= '' and not parentUseGroup then
 local parentOrderingMetaKey = parentPrefix .. 'ordering'
 parentOrderingSeq = redis.call('HINCRBY', parentOrderingMetaKey, parentOrderingKey, 1)
 end
@@ -866,22 +1380,52 @@ redis.register_function('glidemq_addFlow', function(keys, args)
 'maxAttempts', tostring(parentMaxAttempts),
 'state', 'waiting-children'
 }
-if
+if parentUseGroup then
 parentHash[#parentHash + 1] = 'groupKey'
 parentHash[#parentHash + 1] = parentOrderingKey
 local groupHashKey = parentPrefix .. 'group:' .. parentOrderingKey
-redis.call('HSET', groupHashKey, 'maxConcurrency', tostring(parentGroupConc))
+redis.call('HSET', groupHashKey, 'maxConcurrency', tostring(parentGroupConc > 1 and parentGroupConc or 1))
 redis.call('HSETNX', groupHashKey, 'active', '0')
+if parentRateMax > 0 then
+  redis.call('HSET', groupHashKey, 'rateMax', tostring(parentRateMax))
+  redis.call('HSET', groupHashKey, 'rateDuration', tostring(parentRateDuration))
+end
+if parentTbCapacity > 0 then
+  if parentCost > 0 and parentCost > parentTbCapacity then
+    return 'ERR:COST_EXCEEDS_CAPACITY'
+  end
+  redis.call('HSET', groupHashKey, 'tbCapacity', tostring(parentTbCapacity), 'tbRefillRate', tostring(parentTbRefillRate))
+  redis.call('HSETNX', groupHashKey, 'tbTokens', tostring(parentTbCapacity))
+  redis.call('HSETNX', groupHashKey, 'tbLastRefill', tostring(timestamp))
+  redis.call('HSETNX', groupHashKey, 'tbRefillRemainder', '0')
+end
 elseif parentOrderingKey ~= '' then
 parentHash[#parentHash + 1] = 'orderingKey'
 parentHash[#parentHash + 1] = parentOrderingKey
 parentHash[#parentHash + 1] = 'orderingSeq'
 parentHash[#parentHash + 1] = tostring(parentOrderingSeq)
 end
+if parentCost > 0 then
+  parentHash[#parentHash + 1] = 'cost'
+  parentHash[#parentHash + 1] = tostring(parentCost)
+end
 redis.call('HSET', parentJobKey, unpack(parentHash))
-
+-- Pre-validate all children's cost vs capacity before any child writes
 local childArgOffset = 8
 local childKeyOffset = 4
+for i = 1, numChildren do
+  local base = childArgOffset + (i - 1) * 8
+  local preChildOpts = args[base + 3]
+  local preChildTbCap, _ = extractTokenBucketFromOpts(preChildOpts)
+  if preChildTbCap > 0 then
+    local preChildCost = extractCostFromOpts(preChildOpts)
+    local preEffective = (preChildCost > 0) and preChildCost or 1000
+    if preEffective > preChildTbCap then
+      return 'ERR:COST_EXCEEDS_CAPACITY'
+    end
+  end
+end
+local childIds = {}
 for i = 1, numChildren do
 local base = childArgOffset + (i - 1) * 8
 local childName = args[base + 1]
@@ -903,8 +1447,12 @@ redis.register_function('glidemq_addFlow', function(keys, args)
 local childJobKey = childPrefix .. 'job:' .. childJobIdStr
 local childOrderingKey = extractOrderingKeyFromOpts(childOpts)
 local childGroupConc = extractGroupConcurrencyFromOpts(childOpts)
+local childRateMax, childRateDuration = extractGroupRateLimitFromOpts(childOpts)
+local childTbCapacity, childTbRefillRate = extractTokenBucketFromOpts(childOpts)
+local childCost = extractCostFromOpts(childOpts)
+local childUseGroup = (childOrderingKey ~= '' and (childGroupConc > 1 or childRateMax > 0 or childTbCapacity > 0))
 local childOrderingSeq = 0
-if childOrderingKey ~= '' and
+if childOrderingKey ~= '' and not childUseGroup then
 local childOrderingMetaKey = childPrefix .. 'ordering'
 childOrderingSeq = redis.call('HINCRBY', childOrderingMetaKey, childOrderingKey, 1)
 end
@@ -921,18 +1469,32 @@ redis.register_function('glidemq_addFlow', function(keys, args)
 'parentId', parentJobIdStr,
 'parentQueue', childParentQueue
 }
-if
+if childUseGroup then
 childHash[#childHash + 1] = 'groupKey'
 childHash[#childHash + 1] = childOrderingKey
 local childGroupHashKey = childPrefix .. 'group:' .. childOrderingKey
-redis.call('HSETNX', childGroupHashKey, 'maxConcurrency', tostring(childGroupConc))
+redis.call('HSETNX', childGroupHashKey, 'maxConcurrency', tostring(childGroupConc > 1 and childGroupConc or 1))
 redis.call('HSETNX', childGroupHashKey, 'active', '0')
+if childRateMax > 0 then
+  redis.call('HSET', childGroupHashKey, 'rateMax', tostring(childRateMax))
+  redis.call('HSET', childGroupHashKey, 'rateDuration', tostring(childRateDuration))
+end
+if childTbCapacity > 0 then
+  redis.call('HSET', childGroupHashKey, 'tbCapacity', tostring(childTbCapacity), 'tbRefillRate', tostring(childTbRefillRate))
+  redis.call('HSETNX', childGroupHashKey, 'tbTokens', tostring(childTbCapacity))
+  redis.call('HSETNX', childGroupHashKey, 'tbLastRefill', tostring(timestamp))
+  redis.call('HSETNX', childGroupHashKey, 'tbRefillRemainder', '0')
+end
 elseif childOrderingKey ~= '' then
 childHash[#childHash + 1] = 'orderingKey'
 childHash[#childHash + 1] = childOrderingKey
 childHash[#childHash + 1] = 'orderingSeq'
 childHash[#childHash + 1] = tostring(childOrderingSeq)
 end
+if childCost > 0 then
+  childHash[#childHash + 1] = 'cost'
+  childHash[#childHash + 1] = tostring(childCost)
+end
 if childDelay > 0 or childPriority > 0 then
 childHash[#childHash + 1] = 'state'
 childHash[#childHash + 1] = childDelay > 0 and 'delayed' or 'prioritized'
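
Parent and child flow nodes now run through the same `extractGroupRateLimitFromOpts` / `extractTokenBucketFromOpts` / `extractCostFromOpts` path, so each node of a flow can carry its own group limits and cost. A plausible flow payload (the `FlowProducer.add` call shape is assumed from the package's BullMQ-style API; the `ordering` and `cost` fields match the Lua extractors):

```ts
// Assumed FlowProducer usage; the opts fields are the ones the Lua extracts.
await flowProducer.add({
  name: 'render-report',
  queueName: 'reports',
  opts: { ordering: { key: 'tenant-42', rateLimit: { max: 5, duration: 60_000 } } },
  children: [
    {
      name: 'fetch-data',
      queueName: 'io',
      // Child with its own token bucket: 3 tokens, refilling 1 token/s; job costs 2 tokens.
      opts: { ordering: { key: 'tenant-42', tokenBucket: { capacity: 3, refillRate: 1 } }, cost: 2 },
    },
  ],
});
```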
@@ -1004,7 +1566,7 @@ redis.register_function('glidemq_removeJob', function(keys, args)
 local groupKey = redis.call('HGET', jobKey, 'groupKey')
 if groupKey and groupKey ~= '' then
 if state == 'active' then
-releaseGroupSlotAndPromote(jobKey, jobId)
+releaseGroupSlotAndPromote(jobKey, jobId, 0)
 elseif state == 'group-waiting' then
 local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))
 local waitListKey = prefix .. 'groupq:' .. groupKey
@@ -1127,7 +1689,7 @@ end)
 * Add a job to the queue atomically.
 * Returns the new job ID (string).
 */
-async function addJob(client, k, jobName, data, opts, timestamp, delay, priority, parentId, maxAttempts, orderingKey = '', groupConcurrency = 0) {
+async function addJob(client, k, jobName, data, opts, timestamp, delay, priority, parentId, maxAttempts, orderingKey = '', groupConcurrency = 0, groupRateMax = 0, groupRateDuration = 0, tbCapacity = 0, tbRefillRate = 0, jobCost = 0) {
 const result = await client.fcall('glidemq_addJob', [k.id, k.stream, k.scheduled, k.events], [
 jobName,
 data,
@@ -1139,6 +1701,11 @@ async function addJob(client, k, jobName, data, opts, timestamp, delay, priority
 maxAttempts.toString(),
 orderingKey,
 groupConcurrency.toString(),
+groupRateMax.toString(),
+groupRateDuration.toString(),
+tbCapacity.toString(),
+tbRefillRate.toString(),
+jobCost.toString(),
 ]);
 return result;
 }
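
The five new trailing parameters are marshalled positionally into `args[11..15]` of `glidemq_addJob` (and `args[14..18]` of `glidemq_dedup` below), already scaled to millitokens by the caller. A direct call with the new arguments (values illustrative):

```ts
// Direct wrapper call; millitoken scaling (x1000) is the caller's job here.
const jobId = await addJob(
  client, k,
  'send-email',                    // jobName
  JSON.stringify({ to: 'a@b.c' }), // data
  '{}',                            // opts JSON
  Date.now(),                      // timestamp
  0, 0,                            // delay, priority
  '', 3,                           // parentId, maxAttempts
  'tenant-42',                     // orderingKey
  2,                               // groupConcurrency
  10, 60_000,                      // groupRateMax, groupRateDuration (ms)
  5000, 2500,                      // tbCapacity, tbRefillRate (millitokens, mtok/s)
  2000,                            // jobCost (2 tokens)
);
```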
@@ -1146,7 +1713,7 @@ async function addJob(client, k, jobName, data, opts, timestamp, delay, priority
 * Add a job with deduplication. Checks the dedup hash and either skips or adds the job.
 * Returns "skipped" if deduplicated, otherwise the new job ID (string).
 */
-async function dedup(client, k, dedupId, ttlMs, mode, jobName, data, opts, timestamp, delay, priority, parentId, maxAttempts, orderingKey = '', groupConcurrency = 0) {
+async function dedup(client, k, dedupId, ttlMs, mode, jobName, data, opts, timestamp, delay, priority, parentId, maxAttempts, orderingKey = '', groupConcurrency = 0, groupRateMax = 0, groupRateDuration = 0, tbCapacity = 0, tbRefillRate = 0, jobCost = 0) {
 const result = await client.fcall('glidemq_dedup', [k.dedup, k.id, k.stream, k.scheduled, k.events], [
 dedupId,
 ttlMs.toString(),
@@ -1161,6 +1728,11 @@ async function dedup(client, k, dedupId, ttlMs, mode, jobName, data, opts, times
 maxAttempts.toString(),
 orderingKey,
 groupConcurrency.toString(),
+groupRateMax.toString(),
+groupRateDuration.toString(),
+tbCapacity.toString(),
+tbRefillRate.toString(),
+jobCost.toString(),
 ]);
 return result;
 }
@@ -1322,10 +1894,14 @@ async function checkConcurrency(client, k, group = exports.CONSUMER_GROUP) {
 * Reads the full job hash, checks revoked flag, sets state=active + processedOn + lastActive.
 * For group-concurrency jobs, checks if the group has capacity. If not, parks the job
 * in the group wait list and returns 'GROUP_FULL'.
+* For rate-limited groups, parks the job and returns 'GROUP_RATE_LIMITED'.
 * Returns:
 * - null if job hash doesn't exist
 * - 'REVOKED' if the job's revoked flag is set
 * - 'GROUP_FULL' if the job's group is at max concurrency (job was parked)
+* - 'GROUP_RATE_LIMITED' if the job's group exceeded its rate limit (job was parked)
+* - 'GROUP_TOKEN_LIMITED' if the job's group has insufficient tokens (job was parked)
+* - 'ERR:COST_EXCEEDS_CAPACITY' if the job cost exceeds token bucket capacity (job was failed)
 * - Record<string, string> with all job fields otherwise
 */
 async function moveToActive(client, k, jobId, timestamp, streamKey = '', entryId = '', group = '') {
@@ -1343,6 +1919,12 @@ async function moveToActive(client, k, jobId, timestamp, streamKey = '', entryId
 return 'REVOKED';
 if (str === 'GROUP_FULL')
 return 'GROUP_FULL';
+if (str === 'GROUP_RATE_LIMITED')
+  return 'GROUP_RATE_LIMITED';
+if (str === 'GROUP_TOKEN_LIMITED')
+  return 'GROUP_TOKEN_LIMITED';
+if (str === 'ERR:COST_EXCEEDS_CAPACITY')
+  return 'ERR:COST_EXCEEDS_CAPACITY';
 // Parse the cjson.encode output: [field1, value1, field2, value2, ...]
 const arr = JSON.parse(str);
 const hash = {};
@@ -1351,6 +1933,15 @@ async function moveToActive(client, k, jobId, timestamp, streamKey = '', entryId
 }
 return hash;
 }
+/**
+ * Promote rate-limited groups whose window has expired.
+ * Moves waiting jobs from the group queue back into the stream.
+ * Returns the number of jobs promoted.
+ */
+async function promoteRateLimited(client, k, timestamp) {
+  const result = await client.fcall('glidemq_promoteRateLimited', [k.ratelimited, k.stream], [timestamp.toString()]);
+  return Number(result) || 0;
+}
 /**
 * Defers an active job back to waiting by acknowledging + deleting the current
 * stream entry and re-enqueuing the same jobId to the stream tail.