glide-mq 0.7.0 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82):
  1. package/CHANGELOG.md +85 -0
  2. package/LICENSE +191 -0
  3. package/README.md +65 -50
  4. package/dist/connection.d.ts.map +1 -1
  5. package/dist/connection.js +5 -3
  6. package/dist/connection.js.map +1 -1
  7. package/dist/errors.d.ts +3 -0
  8. package/dist/errors.d.ts.map +1 -1
  9. package/dist/errors.js +8 -1
  10. package/dist/errors.js.map +1 -1
  11. package/dist/flow-producer.d.ts.map +1 -1
  12. package/dist/flow-producer.js +5 -43
  13. package/dist/flow-producer.js.map +1 -1
  14. package/dist/functions/index.d.ts +50 -2
  15. package/dist/functions/index.d.ts.map +1 -1
  16. package/dist/functions/index.js +522 -82
  17. package/dist/functions/index.js.map +1 -1
  18. package/dist/graceful-shutdown.d.ts.map +1 -1
  19. package/dist/graceful-shutdown.js.map +1 -1
  20. package/dist/index.d.ts +2 -2
  21. package/dist/index.d.ts.map +1 -1
  22. package/dist/index.js +2 -1
  23. package/dist/index.js.map +1 -1
  24. package/dist/job.d.ts +30 -0
  25. package/dist/job.d.ts.map +1 -1
  26. package/dist/job.js +94 -9
  27. package/dist/job.js.map +1 -1
  28. package/dist/queue-events.d.ts.map +1 -1
  29. package/dist/queue-events.js +12 -6
  30. package/dist/queue-events.js.map +1 -1
  31. package/dist/queue.d.ts +50 -2
  32. package/dist/queue.d.ts.map +1 -1
  33. package/dist/queue.js +290 -113
  34. package/dist/queue.js.map +1 -1
  35. package/dist/sandbox/index.d.ts +10 -0
  36. package/dist/sandbox/index.d.ts.map +1 -0
  37. package/dist/sandbox/index.js +56 -0
  38. package/dist/sandbox/index.js.map +1 -0
  39. package/dist/sandbox/pool.d.ts +27 -0
  40. package/dist/sandbox/pool.d.ts.map +1 -0
  41. package/dist/sandbox/pool.js +273 -0
  42. package/dist/sandbox/pool.js.map +1 -0
  43. package/dist/sandbox/runner.d.ts +6 -0
  44. package/dist/sandbox/runner.d.ts.map +1 -0
  45. package/dist/sandbox/runner.js +130 -0
  46. package/dist/sandbox/runner.js.map +1 -0
  47. package/dist/sandbox/sandbox-job.d.ts +61 -0
  48. package/dist/sandbox/sandbox-job.d.ts.map +1 -0
  49. package/dist/sandbox/sandbox-job.js +137 -0
  50. package/dist/sandbox/sandbox-job.js.map +1 -0
  51. package/dist/sandbox/types.d.ts +59 -0
  52. package/dist/sandbox/types.d.ts.map +1 -0
  53. package/dist/sandbox/types.js +25 -0
  54. package/dist/sandbox/types.js.map +1 -0
  55. package/dist/scheduler.d.ts.map +1 -1
  56. package/dist/scheduler.js +35 -6
  57. package/dist/scheduler.js.map +1 -1
  58. package/dist/telemetry.d.ts.map +1 -1
  59. package/dist/telemetry.js +9 -3
  60. package/dist/telemetry.js.map +1 -1
  61. package/dist/testing.d.ts +36 -2
  62. package/dist/testing.d.ts.map +1 -1
  63. package/dist/testing.js +180 -5
  64. package/dist/testing.js.map +1 -1
  65. package/dist/types.d.ts +16 -0
  66. package/dist/types.d.ts.map +1 -1
  67. package/dist/utils.d.ts +3 -1
  68. package/dist/utils.d.ts.map +1 -1
  69. package/dist/utils.js +8 -2
  70. package/dist/utils.js.map +1 -1
  71. package/dist/worker.d.ts +14 -2
  72. package/dist/worker.d.ts.map +1 -1
  73. package/dist/worker.js +137 -18
  74. package/dist/worker.js.map +1 -1
  75. package/package.json +13 -1
  76. package/.env +0 -1
  77. package/.jules/bolt.md +0 -3
  78. package/demo/README.md +0 -169
  79. package/demo/dashboard-server.ts +0 -474
  80. package/demo/index.ts +0 -502
  81. package/demo/package-lock.json +0 -2051
  82. package/demo/package.json +0 -26
@@ -1,14 +1,21 @@
1
1
  import type { Client } from '../types';
2
2
  import type { GlideReturnType } from '@glidemq/speedkey';
3
3
  export declare const LIBRARY_NAME = "glidemq";
4
- export declare const LIBRARY_VERSION = "19";
4
+ export declare const LIBRARY_VERSION = "28";
5
5
  export declare const CONSUMER_GROUP = "workers";
6
- export declare const LIBRARY_SOURCE = "#!lua name=glidemq\n\nlocal PRIORITY_SHIFT = 4398046511104\n\nlocal function emitEvent(eventsKey, eventType, jobId, extraFields)\n local fields = {'event', eventType, 'jobId', tostring(jobId)}\n if extraFields then\n for i = 1, #extraFields, 2 do\n fields[#fields + 1] = extraFields[i]\n fields[#fields + 1] = extraFields[i + 1]\n end\n end\n redis.call('XADD', eventsKey, 'MAXLEN', '~', '1000', '*', unpack(fields))\nend\n\nlocal function markOrderingDone(jobKey, jobId)\n local orderingKey = redis.call('HGET', jobKey, 'orderingKey')\n if not orderingKey or orderingKey == '' then\n return\n end\n local orderingSeq = tonumber(redis.call('HGET', jobKey, 'orderingSeq')) or 0\n if orderingSeq <= 0 then\n return\n end\n\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n local metaKey = prefix .. 'meta'\n local doneField = 'orderdone:' .. orderingKey\n local pendingKey = prefix .. 'orderdone:pending:' .. orderingKey\n\n local lastDone = tonumber(redis.call('HGET', metaKey, doneField)) or 0\n if orderingSeq <= lastDone then\n redis.call('HDEL', pendingKey, tostring(orderingSeq))\n return\n end\n\n redis.call('HSET', pendingKey, tostring(orderingSeq), '1')\n local advanced = lastDone\n while true do\n local nextSeq = advanced + 1\n if redis.call('HEXISTS', pendingKey, tostring(nextSeq)) == 0 then\n break\n end\n redis.call('HDEL', pendingKey, tostring(nextSeq))\n advanced = nextSeq\n end\n if advanced > lastDone then\n redis.call('HSET', metaKey, doneField, tostring(advanced))\n end\nend\n\n-- Refill token bucket using remainder accumulator for precision.\n-- tbRefillRate is in millitokens/second. 
Returns current millitokens after refill.\n-- Side effect: updates tbTokens, tbLastRefill, tbRefillRemainder on the group hash.\nlocal function tbRefill(groupHashKey, g, now)\n local tbCapacity = tonumber(g.tbCapacity) or 0\n if tbCapacity <= 0 then return 0 end\n local tbTokens = tonumber(g.tbTokens) or tbCapacity\n local tbRefillRate = tonumber(g.tbRefillRate) or 0\n local tbLastRefill = tonumber(g.tbLastRefill) or now\n local tbRefillRemainder = tonumber(g.tbRefillRemainder) or 0\n local elapsed = now - tbLastRefill\n if elapsed <= 0 or tbRefillRate <= 0 then return tbTokens end\n -- Cap elapsed to prevent overflow in long-idle buckets\n local maxElapsed = math.ceil(tbCapacity * 1000 / tbRefillRate)\n if elapsed > maxElapsed then elapsed = maxElapsed end\n local raw = elapsed * tbRefillRate + tbRefillRemainder\n local added = math.floor(raw / 1000)\n local newRemainder = raw % 1000\n local newTokens = math.min(tbCapacity, tbTokens + added)\n redis.call('HSET', groupHashKey,\n 'tbTokens', tostring(newTokens),\n 'tbLastRefill', tostring(now),\n 'tbRefillRemainder', tostring(newRemainder))\n return newTokens\nend\n\nlocal function releaseGroupSlotAndPromote(jobKey, jobId, now)\n local gk = redis.call('HGET', jobKey, 'groupKey')\n if not gk or gk == '' then return end\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n local groupHashKey = prefix .. 'group:' .. gk\n -- Load all group fields in one call\n local gFields = redis.call('HGETALL', groupHashKey)\n local g = {}\n for gf = 1, #gFields, 2 do g[gFields[gf]] = gFields[gf + 1] end\n local cur = tonumber(g.active) or 0\n local newActive = (cur > 0) and (cur - 1) or 0\n if cur > 0 then\n redis.call('HSET', groupHashKey, 'active', tostring(newActive))\n end\n local waitListKey = prefix .. 'groupq:' .. 
gk\n local waitLen = redis.call('LLEN', waitListKey)\n if waitLen == 0 then return end\n -- Concurrency gate: if still at or above max after decrement, do not promote\n local maxConc = tonumber(g.maxConcurrency) or 0\n if maxConc > 0 and newActive >= maxConc then return end\n -- Rate limit gate (skip if now is nil or 0 for safe fallback)\n -- Only blocks promotion; does NOT increment rateCount. moveToActive handles counting.\n local rateMax = tonumber(g.rateMax) or 0\n local rateRemaining = 0\n local ts = tonumber(now) or 0\n if ts > 0 and rateMax > 0 then\n local rateDuration = tonumber(g.rateDuration) or 0\n if rateDuration > 0 then\n local rateWindowStart = tonumber(g.rateWindowStart) or 0\n local rateCount = tonumber(g.rateCount) or 0\n if ts - rateWindowStart < rateDuration then\n if rateCount >= rateMax then\n -- Window active and at capacity: do not promote, register for scheduler\n local rateLimitedKey = prefix .. 'ratelimited'\n redis.call('ZADD', rateLimitedKey, rateWindowStart + rateDuration, gk)\n return\n end\n rateRemaining = rateMax - rateCount\n end\n end\n end\n -- Token bucket gate: check head job cost before promoting\n local tbCap = tonumber(g.tbCapacity) or 0\n if ts > 0 and tbCap > 0 then\n local tbTokensCur = tbRefill(groupHashKey, g, ts)\n -- Peek at head job, skipping tombstones and DLQ'd jobs (up to 10 iterations)\n local tbCheckPasses = 0\n local tbOk = false\n while tbCheckPasses < 10 do\n tbCheckPasses = tbCheckPasses + 1\n local headJobId = redis.call('LINDEX', waitListKey, 0)\n if not headJobId then break end\n local headJobKey = prefix .. 'job:' .. headJobId\n -- Tombstone guard: job hash deleted - pop and check next\n if redis.call('EXISTS', headJobKey) == 0 then\n redis.call('LPOP', waitListKey)\n else\n local headCost = tonumber(redis.call('HGET', headJobKey, 'cost')) or 1000\n -- DLQ guard: cost > capacity - pop, fail, check next\n if headCost > tbCap then\n redis.call('LPOP', waitListKey)\n redis.call('ZADD', prefix .. 
'failed', ts, headJobId)\n redis.call('HSET', headJobKey,\n 'state', 'failed',\n 'failedReason', 'cost exceeds token bucket capacity',\n 'finishedOn', tostring(ts))\n emitEvent(prefix .. 'events', 'failed', headJobId, {'failedReason', 'cost exceeds token bucket capacity'})\n elseif tbTokensCur < headCost then\n -- Not enough tokens: register delay and skip promotion\n local tbRateVal = tonumber(g.tbRefillRate) or 0\n if tbRateVal <= 0 then break end\n local tbDelayMs = math.ceil((headCost - tbTokensCur) * 1000 / tbRateVal)\n local rateLimitedKey = prefix .. 'ratelimited'\n redis.call('ZADD', rateLimitedKey, ts + tbDelayMs, gk)\n return\n else\n tbOk = true\n break\n end\n end\n end\n if not tbOk and tbCheckPasses >= 10 then return end\n end\n -- Calculate how many slots are available for promotion\n local available = 1\n if maxConc > 0 then\n available = maxConc - newActive\n else\n available = math.min(waitLen, 1000)\n end\n -- Cap by rate limit remaining if a window is active\n if rateRemaining > 0 then\n available = math.min(available, rateRemaining)\n end\n local streamKey = prefix .. 'stream'\n for p = 1, available do\n local nextJobId = redis.call('LPOP', waitListKey)\n if not nextJobId then break end\n redis.call('XADD', streamKey, '*', 'jobId', nextJobId)\n local nextJobKey = prefix .. 'job:' .. 
nextJobId\n redis.call('HSET', nextJobKey, 'state', 'waiting')\n end\nend\n\nlocal function extractOrderingKeyFromOpts(optsJson)\n if not optsJson or optsJson == '' then\n return ''\n end\n local ok, decoded = pcall(cjson.decode, optsJson)\n if not ok or type(decoded) ~= 'table' then\n return ''\n end\n local ordering = decoded['ordering']\n if type(ordering) ~= 'table' then\n return ''\n end\n local key = ordering['key']\n if key == nil then\n return ''\n end\n return tostring(key)\nend\n\nlocal function extractGroupConcurrencyFromOpts(optsJson)\n if not optsJson or optsJson == '' then\n return 0\n end\n local ok, decoded = pcall(cjson.decode, optsJson)\n if not ok or type(decoded) ~= 'table' then\n return 0\n end\n local ordering = decoded['ordering']\n if type(ordering) ~= 'table' then\n return 0\n end\n local conc = ordering['concurrency']\n if conc == nil then\n return 0\n end\n return tonumber(conc) or 0\nend\n\nlocal function extractGroupRateLimitFromOpts(optsJson)\n if not optsJson or optsJson == '' then\n return 0, 0\n end\n local ok, decoded = pcall(cjson.decode, optsJson)\n if not ok or type(decoded) ~= 'table' then\n return 0, 0\n end\n local ordering = decoded['ordering']\n if type(ordering) ~= 'table' then\n return 0, 0\n end\n local rl = ordering['rateLimit']\n if type(rl) ~= 'table' then\n return 0, 0\n end\n local max = tonumber(rl['max']) or 0\n local duration = tonumber(rl['duration']) or 0\n return max, duration\nend\n\nlocal function extractTokenBucketFromOpts(optsJson)\n if not optsJson or optsJson == '' then return 0, 0 end\n local ok, decoded = pcall(cjson.decode, optsJson)\n if not ok or type(decoded) ~= 'table' then return 0, 0 end\n local ordering = decoded['ordering']\n if type(ordering) ~= 'table' then return 0, 0 end\n local tb = ordering['tokenBucket']\n if type(tb) ~= 'table' then return 0, 0 end\n local capacity = tonumber(tb['capacity']) or 0\n local refillRate = tonumber(tb['refillRate']) or 0\n return math.floor(capacity * 1000), 
math.floor(refillRate * 1000)\nend\n\nlocal function extractCostFromOpts(optsJson)\n if not optsJson or optsJson == '' then return 0 end\n local ok, decoded = pcall(cjson.decode, optsJson)\n if not ok or type(decoded) ~= 'table' then return 0 end\n local cost = tonumber(decoded['cost']) or 0\n return math.floor(cost * 1000)\nend\n\nredis.register_function('glidemq_version', function(keys, args)\n return '19'\nend)\n\nredis.register_function('glidemq_addJob', function(keys, args)\n local idKey = keys[1]\n local streamKey = keys[2]\n local scheduledKey = keys[3]\n local eventsKey = keys[4]\n local jobName = args[1]\n local jobData = args[2]\n local jobOpts = args[3]\n local timestamp = tonumber(args[4])\n local delay = tonumber(args[5]) or 0\n local priority = tonumber(args[6]) or 0\n local parentId = args[7] or ''\n local maxAttempts = tonumber(args[8]) or 0\n local orderingKey = args[9] or ''\n local groupConcurrency = tonumber(args[10]) or 0\n local groupRateMax = tonumber(args[11]) or 0\n local groupRateDuration = tonumber(args[12]) or 0\n local tbCapacity = tonumber(args[13]) or 0\n local tbRefillRate = tonumber(args[14]) or 0\n local jobCost = tonumber(args[15]) or 0\n local jobId = redis.call('INCR', idKey)\n local jobIdStr = tostring(jobId)\n local prefix = string.sub(idKey, 1, #idKey - 2)\n local jobKey = prefix .. 'job:' .. jobIdStr\n local useGroupConcurrency = (orderingKey ~= '' and (groupConcurrency > 1 or groupRateMax > 0 or tbCapacity > 0))\n local orderingSeq = 0\n if orderingKey ~= '' and not useGroupConcurrency then\n local orderingMetaKey = prefix .. 'ordering'\n orderingSeq = redis.call('HINCRBY', orderingMetaKey, orderingKey, 1)\n end\n if useGroupConcurrency then\n local groupHashKey = prefix .. 'group:' .. 
orderingKey\n local curMax = tonumber(redis.call('HGET', groupHashKey, 'maxConcurrency')) or 0\n if curMax ~= groupConcurrency then\n redis.call('HSET', groupHashKey, 'maxConcurrency', tostring(groupConcurrency))\n end\n -- When rate limit or token bucket forces group path but concurrency is 0 or 1, ensure maxConcurrency >= 1\n if curMax == 0 and groupConcurrency <= 1 then\n redis.call('HSET', groupHashKey, 'maxConcurrency', '1')\n end\n -- Upsert rate limit fields on group hash\n if groupRateMax > 0 then\n local curRateMax = tonumber(redis.call('HGET', groupHashKey, 'rateMax')) or 0\n if curRateMax ~= groupRateMax then\n redis.call('HSET', groupHashKey, 'rateMax', tostring(groupRateMax))\n end\n local curRateDuration = tonumber(redis.call('HGET', groupHashKey, 'rateDuration')) or 0\n if curRateDuration ~= groupRateDuration then\n redis.call('HSET', groupHashKey, 'rateDuration', tostring(groupRateDuration))\n end\n else\n -- Clear stale rate limit fields if group was previously rate-limited\n local oldRateMax = tonumber(redis.call('HGET', groupHashKey, 'rateMax')) or 0\n if oldRateMax > 0 then\n redis.call('HDEL', groupHashKey, 'rateMax', 'rateDuration', 'rateWindowStart', 'rateCount')\n end\n end\n -- Upsert token bucket fields on group hash\n if tbCapacity > 0 then\n local curTbCap = tonumber(redis.call('HGET', groupHashKey, 'tbCapacity')) or 0\n if curTbCap ~= tbCapacity then\n redis.call('HSET', groupHashKey, 'tbCapacity', tostring(tbCapacity))\n end\n local curTbRate = tonumber(redis.call('HGET', groupHashKey, 'tbRefillRate')) or 0\n if curTbRate ~= tbRefillRate then\n redis.call('HSET', groupHashKey, 'tbRefillRate', tostring(tbRefillRate))\n end\n -- Initialize tokens on first setup\n if curTbCap == 0 then\n redis.call('HSET', groupHashKey,\n 'tbTokens', tostring(tbCapacity),\n 'tbLastRefill', tostring(timestamp),\n 'tbRefillRemainder', '0')\n end\n -- Validate cost <= capacity at enqueue\n -- Validate cost (explicit or default 1000 millitokens) against 
capacity\n local effectiveCost = (jobCost > 0) and jobCost or 1000\n if effectiveCost > tbCapacity then\n return 'ERR:COST_EXCEEDS_CAPACITY'\n end\n else\n -- Clear stale tb fields\n local oldTbCap = tonumber(redis.call('HGET', groupHashKey, 'tbCapacity')) or 0\n if oldTbCap > 0 then\n redis.call('HDEL', groupHashKey, 'tbCapacity', 'tbRefillRate', 'tbTokens', 'tbLastRefill', 'tbRefillRemainder')\n end\n end\n end\n local hashFields = {\n 'id', jobIdStr,\n 'name', jobName,\n 'data', jobData,\n 'opts', jobOpts,\n 'timestamp', tostring(timestamp),\n 'attemptsMade', '0',\n 'delay', tostring(delay),\n 'priority', tostring(priority),\n 'maxAttempts', tostring(maxAttempts)\n }\n if useGroupConcurrency then\n hashFields[#hashFields + 1] = 'groupKey'\n hashFields[#hashFields + 1] = orderingKey\n elseif orderingKey ~= '' then\n hashFields[#hashFields + 1] = 'orderingKey'\n hashFields[#hashFields + 1] = orderingKey\n hashFields[#hashFields + 1] = 'orderingSeq'\n hashFields[#hashFields + 1] = tostring(orderingSeq)\n end\n if jobCost > 0 then\n hashFields[#hashFields + 1] = 'cost'\n hashFields[#hashFields + 1] = tostring(jobCost)\n end\n if parentId ~= '' then\n hashFields[#hashFields + 1] = 'parentId'\n hashFields[#hashFields + 1] = parentId\n end\n if delay > 0 or priority > 0 then\n hashFields[#hashFields + 1] = 'state'\n hashFields[#hashFields + 1] = delay > 0 and 'delayed' or 'prioritized'\n else\n hashFields[#hashFields + 1] = 'state'\n hashFields[#hashFields + 1] = 'waiting'\n end\n redis.call('HSET', jobKey, unpack(hashFields))\n if delay > 0 then\n local score = priority * PRIORITY_SHIFT + (timestamp + delay)\n redis.call('ZADD', scheduledKey, score, jobIdStr)\n elseif priority > 0 then\n local score = priority * PRIORITY_SHIFT\n redis.call('ZADD', scheduledKey, score, jobIdStr)\n else\n redis.call('XADD', streamKey, '*', 'jobId', jobIdStr)\n end\n emitEvent(eventsKey, 'added', jobIdStr, {'name', jobName})\n return 
jobIdStr\nend)\n\nredis.register_function('glidemq_promote', function(keys, args)\n local scheduledKey = keys[1]\n local streamKey = keys[2]\n local eventsKey = keys[3]\n local now = tonumber(args[1])\n local MAX_PROMOTIONS = 1000\n local count = 0\n local cursorMin = 0\n while count < MAX_PROMOTIONS do\n local nextEntry = redis.call('ZRANGEBYSCORE', scheduledKey, string.format('%.0f', cursorMin), '+inf', 'WITHSCORES', 'LIMIT', 0, 1)\n if not nextEntry or #nextEntry == 0 then\n break\n end\n local firstScore = tonumber(nextEntry[2]) or 0\n local priority = math.floor(firstScore / PRIORITY_SHIFT)\n local minScore = priority * PRIORITY_SHIFT\n local maxDueScore = minScore + now\n local remaining = MAX_PROMOTIONS - count\n local members = redis.call(\n 'ZRANGEBYSCORE',\n scheduledKey,\n string.format('%.0f', minScore),\n string.format('%.0f', maxDueScore),\n 'LIMIT',\n 0,\n remaining\n )\n for i = 1, #members do\n local jobId = members[i]\n redis.call('XADD', streamKey, '*', 'jobId', jobId)\n redis.call('ZREM', scheduledKey, jobId)\n local prefix = string.sub(scheduledKey, 1, #scheduledKey - 9)\n local jobKey = prefix .. 'job:' .. 
jobId\n redis.call('HSET', jobKey, 'state', 'waiting')\n emitEvent(eventsKey, 'promoted', jobId, nil)\n count = count + 1\n end\n cursorMin = (priority + 1) * PRIORITY_SHIFT\n end\n return count\nend)\n\nredis.register_function('glidemq_complete', function(keys, args)\n local streamKey = keys[1]\n local completedKey = keys[2]\n local eventsKey = keys[3]\n local jobKey = keys[4]\n local jobId = args[1]\n local entryId = args[2]\n local returnvalue = args[3]\n local timestamp = tonumber(args[4])\n local group = args[5]\n local removeMode = args[6] or '0'\n local removeCount = tonumber(args[7]) or 0\n local removeAge = tonumber(args[8]) or 0\n local depsMember = args[9] or ''\n local parentId = args[10] or ''\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n redis.call('ZADD', completedKey, timestamp, jobId)\n redis.call('HSET', jobKey,\n 'state', 'completed',\n 'returnvalue', returnvalue,\n 'finishedOn', tostring(timestamp)\n )\n markOrderingDone(jobKey, jobId)\n releaseGroupSlotAndPromote(jobKey, jobId, timestamp)\n emitEvent(eventsKey, 'completed', jobId, {'returnvalue', returnvalue})\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n if removeMode == 'true' then\n redis.call('ZREM', completedKey, jobId)\n redis.call('DEL', jobKey)\n elseif removeMode == 'count' and removeCount > 0 then\n local total = redis.call('ZCARD', completedKey)\n if total > removeCount then\n local excess = redis.call('ZRANGE', completedKey, 0, total - removeCount - 1)\n for i = 1, #excess do\n local oldId = excess[i]\n redis.call('DEL', prefix .. 'job:' .. oldId)\n redis.call('ZREM', completedKey, oldId)\n end\n end\n elseif removeMode == 'age_count' then\n if removeAge > 0 then\n local cutoff = timestamp - (removeAge * 1000)\n local old = redis.call('ZRANGEBYSCORE', completedKey, '0', tostring(cutoff))\n for i = 1, #old do\n local oldId = old[i]\n redis.call('DEL', prefix .. 'job:' .. 
oldId)\n redis.call('ZREM', completedKey, oldId)\n end\n end\n if removeCount > 0 then\n local total = redis.call('ZCARD', completedKey)\n if total > removeCount then\n local excess = redis.call('ZRANGE', completedKey, 0, total - removeCount - 1)\n for i = 1, #excess do\n local oldId = excess[i]\n redis.call('DEL', prefix .. 'job:' .. oldId)\n redis.call('ZREM', completedKey, oldId)\n end\n end\n end\n end\n if depsMember ~= '' and parentId ~= '' and #keys >= 8 then\n local parentDepsKey = keys[5]\n local parentJobKey = keys[6]\n local parentStreamKey = keys[7]\n local parentEventsKey = keys[8]\n local doneCount = redis.call('HINCRBY', parentJobKey, 'depsCompleted', 1)\n local totalDeps = redis.call('SCARD', parentDepsKey)\n local remaining = totalDeps - doneCount\n if remaining <= 0 then\n redis.call('HSET', parentJobKey, 'state', 'waiting')\n redis.call('XADD', parentStreamKey, '*', 'jobId', parentId)\n emitEvent(parentEventsKey, 'active', parentId, nil)\n end\n end\n return 1\nend)\n\nredis.register_function('glidemq_completeAndFetchNext', function(keys, args)\n local streamKey = keys[1]\n local completedKey = keys[2]\n local eventsKey = keys[3]\n local jobKey = keys[4]\n local jobId = args[1]\n local entryId = args[2]\n local returnvalue = args[3]\n local timestamp = tonumber(args[4])\n local group = args[5]\n local consumer = args[6]\n local removeMode = args[7] or '0'\n local removeCount = tonumber(args[8]) or 0\n local removeAge = tonumber(args[9]) or 0\n local depsMember = args[10] or ''\n local parentId = args[11] or ''\n\n -- Phase 1: Complete current job (same as glidemq_complete)\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n redis.call('ZADD', completedKey, timestamp, jobId)\n redis.call('HSET', jobKey,\n 'state', 'completed',\n 'returnvalue', returnvalue,\n 'finishedOn', tostring(timestamp)\n )\n markOrderingDone(jobKey, jobId)\n releaseGroupSlotAndPromote(jobKey, jobId, timestamp)\n emitEvent(eventsKey, 
'completed', jobId, {'returnvalue', returnvalue})\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n\n -- Retention cleanup\n if removeMode == 'true' then\n redis.call('ZREM', completedKey, jobId)\n redis.call('DEL', jobKey)\n elseif removeMode == 'count' and removeCount > 0 then\n local total = redis.call('ZCARD', completedKey)\n if total > removeCount then\n local excess = redis.call('ZRANGE', completedKey, 0, total - removeCount - 1)\n for i = 1, #excess do\n redis.call('DEL', prefix .. 'job:' .. excess[i])\n redis.call('ZREM', completedKey, excess[i])\n end\n end\n end\n\n -- Parent deps\n if depsMember ~= '' and parentId ~= '' and #keys >= 8 then\n local parentDepsKey = keys[5]\n local parentJobKey = keys[6]\n local parentStreamKey = keys[7]\n local parentEventsKey = keys[8]\n local doneCount = redis.call('HINCRBY', parentJobKey, 'depsCompleted', 1)\n local totalDeps = redis.call('SCARD', parentDepsKey)\n if totalDeps - doneCount <= 0 then\n redis.call('HSET', parentJobKey, 'state', 'waiting')\n redis.call('XADD', parentStreamKey, '*', 'jobId', parentId)\n emitEvent(parentEventsKey, 'active', parentId, nil)\n end\n end\n\n -- Phase 2: Fetch next job (non-blocking XREADGROUP)\n local nextEntries = redis.call('XREADGROUP', 'GROUP', group, consumer, 'COUNT', 1, 'STREAMS', streamKey, '>')\n if not nextEntries or #nextEntries == 0 then\n return cjson.encode({completed = jobId, next = false})\n end\n local streamData = nextEntries[1]\n local entries = streamData[2]\n if not entries or #entries == 0 then\n return cjson.encode({completed = jobId, next = false})\n end\n local nextEntry = entries[1]\n local nextEntryId = nextEntry[1]\n local nextFields = nextEntry[2]\n local nextJobId = nil\n for i = 1, #nextFields, 2 do\n if nextFields[i] == 'jobId' then\n nextJobId = nextFields[i + 1]\n break\n end\n end\n if not nextJobId then\n return cjson.encode({completed = jobId, next = false})\n end\n\n -- Phase 3: Activate next job (same as moveToActive)\n 
local nextJobKey = prefix .. 'job:' .. nextJobId\n local nextExists = redis.call('EXISTS', nextJobKey)\n if nextExists == 0 then\n return cjson.encode({completed = jobId, next = false, nextEntryId = nextEntryId})\n end\n local revoked = redis.call('HGET', nextJobKey, 'revoked')\n if revoked == '1' then\n return cjson.encode({completed = jobId, next = 'REVOKED', nextJobId = nextJobId, nextEntryId = nextEntryId})\n end\n local nextGroupKey = redis.call('HGET', nextJobKey, 'groupKey')\n if nextGroupKey and nextGroupKey ~= '' then\n local nextGroupHashKey = prefix .. 'group:' .. nextGroupKey\n -- Load all group fields in one call\n local nGrpFields = redis.call('HGETALL', nextGroupHashKey)\n local nGrp = {}\n for nf = 1, #nGrpFields, 2 do nGrp[nGrpFields[nf]] = nGrpFields[nf + 1] end\n local nextMaxConc = tonumber(nGrp.maxConcurrency) or 0\n local nextActive = tonumber(nGrp.active) or 0\n -- Concurrency gate first (avoids burning rate/token slots on parked jobs)\n if nextMaxConc > 0 and nextActive >= nextMaxConc then\n redis.call('XACK', streamKey, group, nextEntryId)\n redis.call('XDEL', streamKey, nextEntryId)\n local nextWaitListKey = prefix .. 'groupq:' .. nextGroupKey\n redis.call('RPUSH', nextWaitListKey, nextJobId)\n redis.call('HSET', nextJobKey, 'state', 'group-waiting')\n return cjson.encode({completed = jobId, next = false})\n end\n -- Token bucket gate (read-only)\n local nextTbCapacity = tonumber(nGrp.tbCapacity) or 0\n local nextTbBlocked = false\n local nextTbDelay = 0\n local nextTbTokens = 0\n local nextJobCostVal = 0\n if nextTbCapacity > 0 then\n nextTbTokens = tbRefill(nextGroupHashKey, nGrp, tonumber(timestamp))\n nextJobCostVal = tonumber(redis.call('HGET', nextJobKey, 'cost')) or 1000\n -- DLQ guard: cost > capacity\n if nextJobCostVal > nextTbCapacity then\n redis.call('XACK', streamKey, group, nextEntryId)\n redis.call('XDEL', streamKey, nextEntryId)\n redis.call('ZADD', prefix .. 
'failed', tonumber(timestamp), nextJobId)\n redis.call('HSET', nextJobKey,\n 'state', 'failed',\n 'failedReason', 'cost exceeds token bucket capacity',\n 'finishedOn', tostring(timestamp))\n emitEvent(prefix .. 'events', 'failed', nextJobId, {'failedReason', 'cost exceeds token bucket capacity'})\n return cjson.encode({completed = jobId, next = false})\n end\n if nextTbTokens < nextJobCostVal then\n nextTbBlocked = true\n local nextTbRefillRateVal = math.max(tonumber(nGrp.tbRefillRate) or 0, 1)\n nextTbDelay = math.ceil((nextJobCostVal - nextTbTokens) * 1000 / nextTbRefillRateVal)\n end\n end\n -- Sliding window gate (read-only)\n local nextRateMax = tonumber(nGrp.rateMax) or 0\n local nextRlBlocked = false\n local nextRlDelay = 0\n if nextRateMax > 0 then\n local nextRateDuration = tonumber(nGrp.rateDuration) or 0\n local nextRateWindowStart = tonumber(nGrp.rateWindowStart) or 0\n local nextRateCount = tonumber(nGrp.rateCount) or 0\n if nextRateDuration > 0 and timestamp - nextRateWindowStart < nextRateDuration and nextRateCount >= nextRateMax then\n nextRlBlocked = true\n nextRlDelay = (nextRateWindowStart + nextRateDuration) - timestamp\n end\n end\n -- If ANY gate blocked: park + register\n if nextTbBlocked or nextRlBlocked then\n redis.call('XACK', streamKey, group, nextEntryId)\n redis.call('XDEL', streamKey, nextEntryId)\n local nextWaitListKey = prefix .. 'groupq:' .. nextGroupKey\n redis.call('RPUSH', nextWaitListKey, nextJobId)\n redis.call('HSET', nextJobKey, 'state', 'group-waiting')\n local nextMaxDelay = math.max(nextTbDelay, nextRlDelay)\n local rateLimitedKey = prefix .. 
'ratelimited'\n redis.call('ZADD', rateLimitedKey, tonumber(timestamp) + nextMaxDelay, nextGroupKey)\n return cjson.encode({completed = jobId, next = false})\n end\n -- All gates passed: mutate state\n if nextTbCapacity > 0 then\n redis.call('HINCRBY', nextGroupHashKey, 'tbTokens', -nextJobCostVal)\n end\n if nextRateMax > 0 then\n local nextRateDuration = tonumber(nGrp.rateDuration) or 0\n if nextRateDuration > 0 then\n local nextRateWindowStart = tonumber(nGrp.rateWindowStart) or 0\n if timestamp - nextRateWindowStart >= nextRateDuration then\n redis.call('HSET', nextGroupHashKey, 'rateWindowStart', tostring(timestamp), 'rateCount', '1')\n else\n redis.call('HINCRBY', nextGroupHashKey, 'rateCount', 1)\n end\n end\n end\n redis.call('HINCRBY', nextGroupHashKey, 'active', 1)\n end\n redis.call('HSET', nextJobKey, 'state', 'active', 'processedOn', tostring(timestamp), 'lastActive', tostring(timestamp))\n local nextHash = redis.call('HGETALL', nextJobKey)\n return cjson.encode({completed = jobId, next = nextHash, nextJobId = nextJobId, nextEntryId = nextEntryId})\nend)\n\nredis.register_function('glidemq_fail', function(keys, args)\n local streamKey = keys[1]\n local failedKey = keys[2]\n local scheduledKey = keys[3]\n local eventsKey = keys[4]\n local jobKey = keys[5]\n local jobId = args[1]\n local entryId = args[2]\n local failedReason = args[3]\n local timestamp = tonumber(args[4])\n local maxAttempts = tonumber(args[5]) or 0\n local backoffDelay = tonumber(args[6]) or 0\n local group = args[7]\n local removeMode = args[8] or '0'\n local removeCount = tonumber(args[9]) or 0\n local removeAge = tonumber(args[10]) or 0\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n local attemptsMade = redis.call('HINCRBY', jobKey, 'attemptsMade', 1)\n if maxAttempts > 0 and attemptsMade < maxAttempts then\n local retryAt = timestamp + backoffDelay\n local priority = tonumber(redis.call('HGET', jobKey, 'priority')) or 0\n local score = 
priority * PRIORITY_SHIFT + retryAt\n redis.call('ZADD', scheduledKey, score, jobId)\n redis.call('HSET', jobKey,\n 'state', 'delayed',\n 'failedReason', failedReason,\n 'processedOn', tostring(timestamp)\n )\n releaseGroupSlotAndPromote(jobKey, jobId, timestamp)\n emitEvent(eventsKey, 'retrying', jobId, {\n 'failedReason', failedReason,\n 'attemptsMade', tostring(attemptsMade),\n 'delay', tostring(backoffDelay)\n })\n return 'retrying'\n else\n redis.call('ZADD', failedKey, timestamp, jobId)\n redis.call('HSET', jobKey,\n 'state', 'failed',\n 'failedReason', failedReason,\n 'finishedOn', tostring(timestamp),\n 'processedOn', tostring(timestamp)\n )\n markOrderingDone(jobKey, jobId)\n releaseGroupSlotAndPromote(jobKey, jobId, timestamp)\n emitEvent(eventsKey, 'failed', jobId, {'failedReason', failedReason})\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n if removeMode == 'true' then\n redis.call('ZREM', failedKey, jobId)\n redis.call('DEL', jobKey)\n elseif removeMode == 'count' and removeCount > 0 then\n local total = redis.call('ZCARD', failedKey)\n if total > removeCount then\n local excess = redis.call('ZRANGE', failedKey, 0, total - removeCount - 1)\n for i = 1, #excess do\n local oldId = excess[i]\n redis.call('DEL', prefix .. 'job:' .. oldId)\n redis.call('ZREM', failedKey, oldId)\n end\n end\n elseif removeMode == 'age_count' then\n if removeAge > 0 then\n local cutoff = timestamp - (removeAge * 1000)\n local old = redis.call('ZRANGEBYSCORE', failedKey, '0', tostring(cutoff))\n for i = 1, #old do\n local oldId = old[i]\n redis.call('DEL', prefix .. 'job:' .. oldId)\n redis.call('ZREM', failedKey, oldId)\n end\n end\n if removeCount > 0 then\n local total = redis.call('ZCARD', failedKey)\n if total > removeCount then\n local excess = redis.call('ZRANGE', failedKey, 0, total - removeCount - 1)\n for i = 1, #excess do\n local oldId = excess[i]\n redis.call('DEL', prefix .. 'job:' .. 
oldId)\n redis.call('ZREM', failedKey, oldId)\n end\n end\n end\n end\n return 'failed'\n end\nend)\n\nredis.register_function('glidemq_reclaimStalled', function(keys, args)\n local streamKey = keys[1]\n local eventsKey = keys[2]\n local group = args[1]\n local consumer = args[2]\n local minIdleMs = tonumber(args[3])\n local maxStalledCount = tonumber(args[4]) or 1\n local timestamp = tonumber(args[5])\n local failedKey = args[6]\n local result = redis.call('XAUTOCLAIM', streamKey, group, consumer, minIdleMs, '0-0')\n local entries = result[2]\n if not entries or #entries == 0 then\n return 0\n end\n local prefix = string.sub(streamKey, 1, #streamKey - 6)\n local count = 0\n for i = 1, #entries do\n local entry = entries[i]\n local entryId = entry[1]\n local fields = entry[2]\n local jobId = nil\n if type(fields) == 'table' then\n for j = 1, #fields, 2 do\n if fields[j] == 'jobId' then\n jobId = fields[j + 1]\n break\n end\n end\n end\n if jobId then\n local jobKey = prefix .. 'job:' .. 
jobId\n local lastActive = tonumber(redis.call('HGET', jobKey, 'lastActive'))\n if lastActive and (timestamp - lastActive) < minIdleMs then\n count = count + 1\n else\n local stalledCount = redis.call('HINCRBY', jobKey, 'stalledCount', 1)\n if stalledCount > maxStalledCount then\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n redis.call('ZADD', failedKey, timestamp, jobId)\n redis.call('HSET', jobKey,\n 'state', 'failed',\n 'failedReason', 'job stalled more than maxStalledCount',\n 'finishedOn', tostring(timestamp)\n )\n markOrderingDone(jobKey, jobId)\n releaseGroupSlotAndPromote(jobKey, jobId, timestamp)\n emitEvent(eventsKey, 'failed', jobId, {\n 'failedReason', 'job stalled more than maxStalledCount'\n })\n else\n redis.call('HSET', jobKey, 'state', 'active')\n emitEvent(eventsKey, 'stalled', jobId, nil)\n end\n count = count + 1\n end\n end\n end\n return count\nend)\n\nredis.register_function('glidemq_pause', function(keys, args)\n local metaKey = keys[1]\n local eventsKey = keys[2]\n redis.call('HSET', metaKey, 'paused', '1')\n emitEvent(eventsKey, 'paused', '0', nil)\n return 1\nend)\n\nredis.register_function('glidemq_resume', function(keys, args)\n local metaKey = keys[1]\n local eventsKey = keys[2]\n redis.call('HSET', metaKey, 'paused', '0')\n emitEvent(eventsKey, 'resumed', '0', nil)\n return 1\nend)\n\nredis.register_function('glidemq_dedup', function(keys, args)\n local dedupKey = keys[1]\n local idKey = keys[2]\n local streamKey = keys[3]\n local scheduledKey = keys[4]\n local eventsKey = keys[5]\n local dedupId = args[1]\n local ttlMs = tonumber(args[2]) or 0\n local mode = args[3]\n local jobName = args[4]\n local jobData = args[5]\n local jobOpts = args[6]\n local timestamp = tonumber(args[7])\n local delay = tonumber(args[8]) or 0\n local priority = tonumber(args[9]) or 0\n local parentId = args[10] or ''\n local maxAttempts = tonumber(args[11]) or 0\n local orderingKey = args[12] or ''\n local 
groupConcurrency = tonumber(args[13]) or 0\n local groupRateMax = tonumber(args[14]) or 0\n local groupRateDuration = tonumber(args[15]) or 0\n local tbCapacity = tonumber(args[16]) or 0\n local tbRefillRate = tonumber(args[17]) or 0\n local jobCost = tonumber(args[18]) or 0\n local prefix = string.sub(idKey, 1, #idKey - 2)\n local existing = redis.call('HGET', dedupKey, dedupId)\n if mode == 'simple' then\n if existing then\n local sep = string.find(existing, ':')\n if sep then\n local existingJobId = string.sub(existing, 1, sep - 1)\n local jobKey = prefix .. 'job:' .. existingJobId\n local state = redis.call('HGET', jobKey, 'state')\n if state and state ~= 'completed' and state ~= 'failed' then\n return 'skipped'\n end\n end\n end\n elseif mode == 'throttle' then\n if existing and ttlMs > 0 then\n local sep = string.find(existing, ':')\n if sep then\n local storedTs = tonumber(string.sub(existing, sep + 1))\n if storedTs and (timestamp - storedTs) < ttlMs then\n return 'skipped'\n end\n end\n end\n elseif mode == 'debounce' then\n if existing then\n local sep = string.find(existing, ':')\n if sep then\n local existingJobId = string.sub(existing, 1, sep - 1)\n local jobKey = prefix .. 'job:' .. existingJobId\n local state = redis.call('HGET', jobKey, 'state')\n if state == 'delayed' or state == 'prioritized' then\n redis.call('ZREM', scheduledKey, existingJobId)\n markOrderingDone(jobKey, existingJobId)\n redis.call('DEL', jobKey)\n emitEvent(eventsKey, 'removed', existingJobId, nil)\n elseif state and state ~= 'completed' and state ~= 'failed' then\n return 'skipped'\n end\n end\n end\n end\n local jobId = redis.call('INCR', idKey)\n local jobIdStr = tostring(jobId)\n local jobKey = prefix .. 'job:' .. jobIdStr\n local useGroupConcurrency = (orderingKey ~= '' and (groupConcurrency > 1 or groupRateMax > 0 or tbCapacity > 0))\n local orderingSeq = 0\n if orderingKey ~= '' and not useGroupConcurrency then\n local orderingMetaKey = prefix .. 
'ordering'\n orderingSeq = redis.call('HINCRBY', orderingMetaKey, orderingKey, 1)\n end\n if useGroupConcurrency then\n local groupHashKey = prefix .. 'group:' .. orderingKey\n local curMax = tonumber(redis.call('HGET', groupHashKey, 'maxConcurrency')) or 0\n if curMax ~= groupConcurrency then\n redis.call('HSET', groupHashKey, 'maxConcurrency', tostring(groupConcurrency))\n end\n if curMax == 0 and groupConcurrency <= 1 then\n redis.call('HSET', groupHashKey, 'maxConcurrency', '1')\n end\n if groupRateMax > 0 then\n local curRateMax = tonumber(redis.call('HGET', groupHashKey, 'rateMax')) or 0\n if curRateMax ~= groupRateMax then\n redis.call('HSET', groupHashKey, 'rateMax', tostring(groupRateMax))\n end\n local curRateDuration = tonumber(redis.call('HGET', groupHashKey, 'rateDuration')) or 0\n if curRateDuration ~= groupRateDuration then\n redis.call('HSET', groupHashKey, 'rateDuration', tostring(groupRateDuration))\n end\n else\n local oldRateMax = tonumber(redis.call('HGET', groupHashKey, 'rateMax')) or 0\n if oldRateMax > 0 then\n redis.call('HDEL', groupHashKey, 'rateMax', 'rateDuration', 'rateWindowStart', 'rateCount')\n end\n end\n -- Upsert token bucket fields on group hash\n if tbCapacity > 0 then\n local curTbCap = tonumber(redis.call('HGET', groupHashKey, 'tbCapacity')) or 0\n if curTbCap ~= tbCapacity then\n redis.call('HSET', groupHashKey, 'tbCapacity', tostring(tbCapacity))\n end\n local curTbRate = tonumber(redis.call('HGET', groupHashKey, 'tbRefillRate')) or 0\n if curTbRate ~= tbRefillRate then\n redis.call('HSET', groupHashKey, 'tbRefillRate', tostring(tbRefillRate))\n end\n -- Initialize tokens on first setup\n if curTbCap == 0 then\n redis.call('HSET', groupHashKey,\n 'tbTokens', tostring(tbCapacity),\n 'tbLastRefill', tostring(timestamp),\n 'tbRefillRemainder', '0')\n end\n -- Validate cost <= capacity at enqueue\n -- Validate cost (explicit or default 1000 millitokens) against capacity\n local effectiveCost = (jobCost > 0) and jobCost or 
1000\n if effectiveCost > tbCapacity then\n return 'ERR:COST_EXCEEDS_CAPACITY'\n end\n else\n -- Clear stale tb fields\n local oldTbCap = tonumber(redis.call('HGET', groupHashKey, 'tbCapacity')) or 0\n if oldTbCap > 0 then\n redis.call('HDEL', groupHashKey, 'tbCapacity', 'tbRefillRate', 'tbTokens', 'tbLastRefill', 'tbRefillRemainder')\n end\n end\n end\n local hashFields = {\n 'id', jobIdStr,\n 'name', jobName,\n 'data', jobData,\n 'opts', jobOpts,\n 'timestamp', tostring(timestamp),\n 'attemptsMade', '0',\n 'delay', tostring(delay),\n 'priority', tostring(priority),\n 'maxAttempts', tostring(maxAttempts)\n }\n if useGroupConcurrency then\n hashFields[#hashFields + 1] = 'groupKey'\n hashFields[#hashFields + 1] = orderingKey\n elseif orderingKey ~= '' then\n hashFields[#hashFields + 1] = 'orderingKey'\n hashFields[#hashFields + 1] = orderingKey\n hashFields[#hashFields + 1] = 'orderingSeq'\n hashFields[#hashFields + 1] = tostring(orderingSeq)\n end\n if jobCost > 0 then\n hashFields[#hashFields + 1] = 'cost'\n hashFields[#hashFields + 1] = tostring(jobCost)\n end\n if parentId ~= '' then\n hashFields[#hashFields + 1] = 'parentId'\n hashFields[#hashFields + 1] = parentId\n end\n if delay > 0 or priority > 0 then\n hashFields[#hashFields + 1] = 'state'\n hashFields[#hashFields + 1] = delay > 0 and 'delayed' or 'prioritized'\n else\n hashFields[#hashFields + 1] = 'state'\n hashFields[#hashFields + 1] = 'waiting'\n end\n redis.call('HSET', jobKey, unpack(hashFields))\n if delay > 0 then\n local score = priority * PRIORITY_SHIFT + (timestamp + delay)\n redis.call('ZADD', scheduledKey, score, jobIdStr)\n elseif priority > 0 then\n local score = priority * PRIORITY_SHIFT\n redis.call('ZADD', scheduledKey, score, jobIdStr)\n else\n redis.call('XADD', streamKey, '*', 'jobId', jobIdStr)\n end\n redis.call('HSET', dedupKey, dedupId, jobIdStr .. ':' .. 
tostring(timestamp))\n emitEvent(eventsKey, 'added', jobIdStr, {'name', jobName})\n return jobIdStr\nend)\n\nredis.register_function('glidemq_rateLimit', function(keys, args)\n local rateKey = keys[1]\n local metaKey = keys[2]\n local maxPerWindow = tonumber(args[1])\n local windowDuration = tonumber(args[2])\n local now = tonumber(args[3])\n -- Fallback: read rate limit config from meta if not provided inline\n if maxPerWindow <= 0 then\n maxPerWindow = tonumber(redis.call('HGET', metaKey, 'rateLimitMax')) or 0\n windowDuration = tonumber(redis.call('HGET', metaKey, 'rateLimitDuration')) or 0\n if maxPerWindow <= 0 then return 0 end\n end\n local windowStart = tonumber(redis.call('HGET', rateKey, 'windowStart')) or 0\n local count = tonumber(redis.call('HGET', rateKey, 'count')) or 0\n if now - windowStart >= windowDuration then\n redis.call('HSET', rateKey, 'windowStart', tostring(now), 'count', '1')\n return 0\n end\n if count >= maxPerWindow then\n local delayMs = windowDuration - (now - windowStart)\n return delayMs\n end\n redis.call('HSET', rateKey, 'count', tostring(count + 1))\n return 0\nend)\n\nredis.register_function('glidemq_promoteRateLimited', function(keys, args)\n local rateLimitedKey = keys[1]\n local streamKey = keys[2]\n local now = tonumber(args[1])\n -- Derive prefix from the server-validated key instead of caller-supplied arg\n local prefix = string.sub(rateLimitedKey, 1, #rateLimitedKey - #'ratelimited')\n local expired = redis.call('ZRANGEBYSCORE', rateLimitedKey, '0', string.format('%.0f', now), 'LIMIT', 0, 100)\n if not expired or #expired == 0 then return 0 end\n local promoted = 0\n for i = 1, #expired do\n local gk = expired[i]\n redis.call('ZREM', rateLimitedKey, gk)\n local groupHashKey = prefix .. 'group:' .. gk\n local waitListKey = prefix .. 'groupq:' .. 
gk\n -- Load all group fields in one call for rate limit + token bucket checks\n local prGrpFields = redis.call('HGETALL', groupHashKey)\n local prGrp = {}\n for pf = 1, #prGrpFields, 2 do prGrp[prGrpFields[pf]] = prGrpFields[pf + 1] end\n local rateMax = tonumber(prGrp.rateMax) or 0\n local maxConc = tonumber(prGrp.maxConcurrency) or 0\n local active = tonumber(prGrp.active) or 0\n -- Token bucket pre-check: peek head job cost before promoting\n local prTbCap = tonumber(prGrp.tbCapacity) or 0\n local tbCheckPassed = true\n if prTbCap > 0 then\n local prTbTokens = tbRefill(groupHashKey, prGrp, now)\n local headJobId = redis.call('LINDEX', waitListKey, 0)\n if headJobId then\n local headJobKey = prefix .. 'job:' .. headJobId\n -- Tombstone guard\n if redis.call('EXISTS', headJobKey) == 0 then\n redis.call('LPOP', waitListKey)\n tbCheckPassed = false\n end\n if tbCheckPassed then\n local headCost = tonumber(redis.call('HGET', headJobKey, 'cost')) or 1000\n -- DLQ guard: cost > capacity\n if headCost > prTbCap then\n redis.call('LPOP', waitListKey)\n redis.call('ZADD', prefix .. 'failed', now, headJobId)\n redis.call('HSET', headJobKey,\n 'state', 'failed',\n 'failedReason', 'cost exceeds token bucket capacity',\n 'finishedOn', tostring(now))\n emitEvent(prefix .. 
'events', 'failed', headJobId, {'failedReason', 'cost exceeds token bucket capacity'})\n tbCheckPassed = false\n end\n if tbCheckPassed and prTbTokens < headCost then\n -- Not enough tokens: re-register with calculated delay\n local prTbRate = math.max(tonumber(prGrp.tbRefillRate) or 0, 1)\n local prTbDelay = math.ceil((headCost - prTbTokens) * 1000 / prTbRate)\n redis.call('ZADD', rateLimitedKey, now + prTbDelay, gk)\n tbCheckPassed = false\n end\n end\n end\n end\n if tbCheckPassed then\n -- Promote up to min(rateMax, available concurrency) jobs.\n -- Do NOT touch rateCount/rateWindowStart here - moveToActive handles\n -- window reset and counting when the worker picks up the promoted jobs.\n local canPromote = 1000\n if rateMax > 0 then\n canPromote = math.min(canPromote, rateMax)\n end\n if maxConc > 0 then\n canPromote = math.min(canPromote, math.max(0, maxConc - active))\n end\n for j = 1, canPromote do\n local nextJobId = redis.call('LPOP', waitListKey)\n if not nextJobId then break end\n redis.call('XADD', streamKey, '*', 'jobId', nextJobId)\n local nextJobKey = prefix .. 'job:' .. 
nextJobId\n redis.call('HSET', nextJobKey, 'state', 'waiting')\n promoted = promoted + 1\n end\n end\n end\n return promoted\nend)\n\nredis.register_function('glidemq_checkConcurrency', function(keys, args)\n local metaKey = keys[1]\n local streamKey = keys[2]\n local group = args[1]\n local gc = tonumber(redis.call('HGET', metaKey, 'globalConcurrency')) or 0\n if gc <= 0 then\n return -1\n end\n local pending = redis.call('XPENDING', streamKey, group)\n local pendingCount = tonumber(pending[1]) or 0\n local remaining = gc - pendingCount\n if remaining <= 0 then\n return 0\n end\n return remaining\nend)\n\nredis.register_function('glidemq_moveToActive', function(keys, args)\n local jobKey = keys[1]\n local streamKey = keys[2] or ''\n local timestamp = args[1]\n local entryId = args[2] or ''\n local group = args[3] or ''\n local jobId = args[4] or ''\n local exists = redis.call('EXISTS', jobKey)\n if exists == 0 then\n return ''\n end\n local revoked = redis.call('HGET', jobKey, 'revoked')\n if revoked == '1' then\n return 'REVOKED'\n end\n local groupKey = redis.call('HGET', jobKey, 'groupKey')\n if groupKey and groupKey ~= '' then\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n local groupHashKey = prefix .. 'group:' .. groupKey\n -- Load all group fields in one call\n local grpFields = redis.call('HGETALL', groupHashKey)\n local grp = {}\n for f = 1, #grpFields, 2 do grp[grpFields[f]] = grpFields[f + 1] end\n local maxConc = tonumber(grp.maxConcurrency) or 0\n local active = tonumber(grp.active) or 0\n -- Concurrency gate (checked first to avoid burning rate/token slots on parked jobs)\n if maxConc > 0 and active >= maxConc then\n if streamKey ~= '' and entryId ~= '' and group ~= '' then\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n end\n local waitListKey = prefix .. 'groupq:' .. 
groupKey\n redis.call('RPUSH', waitListKey, jobId)\n redis.call('HSET', jobKey, 'state', 'group-waiting')\n return 'GROUP_FULL'\n end\n -- Token bucket gate (read-only)\n local tbCapacity = tonumber(grp.tbCapacity) or 0\n local tbBlocked = false\n local tbDelay = 0\n local tbTokens = 0\n local jobCostVal = 0\n if tbCapacity > 0 then\n tbTokens = tbRefill(groupHashKey, grp, tonumber(timestamp))\n jobCostVal = tonumber(redis.call('HGET', jobKey, 'cost')) or 1000\n -- DLQ guard: cost > capacity\n if jobCostVal > tbCapacity then\n if streamKey ~= '' and entryId ~= '' and group ~= '' then\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n end\n redis.call('ZADD', prefix .. 'failed', tonumber(timestamp), jobId)\n redis.call('HSET', jobKey,\n 'state', 'failed',\n 'failedReason', 'cost exceeds token bucket capacity',\n 'finishedOn', timestamp)\n emitEvent(prefix .. 'events', 'failed', jobId, {'failedReason', 'cost exceeds token bucket capacity'})\n return 'ERR:COST_EXCEEDS_CAPACITY'\n end\n if tbTokens < jobCostVal then\n tbBlocked = true\n local tbRefillRateVal = tonumber(grp.tbRefillRate) or 0\n if tbRefillRateVal <= 0 then tbRefillRateVal = 1 end\n tbDelay = math.ceil((jobCostVal - tbTokens) * 1000 / tbRefillRateVal)\n end\n end\n -- Sliding window gate (read-only)\n local rateMax = tonumber(grp.rateMax) or 0\n local rlBlocked = false\n local rlDelay = 0\n if rateMax > 0 then\n local rateDuration = tonumber(grp.rateDuration) or 0\n local rateWindowStart = tonumber(grp.rateWindowStart) or 0\n local rateCount = tonumber(grp.rateCount) or 0\n local now = tonumber(timestamp)\n if rateDuration > 0 and now - rateWindowStart < rateDuration and rateCount >= rateMax then\n rlBlocked = true\n rlDelay = (rateWindowStart + rateDuration) - now\n end\n end\n -- If ANY gate blocked: park + register\n if tbBlocked or rlBlocked then\n if streamKey ~= '' and entryId ~= '' and group ~= '' then\n redis.call('XACK', streamKey, group, entryId)\n 
redis.call('XDEL', streamKey, entryId)\n end\n local waitListKey = prefix .. 'groupq:' .. groupKey\n redis.call('RPUSH', waitListKey, jobId)\n redis.call('HSET', jobKey, 'state', 'group-waiting')\n local maxDelay = math.max(tbDelay, rlDelay)\n local rateLimitedKey = prefix .. 'ratelimited'\n redis.call('ZADD', rateLimitedKey, tonumber(timestamp) + maxDelay, groupKey)\n if tbBlocked then return 'GROUP_TOKEN_LIMITED' end\n return 'GROUP_RATE_LIMITED'\n end\n -- All gates passed: mutate state\n if tbCapacity > 0 then\n redis.call('HINCRBY', groupHashKey, 'tbTokens', -jobCostVal)\n end\n if rateMax > 0 then\n local rateDuration = tonumber(grp.rateDuration) or 0\n if rateDuration > 0 then\n local rateWindowStart = tonumber(grp.rateWindowStart) or 0\n local now = tonumber(timestamp)\n if now - rateWindowStart >= rateDuration then\n redis.call('HSET', groupHashKey, 'rateWindowStart', tostring(now), 'rateCount', '1')\n else\n redis.call('HINCRBY', groupHashKey, 'rateCount', 1)\n end\n end\n end\n redis.call('HINCRBY', groupHashKey, 'active', 1)\n end\n redis.call('HSET', jobKey, 'state', 'active', 'processedOn', timestamp, 'lastActive', timestamp)\n local fields = redis.call('HGETALL', jobKey)\n return cjson.encode(fields)\nend)\n\nredis.register_function('glidemq_deferActive', function(keys, args)\n local streamKey = keys[1]\n local jobKey = keys[2]\n local jobId = args[1]\n local entryId = args[2]\n local group = args[3]\n local exists = redis.call('EXISTS', jobKey)\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n if exists == 0 then\n return 0\n end\n redis.call('XADD', streamKey, '*', 'jobId', jobId)\n redis.call('HSET', jobKey, 'state', 'waiting')\n return 1\nend)\n\nredis.register_function('glidemq_addFlow', function(keys, args)\n local parentIdKey = keys[1]\n local parentStreamKey = keys[2]\n local parentScheduledKey = keys[3]\n local parentEventsKey = keys[4]\n local parentName = args[1]\n local parentData = args[2]\n local 
parentOpts = args[3]\n local timestamp = tonumber(args[4])\n local parentDelay = tonumber(args[5]) or 0\n local parentPriority = tonumber(args[6]) or 0\n local parentMaxAttempts = tonumber(args[7]) or 0\n local numChildren = tonumber(args[8])\n local parentJobId = redis.call('INCR', parentIdKey)\n local parentJobIdStr = tostring(parentJobId)\n local parentPrefix = string.sub(parentIdKey, 1, #parentIdKey - 2)\n local parentJobKey = parentPrefix .. 'job:' .. parentJobIdStr\n local depsKey = parentPrefix .. 'deps:' .. parentJobIdStr\n local parentOrderingKey = extractOrderingKeyFromOpts(parentOpts)\n local parentGroupConc = extractGroupConcurrencyFromOpts(parentOpts)\n local parentRateMax, parentRateDuration = extractGroupRateLimitFromOpts(parentOpts)\n local parentTbCapacity, parentTbRefillRate = extractTokenBucketFromOpts(parentOpts)\n local parentCost = extractCostFromOpts(parentOpts)\n local parentUseGroup = (parentOrderingKey ~= '' and (parentGroupConc > 1 or parentRateMax > 0 or parentTbCapacity > 0))\n local parentOrderingSeq = 0\n if parentOrderingKey ~= '' and not parentUseGroup then\n local parentOrderingMetaKey = parentPrefix .. 'ordering'\n parentOrderingSeq = redis.call('HINCRBY', parentOrderingMetaKey, parentOrderingKey, 1)\n end\n local parentHash = {\n 'id', parentJobIdStr,\n 'name', parentName,\n 'data', parentData,\n 'opts', parentOpts,\n 'timestamp', tostring(timestamp),\n 'attemptsMade', '0',\n 'delay', tostring(parentDelay),\n 'priority', tostring(parentPriority),\n 'maxAttempts', tostring(parentMaxAttempts),\n 'state', 'waiting-children'\n }\n if parentUseGroup then\n parentHash[#parentHash + 1] = 'groupKey'\n parentHash[#parentHash + 1] = parentOrderingKey\n local groupHashKey = parentPrefix .. 'group:' .. 
parentOrderingKey\n redis.call('HSET', groupHashKey, 'maxConcurrency', tostring(parentGroupConc > 1 and parentGroupConc or 1))\n redis.call('HSETNX', groupHashKey, 'active', '0')\n if parentRateMax > 0 then\n redis.call('HSET', groupHashKey, 'rateMax', tostring(parentRateMax))\n redis.call('HSET', groupHashKey, 'rateDuration', tostring(parentRateDuration))\n end\n if parentTbCapacity > 0 then\n if parentCost > 0 and parentCost > parentTbCapacity then\n return 'ERR:COST_EXCEEDS_CAPACITY'\n end\n redis.call('HSET', groupHashKey, 'tbCapacity', tostring(parentTbCapacity), 'tbRefillRate', tostring(parentTbRefillRate))\n redis.call('HSETNX', groupHashKey, 'tbTokens', tostring(parentTbCapacity))\n redis.call('HSETNX', groupHashKey, 'tbLastRefill', tostring(timestamp))\n redis.call('HSETNX', groupHashKey, 'tbRefillRemainder', '0')\n end\n elseif parentOrderingKey ~= '' then\n parentHash[#parentHash + 1] = 'orderingKey'\n parentHash[#parentHash + 1] = parentOrderingKey\n parentHash[#parentHash + 1] = 'orderingSeq'\n parentHash[#parentHash + 1] = tostring(parentOrderingSeq)\n end\n if parentCost > 0 then\n parentHash[#parentHash + 1] = 'cost'\n parentHash[#parentHash + 1] = tostring(parentCost)\n end\n redis.call('HSET', parentJobKey, unpack(parentHash))\n -- Pre-validate all children's cost vs capacity before any child writes\n local childArgOffset = 8\n local childKeyOffset = 4\n for i = 1, numChildren do\n local base = childArgOffset + (i - 1) * 8\n local preChildOpts = args[base + 3]\n local preChildTbCap, _ = extractTokenBucketFromOpts(preChildOpts)\n if preChildTbCap > 0 then\n local preChildCost = extractCostFromOpts(preChildOpts)\n local preEffective = (preChildCost > 0) and preChildCost or 1000\n if preEffective > preChildTbCap then\n return 'ERR:COST_EXCEEDS_CAPACITY'\n end\n end\n end\n local childIds = {}\n for i = 1, numChildren do\n local base = childArgOffset + (i - 1) * 8\n local childName = args[base + 1]\n local childData = args[base + 2]\n local childOpts 
= args[base + 3]\n local childDelay = tonumber(args[base + 4]) or 0\n local childPriority = tonumber(args[base + 5]) or 0\n local childMaxAttempts = tonumber(args[base + 6]) or 0\n local childQueuePrefix = args[base + 7]\n local childParentQueue = args[base + 8]\n local ckBase = childKeyOffset + (i - 1) * 4\n local childIdKey = keys[ckBase + 1]\n local childStreamKey = keys[ckBase + 2]\n local childScheduledKey = keys[ckBase + 3]\n local childEventsKey = keys[ckBase + 4]\n local childJobId = redis.call('INCR', childIdKey)\n local childJobIdStr = tostring(childJobId)\n local childPrefix = string.sub(childIdKey, 1, #childIdKey - 2)\n local childJobKey = childPrefix .. 'job:' .. childJobIdStr\n local childOrderingKey = extractOrderingKeyFromOpts(childOpts)\n local childGroupConc = extractGroupConcurrencyFromOpts(childOpts)\n local childRateMax, childRateDuration = extractGroupRateLimitFromOpts(childOpts)\n local childTbCapacity, childTbRefillRate = extractTokenBucketFromOpts(childOpts)\n local childCost = extractCostFromOpts(childOpts)\n local childUseGroup = (childOrderingKey ~= '' and (childGroupConc > 1 or childRateMax > 0 or childTbCapacity > 0))\n local childOrderingSeq = 0\n if childOrderingKey ~= '' and not childUseGroup then\n local childOrderingMetaKey = childPrefix .. 'ordering'\n childOrderingSeq = redis.call('HINCRBY', childOrderingMetaKey, childOrderingKey, 1)\n end\n local childHash = {\n 'id', childJobIdStr,\n 'name', childName,\n 'data', childData,\n 'opts', childOpts,\n 'timestamp', tostring(timestamp),\n 'attemptsMade', '0',\n 'delay', tostring(childDelay),\n 'priority', tostring(childPriority),\n 'maxAttempts', tostring(childMaxAttempts),\n 'parentId', parentJobIdStr,\n 'parentQueue', childParentQueue\n }\n if childUseGroup then\n childHash[#childHash + 1] = 'groupKey'\n childHash[#childHash + 1] = childOrderingKey\n local childGroupHashKey = childPrefix .. 'group:' .. 
childOrderingKey\n redis.call('HSETNX', childGroupHashKey, 'maxConcurrency', tostring(childGroupConc > 1 and childGroupConc or 1))\n redis.call('HSETNX', childGroupHashKey, 'active', '0')\n if childRateMax > 0 then\n redis.call('HSET', childGroupHashKey, 'rateMax', tostring(childRateMax))\n redis.call('HSET', childGroupHashKey, 'rateDuration', tostring(childRateDuration))\n end\n if childTbCapacity > 0 then\n redis.call('HSET', childGroupHashKey, 'tbCapacity', tostring(childTbCapacity), 'tbRefillRate', tostring(childTbRefillRate))\n redis.call('HSETNX', childGroupHashKey, 'tbTokens', tostring(childTbCapacity))\n redis.call('HSETNX', childGroupHashKey, 'tbLastRefill', tostring(timestamp))\n redis.call('HSETNX', childGroupHashKey, 'tbRefillRemainder', '0')\n end\n elseif childOrderingKey ~= '' then\n childHash[#childHash + 1] = 'orderingKey'\n childHash[#childHash + 1] = childOrderingKey\n childHash[#childHash + 1] = 'orderingSeq'\n childHash[#childHash + 1] = tostring(childOrderingSeq)\n end\n if childCost > 0 then\n childHash[#childHash + 1] = 'cost'\n childHash[#childHash + 1] = tostring(childCost)\n end\n if childDelay > 0 or childPriority > 0 then\n childHash[#childHash + 1] = 'state'\n childHash[#childHash + 1] = childDelay > 0 and 'delayed' or 'prioritized'\n else\n childHash[#childHash + 1] = 'state'\n childHash[#childHash + 1] = 'waiting'\n end\n redis.call('HSET', childJobKey, unpack(childHash))\n local depsMember = childQueuePrefix .. ':' .. 
childJobIdStr\n redis.call('SADD', depsKey, depsMember)\n if childDelay > 0 then\n local score = childPriority * PRIORITY_SHIFT + (timestamp + childDelay)\n redis.call('ZADD', childScheduledKey, score, childJobIdStr)\n elseif childPriority > 0 then\n local score = childPriority * PRIORITY_SHIFT\n redis.call('ZADD', childScheduledKey, score, childJobIdStr)\n else\n redis.call('XADD', childStreamKey, '*', 'jobId', childJobIdStr)\n end\n emitEvent(childEventsKey, 'added', childJobIdStr, {'name', childName})\n childIds[#childIds + 1] = childJobIdStr\n end\n local extraDepsOffset = childArgOffset + numChildren * 8\n local numExtraDeps = tonumber(args[extraDepsOffset + 1]) or 0\n for i = 1, numExtraDeps do\n local extraMember = args[extraDepsOffset + 1 + i]\n redis.call('SADD', depsKey, extraMember)\n end\n emitEvent(parentEventsKey, 'added', parentJobIdStr, {'name', parentName})\n local result = {parentJobIdStr}\n for i = 1, #childIds do\n result[#result + 1] = childIds[i]\n end\n return cjson.encode(result)\nend)\n\nredis.register_function('glidemq_completeChild', function(keys, args)\n local depsKey = keys[1]\n local parentJobKey = keys[2]\n local parentStreamKey = keys[3]\n local parentEventsKey = keys[4]\n local depsMember = args[1]\n local parentId = args[2]\n local doneCount = redis.call('HINCRBY', parentJobKey, 'depsCompleted', 1)\n local totalDeps = redis.call('SCARD', depsKey)\n local remaining = totalDeps - doneCount\n if remaining <= 0 then\n redis.call('HSET', parentJobKey, 'state', 'waiting')\n redis.call('XADD', parentStreamKey, '*', 'jobId', parentId)\n emitEvent(parentEventsKey, 'active', parentId, nil)\n end\n return remaining\nend)\n\nredis.register_function('glidemq_removeJob', function(keys, args)\n local jobKey = keys[1]\n local streamKey = keys[2]\n local scheduledKey = keys[3]\n local completedKey = keys[4]\n local failedKey = keys[5]\n local eventsKey = keys[6]\n local logKey = keys[7]\n local jobId = args[1]\n local exists = redis.call('EXISTS', 
jobKey)\n if exists == 0 then\n return 0\n end\n local state = redis.call('HGET', jobKey, 'state')\n local groupKey = redis.call('HGET', jobKey, 'groupKey')\n if groupKey and groupKey ~= '' then\n if state == 'active' then\n releaseGroupSlotAndPromote(jobKey, jobId, 0)\n elseif state == 'group-waiting' then\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n local waitListKey = prefix .. 'groupq:' .. groupKey\n redis.call('LREM', waitListKey, 1, jobId)\n end\n end\n redis.call('ZREM', scheduledKey, jobId)\n redis.call('ZREM', completedKey, jobId)\n redis.call('ZREM', failedKey, jobId)\n markOrderingDone(jobKey, jobId)\n redis.call('DEL', jobKey)\n redis.call('DEL', logKey)\n emitEvent(eventsKey, 'removed', jobId, nil)\n return 1\nend)\n\nredis.register_function('glidemq_revoke', function(keys, args)\n local jobKey = keys[1]\n local streamKey = keys[2]\n local scheduledKey = keys[3]\n local failedKey = keys[4]\n local eventsKey = keys[5]\n local jobId = args[1]\n local timestamp = tonumber(args[2])\n local group = args[3]\n local exists = redis.call('EXISTS', jobKey)\n if exists == 0 then\n return 'not_found'\n end\n redis.call('HSET', jobKey, 'revoked', '1')\n local state = redis.call('HGET', jobKey, 'state')\n if state == 'group-waiting' then\n local gk = redis.call('HGET', jobKey, 'groupKey')\n if gk and gk ~= '' then\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n local waitListKey = prefix .. 'groupq:' .. 
gk\n redis.call('LREM', waitListKey, 1, jobId)\n end\n redis.call('ZADD', failedKey, timestamp, jobId)\n redis.call('HSET', jobKey,\n 'state', 'failed',\n 'failedReason', 'revoked',\n 'finishedOn', tostring(timestamp)\n )\n emitEvent(eventsKey, 'revoked', jobId, nil)\n return 'revoked'\n end\n if state == 'waiting' or state == 'delayed' or state == 'prioritized' then\n redis.call('ZREM', scheduledKey, jobId)\n local entries = redis.call('XRANGE', streamKey, '-', '+')\n for i = 1, #entries do\n local entryId = entries[i][1]\n local fields = entries[i][2]\n for j = 1, #fields, 2 do\n if fields[j] == 'jobId' and fields[j+1] == jobId then\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n break\n end\n end\n end\n redis.call('ZADD', failedKey, timestamp, jobId)\n redis.call('HSET', jobKey,\n 'state', 'failed',\n 'failedReason', 'revoked',\n 'finishedOn', tostring(timestamp)\n )\n markOrderingDone(jobKey, jobId)\n emitEvent(eventsKey, 'revoked', jobId, nil)\n return 'revoked'\n end\n emitEvent(eventsKey, 'revoked', jobId, nil)\n return 'flagged'\nend)\n\nredis.register_function('glidemq_searchByName', function(keys, args)\n local stateKey = keys[1]\n local stateType = args[1]\n local nameFilter = args[2]\n local limit = tonumber(args[3]) or 100\n local prefix = args[4]\n local matched = {}\n if stateType == 'zset' then\n local members = redis.call('ZRANGE', stateKey, 0, -1)\n for i = 1, #members do\n if #matched >= limit then break end\n local jobId = members[i]\n local jobKey = prefix .. 'job:' .. 
jobId\n local name = redis.call('HGET', jobKey, 'name')\n if name == nameFilter then\n matched[#matched + 1] = jobId\n end\n end\n elseif stateType == 'stream' then\n local entries = redis.call('XRANGE', stateKey, '-', '+')\n for i = 1, #entries do\n if #matched >= limit then break end\n local fields = entries[i][2]\n local jobId = nil\n for j = 1, #fields, 2 do\n if fields[j] == 'jobId' then\n jobId = fields[j + 1]\n break\n end\n end\n if jobId then\n local jobKey = prefix .. 'job:' .. jobId\n local name = redis.call('HGET', jobKey, 'name')\n if name == nameFilter then\n matched[#matched + 1] = jobId\n end\n end\n end\n end\n return matched\nend)\n";
6
+ export declare const LIBRARY_SOURCE = "#!lua name=glidemq\n\nlocal PRIORITY_SHIFT = 4398046511104\n\nlocal function emitEvent(eventsKey, eventType, jobId, extraFields)\n local fields = {'event', eventType, 'jobId', tostring(jobId)}\n if extraFields then\n for i = 1, #extraFields, 2 do\n fields[#fields + 1] = extraFields[i]\n fields[#fields + 1] = extraFields[i + 1]\n end\n end\n redis.call('XADD', eventsKey, 'MAXLEN', '~', '1000', '*', unpack(fields))\nend\n\nlocal function markOrderingDone(jobKey, jobId)\n local orderingKey = redis.call('HGET', jobKey, 'orderingKey')\n if not orderingKey or orderingKey == '' then\n return\n end\n local orderingSeq = tonumber(redis.call('HGET', jobKey, 'orderingSeq')) or 0\n if orderingSeq <= 0 then\n return\n end\n\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n local metaKey = prefix .. 'meta'\n local doneField = 'orderdone:' .. orderingKey\n local pendingKey = prefix .. 'orderdone:pending:' .. orderingKey\n\n local lastDone = tonumber(redis.call('HGET', metaKey, doneField)) or 0\n if orderingSeq <= lastDone then\n redis.call('HDEL', pendingKey, tostring(orderingSeq))\n return\n end\n\n redis.call('HSET', pendingKey, tostring(orderingSeq), '1')\n local advanced = lastDone\n while true do\n local nextSeq = advanced + 1\n if redis.call('HEXISTS', pendingKey, tostring(nextSeq)) == 0 then\n break\n end\n redis.call('HDEL', pendingKey, tostring(nextSeq))\n advanced = nextSeq\n end\n if advanced > lastDone then\n redis.call('HSET', metaKey, doneField, tostring(advanced))\n end\nend\n\n-- Refill token bucket using remainder accumulator for precision.\n-- tbRefillRate is in millitokens/second. 
Returns current millitokens after refill.\n-- Side effect: updates tbTokens, tbLastRefill, tbRefillRemainder on the group hash.\nlocal function tbRefill(groupHashKey, g, now)\n local tbCapacity = tonumber(g.tbCapacity) or 0\n if tbCapacity <= 0 then return 0 end\n local tbTokens = tonumber(g.tbTokens) or tbCapacity\n local tbRefillRate = tonumber(g.tbRefillRate) or 0\n local tbLastRefill = tonumber(g.tbLastRefill) or now\n local tbRefillRemainder = tonumber(g.tbRefillRemainder) or 0\n local elapsed = now - tbLastRefill\n if elapsed <= 0 or tbRefillRate <= 0 then return tbTokens end\n -- Cap elapsed to prevent overflow in long-idle buckets\n local maxElapsed = math.ceil(tbCapacity * 1000 / tbRefillRate)\n if elapsed > maxElapsed then elapsed = maxElapsed end\n local raw = elapsed * tbRefillRate + tbRefillRemainder\n local added = math.floor(raw / 1000)\n local newRemainder = raw % 1000\n local newTokens = math.min(tbCapacity, tbTokens + added)\n redis.call('HSET', groupHashKey,\n 'tbTokens', tostring(newTokens),\n 'tbLastRefill', tostring(now),\n 'tbRefillRemainder', tostring(newRemainder))\n return newTokens\nend\n\nlocal function releaseGroupSlotAndPromote(jobKey, jobId, now)\n local gk = redis.call('HGET', jobKey, 'groupKey')\n if not gk or gk == '' then return end\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n local groupHashKey = prefix .. 'group:' .. gk\n -- Load all group fields in one call\n local gFields = redis.call('HGETALL', groupHashKey)\n local g = {}\n for gf = 1, #gFields, 2 do g[gFields[gf]] = gFields[gf + 1] end\n local cur = tonumber(g.active) or 0\n local newActive = (cur > 0) and (cur - 1) or 0\n if cur > 0 then\n redis.call('HSET', groupHashKey, 'active', tostring(newActive))\n end\n local waitListKey = prefix .. 'groupq:' .. 
gk\n local waitLen = redis.call('LLEN', waitListKey)\n if waitLen == 0 then return end\n -- Concurrency gate: if still at or above max after decrement, do not promote\n local maxConc = tonumber(g.maxConcurrency) or 0\n if maxConc > 0 and newActive >= maxConc then return end\n -- Rate limit gate (skip if now is nil or 0 for safe fallback)\n -- Only blocks promotion; does NOT increment rateCount. moveToActive handles counting.\n local rateMax = tonumber(g.rateMax) or 0\n local rateRemaining = 0\n local ts = tonumber(now) or 0\n if ts > 0 and rateMax > 0 then\n local rateDuration = tonumber(g.rateDuration) or 0\n if rateDuration > 0 then\n local rateWindowStart = tonumber(g.rateWindowStart) or 0\n local rateCount = tonumber(g.rateCount) or 0\n if ts - rateWindowStart < rateDuration then\n if rateCount >= rateMax then\n -- Window active and at capacity: do not promote, register for scheduler\n local rateLimitedKey = prefix .. 'ratelimited'\n redis.call('ZADD', rateLimitedKey, rateWindowStart + rateDuration, gk)\n return\n end\n rateRemaining = rateMax - rateCount\n end\n end\n end\n -- Token bucket gate: check head job cost before promoting\n local tbCap = tonumber(g.tbCapacity) or 0\n if ts > 0 and tbCap > 0 then\n local tbTokensCur = tbRefill(groupHashKey, g, ts)\n -- Peek at head job, skipping tombstones and DLQ'd jobs (up to 10 iterations)\n local tbCheckPasses = 0\n local tbOk = false\n while tbCheckPasses < 10 do\n tbCheckPasses = tbCheckPasses + 1\n local headJobId = redis.call('LINDEX', waitListKey, 0)\n if not headJobId then break end\n local headJobKey = prefix .. 'job:' .. headJobId\n -- Tombstone guard: job hash deleted - pop and check next\n if redis.call('EXISTS', headJobKey) == 0 then\n redis.call('LPOP', waitListKey)\n else\n local headCost = tonumber(redis.call('HGET', headJobKey, 'cost')) or 1000\n -- DLQ guard: cost > capacity - pop, fail, check next\n if headCost > tbCap then\n redis.call('LPOP', waitListKey)\n redis.call('ZADD', prefix .. 
'failed', ts, headJobId)\n redis.call('HSET', headJobKey,\n 'state', 'failed',\n 'failedReason', 'cost exceeds token bucket capacity',\n 'finishedOn', tostring(ts))\n emitEvent(prefix .. 'events', 'failed', headJobId, {'failedReason', 'cost exceeds token bucket capacity'})\n elseif tbTokensCur < headCost then\n -- Not enough tokens: register delay and skip promotion\n local tbRateVal = tonumber(g.tbRefillRate) or 0\n if tbRateVal <= 0 then break end\n local tbDelayMs = math.ceil((headCost - tbTokensCur) * 1000 / tbRateVal)\n local rateLimitedKey = prefix .. 'ratelimited'\n redis.call('ZADD', rateLimitedKey, ts + tbDelayMs, gk)\n return\n else\n tbOk = true\n break\n end\n end\n end\n if not tbOk and tbCheckPasses >= 10 then return end\n end\n -- Calculate how many slots are available for promotion\n local available = 1\n if maxConc > 0 then\n available = maxConc - newActive\n else\n available = math.min(waitLen, 1000)\n end\n -- Cap by rate limit remaining if a window is active\n if rateRemaining > 0 then\n available = math.min(available, rateRemaining)\n end\n local streamKey = prefix .. 'stream'\n for p = 1, available do\n local nextJobId = redis.call('LPOP', waitListKey)\n if not nextJobId then break end\n redis.call('XADD', streamKey, '*', 'jobId', nextJobId)\n local nextJobKey = prefix .. 'job:' .. 
nextJobId\n redis.call('HSET', nextJobKey, 'state', 'waiting')\n end\nend\n\nlocal function extractOrderingKeyFromOpts(optsJson)\n if not optsJson or optsJson == '' then\n return ''\n end\n local ok, decoded = pcall(cjson.decode, optsJson)\n if not ok or type(decoded) ~= 'table' then\n return ''\n end\n local ordering = decoded['ordering']\n if type(ordering) ~= 'table' then\n return ''\n end\n local key = ordering['key']\n if key == nil then\n return ''\n end\n return tostring(key)\nend\n\nlocal function extractGroupConcurrencyFromOpts(optsJson)\n if not optsJson or optsJson == '' then\n return 0\n end\n local ok, decoded = pcall(cjson.decode, optsJson)\n if not ok or type(decoded) ~= 'table' then\n return 0\n end\n local ordering = decoded['ordering']\n if type(ordering) ~= 'table' then\n return 0\n end\n local conc = ordering['concurrency']\n if conc == nil then\n return 0\n end\n return tonumber(conc) or 0\nend\n\nlocal function extractGroupRateLimitFromOpts(optsJson)\n if not optsJson or optsJson == '' then\n return 0, 0\n end\n local ok, decoded = pcall(cjson.decode, optsJson)\n if not ok or type(decoded) ~= 'table' then\n return 0, 0\n end\n local ordering = decoded['ordering']\n if type(ordering) ~= 'table' then\n return 0, 0\n end\n local rl = ordering['rateLimit']\n if type(rl) ~= 'table' then\n return 0, 0\n end\n local max = tonumber(rl['max']) or 0\n local duration = tonumber(rl['duration']) or 0\n return max, duration\nend\n\nlocal function extractTokenBucketFromOpts(optsJson)\n if not optsJson or optsJson == '' then return 0, 0 end\n local ok, decoded = pcall(cjson.decode, optsJson)\n if not ok or type(decoded) ~= 'table' then return 0, 0 end\n local ordering = decoded['ordering']\n if type(ordering) ~= 'table' then return 0, 0 end\n local tb = ordering['tokenBucket']\n if type(tb) ~= 'table' then return 0, 0 end\n local capacity = tonumber(tb['capacity']) or 0\n local refillRate = tonumber(tb['refillRate']) or 0\n return math.floor(capacity * 1000), 
math.floor(refillRate * 1000)\nend\n\nlocal function extractCostFromOpts(optsJson)\n if not optsJson or optsJson == '' then return 0 end\n local ok, decoded = pcall(cjson.decode, optsJson)\n if not ok or type(decoded) ~= 'table' then return 0 end\n local cost = tonumber(decoded['cost']) or 0\n return math.floor(cost * 1000)\nend\n\n-- Remove excess jobs from a sorted set in capped, stack-safe batches.\n-- Deletes job hashes and removes from the set in chunks of 1000.\nlocal function removeExcessJobs(setKey, prefix, ids)\n for i = 1, #ids do\n redis.call('DEL', prefix .. 'job:' .. ids[i])\n end\n for i = 1, #ids, 1000 do\n redis.call('ZREM', setKey, unpack(ids, i, math.min(i + 999, #ids)))\n end\nend\n\nredis.register_function('glidemq_version', function(keys, args)\n return '28'\nend)\n\nredis.register_function('glidemq_addJob', function(keys, args)\n local idKey = keys[1]\n local streamKey = keys[2]\n local scheduledKey = keys[3]\n local eventsKey = keys[4]\n local jobName = args[1]\n local jobData = args[2]\n local jobOpts = args[3]\n local timestamp = tonumber(args[4])\n local delay = tonumber(args[5]) or 0\n local priority = tonumber(args[6]) or 0\n local parentId = args[7] or ''\n local maxAttempts = tonumber(args[8]) or 0\n local orderingKey = args[9] or ''\n local groupConcurrency = tonumber(args[10]) or 0\n local groupRateMax = tonumber(args[11]) or 0\n local groupRateDuration = tonumber(args[12]) or 0\n local tbCapacity = tonumber(args[13]) or 0\n local tbRefillRate = tonumber(args[14]) or 0\n local jobCost = tonumber(args[15]) or 0\n local jobId = redis.call('INCR', idKey)\n local jobIdStr = tostring(jobId)\n local prefix = string.sub(idKey, 1, #idKey - 2)\n local jobKey = prefix .. 'job:' .. jobIdStr\n local useGroupConcurrency = (orderingKey ~= '' and (groupConcurrency > 1 or groupRateMax > 0 or tbCapacity > 0))\n local orderingSeq = 0\n if orderingKey ~= '' and not useGroupConcurrency then\n local orderingMetaKey = prefix .. 
'ordering'\n orderingSeq = redis.call('HINCRBY', orderingMetaKey, orderingKey, 1)\n end\n if useGroupConcurrency then\n local groupHashKey = prefix .. 'group:' .. orderingKey\n local curMax = tonumber(redis.call('HGET', groupHashKey, 'maxConcurrency')) or 0\n if curMax ~= groupConcurrency then\n redis.call('HSET', groupHashKey, 'maxConcurrency', tostring(groupConcurrency))\n end\n -- When rate limit or token bucket forces group path but concurrency is 0 or 1, ensure maxConcurrency >= 1\n if curMax == 0 and groupConcurrency <= 1 then\n redis.call('HSET', groupHashKey, 'maxConcurrency', '1')\n end\n -- Upsert rate limit fields on group hash\n if groupRateMax > 0 then\n local curRateMax = tonumber(redis.call('HGET', groupHashKey, 'rateMax')) or 0\n if curRateMax ~= groupRateMax then\n redis.call('HSET', groupHashKey, 'rateMax', tostring(groupRateMax))\n end\n local curRateDuration = tonumber(redis.call('HGET', groupHashKey, 'rateDuration')) or 0\n if curRateDuration ~= groupRateDuration then\n redis.call('HSET', groupHashKey, 'rateDuration', tostring(groupRateDuration))\n end\n else\n -- Clear stale rate limit fields if group was previously rate-limited\n local oldRateMax = tonumber(redis.call('HGET', groupHashKey, 'rateMax')) or 0\n if oldRateMax > 0 then\n redis.call('HDEL', groupHashKey, 'rateMax', 'rateDuration', 'rateWindowStart', 'rateCount')\n end\n end\n -- Upsert token bucket fields on group hash\n if tbCapacity > 0 then\n local curTbCap = tonumber(redis.call('HGET', groupHashKey, 'tbCapacity')) or 0\n if curTbCap ~= tbCapacity then\n redis.call('HSET', groupHashKey, 'tbCapacity', tostring(tbCapacity))\n end\n local curTbRate = tonumber(redis.call('HGET', groupHashKey, 'tbRefillRate')) or 0\n if curTbRate ~= tbRefillRate then\n redis.call('HSET', groupHashKey, 'tbRefillRate', tostring(tbRefillRate))\n end\n -- Initialize tokens on first setup\n if curTbCap == 0 then\n redis.call('HSET', groupHashKey,\n 'tbTokens', tostring(tbCapacity),\n 'tbLastRefill', 
tostring(timestamp),\n 'tbRefillRemainder', '0')\n end\n -- Validate cost <= capacity at enqueue\n -- Validate cost (explicit or default 1000 millitokens) against capacity\n local effectiveCost = (jobCost > 0) and jobCost or 1000\n if effectiveCost > tbCapacity then\n return 'ERR:COST_EXCEEDS_CAPACITY'\n end\n else\n -- Clear stale tb fields\n local oldTbCap = tonumber(redis.call('HGET', groupHashKey, 'tbCapacity')) or 0\n if oldTbCap > 0 then\n redis.call('HDEL', groupHashKey, 'tbCapacity', 'tbRefillRate', 'tbTokens', 'tbLastRefill', 'tbRefillRemainder')\n end\n end\n end\n local hashFields = {\n 'id', jobIdStr,\n 'name', jobName,\n 'data', jobData,\n 'opts', jobOpts,\n 'timestamp', tostring(timestamp),\n 'attemptsMade', '0',\n 'delay', tostring(delay),\n 'priority', tostring(priority),\n 'maxAttempts', tostring(maxAttempts)\n }\n if useGroupConcurrency then\n hashFields[#hashFields + 1] = 'groupKey'\n hashFields[#hashFields + 1] = orderingKey\n elseif orderingKey ~= '' then\n hashFields[#hashFields + 1] = 'orderingKey'\n hashFields[#hashFields + 1] = orderingKey\n hashFields[#hashFields + 1] = 'orderingSeq'\n hashFields[#hashFields + 1] = tostring(orderingSeq)\n end\n if jobCost > 0 then\n hashFields[#hashFields + 1] = 'cost'\n hashFields[#hashFields + 1] = tostring(jobCost)\n end\n if parentId ~= '' then\n hashFields[#hashFields + 1] = 'parentId'\n hashFields[#hashFields + 1] = parentId\n end\n if delay > 0 or priority > 0 then\n hashFields[#hashFields + 1] = 'state'\n hashFields[#hashFields + 1] = delay > 0 and 'delayed' or 'prioritized'\n else\n hashFields[#hashFields + 1] = 'state'\n hashFields[#hashFields + 1] = 'waiting'\n end\n redis.call('HSET', jobKey, unpack(hashFields))\n if delay > 0 then\n local score = priority * PRIORITY_SHIFT + (timestamp + delay)\n redis.call('ZADD', scheduledKey, score, jobIdStr)\n elseif priority > 0 then\n local score = priority * PRIORITY_SHIFT\n redis.call('ZADD', scheduledKey, score, jobIdStr)\n else\n redis.call('XADD', 
streamKey, '*', 'jobId', jobIdStr)\n end\n emitEvent(eventsKey, 'added', jobIdStr, {'name', jobName})\n return jobIdStr\nend)\n\nredis.register_function('glidemq_promote', function(keys, args)\n local scheduledKey = keys[1]\n local streamKey = keys[2]\n local eventsKey = keys[3]\n local now = tonumber(args[1])\n local MAX_PROMOTIONS = 1000\n local count = 0\n local cursorMin = 0\n while count < MAX_PROMOTIONS do\n local nextEntry = redis.call('ZRANGEBYSCORE', scheduledKey, string.format('%.0f', cursorMin), '+inf', 'WITHSCORES', 'LIMIT', 0, 1)\n if not nextEntry or #nextEntry == 0 then\n break\n end\n local firstScore = tonumber(nextEntry[2]) or 0\n local priority = math.floor(firstScore / PRIORITY_SHIFT)\n local minScore = priority * PRIORITY_SHIFT\n local maxDueScore = minScore + now\n local remaining = MAX_PROMOTIONS - count\n local members = redis.call(\n 'ZRANGEBYSCORE',\n scheduledKey,\n string.format('%.0f', minScore),\n string.format('%.0f', maxDueScore),\n 'LIMIT',\n 0,\n remaining\n )\n for i = 1, #members do\n local jobId = members[i]\n redis.call('XADD', streamKey, '*', 'jobId', jobId)\n redis.call('ZREM', scheduledKey, jobId)\n local prefix = string.sub(scheduledKey, 1, #scheduledKey - 9)\n local jobKey = prefix .. 'job:' .. 
jobId\n redis.call('HSET', jobKey, 'state', 'waiting')\n emitEvent(eventsKey, 'promoted', jobId, nil)\n count = count + 1\n end\n cursorMin = (priority + 1) * PRIORITY_SHIFT\n end\n return count\nend)\n\nredis.register_function('glidemq_complete', function(keys, args)\n local streamKey = keys[1]\n local completedKey = keys[2]\n local eventsKey = keys[3]\n local jobKey = keys[4]\n local jobId = args[1]\n local entryId = args[2]\n local returnvalue = args[3]\n local timestamp = tonumber(args[4])\n local group = args[5]\n local removeMode = args[6] or '0'\n local removeCount = tonumber(args[7]) or 0\n local removeAge = tonumber(args[8]) or 0\n local depsMember = args[9] or ''\n local parentId = args[10] or ''\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n redis.call('ZADD', completedKey, timestamp, jobId)\n redis.call('HSET', jobKey,\n 'state', 'completed',\n 'returnvalue', returnvalue,\n 'finishedOn', tostring(timestamp)\n )\n markOrderingDone(jobKey, jobId)\n releaseGroupSlotAndPromote(jobKey, jobId, timestamp)\n emitEvent(eventsKey, 'completed', jobId, {'returnvalue', returnvalue})\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. 
jobId))\n if removeMode == 'true' then\n redis.call('ZREM', completedKey, jobId)\n redis.call('DEL', jobKey)\n elseif removeMode == 'count' and removeCount > 0 then\n local total = redis.call('ZCARD', completedKey)\n if total > removeCount then\n local excess = redis.call('ZRANGE', completedKey, 0, math.min(total - removeCount, 1000) - 1)\n if #excess > 0 then removeExcessJobs(completedKey, prefix, excess) end\n end\n elseif removeMode == 'age_count' then\n if removeAge > 0 then\n local cutoff = timestamp - (removeAge * 1000)\n local old = redis.call('ZRANGEBYSCORE', completedKey, '0', string.format('%.0f', cutoff), 'LIMIT', 0, 1000)\n if #old > 0 then removeExcessJobs(completedKey, prefix, old) end\n end\n if removeCount > 0 then\n local total = redis.call('ZCARD', completedKey)\n if total > removeCount then\n local excess = redis.call('ZRANGE', completedKey, 0, math.min(total - removeCount, 1000) - 1)\n if #excess > 0 then removeExcessJobs(completedKey, prefix, excess) end\n end\n end\n end\n if depsMember ~= '' and parentId ~= '' and #keys >= 8 then\n local parentDepsKey = keys[5]\n local parentJobKey = keys[6]\n local parentStreamKey = keys[7]\n local parentEventsKey = keys[8]\n local doneCount = redis.call('HINCRBY', parentJobKey, 'depsCompleted', 1)\n local totalDeps = redis.call('SCARD', parentDepsKey)\n local remaining = totalDeps - doneCount\n if remaining <= 0 then\n redis.call('HSET', parentJobKey, 'state', 'waiting')\n redis.call('XADD', parentStreamKey, '*', 'jobId', parentId)\n emitEvent(parentEventsKey, 'active', parentId, nil)\n end\n end\n return 1\nend)\n\nredis.register_function('glidemq_completeAndFetchNext', function(keys, args)\n local streamKey = keys[1]\n local completedKey = keys[2]\n local eventsKey = keys[3]\n local jobKey = keys[4]\n local jobId = args[1]\n local entryId = args[2]\n local returnvalue = args[3]\n local timestamp = tonumber(args[4])\n local group = args[5]\n local consumer = args[6]\n local removeMode = args[7] or '0'\n 
local removeCount = tonumber(args[8]) or 0\n local removeAge = tonumber(args[9]) or 0\n local depsMember = args[10] or ''\n local parentId = args[11] or ''\n\n -- Phase 1: Complete current job (same as glidemq_complete)\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n redis.call('ZADD', completedKey, timestamp, jobId)\n redis.call('HSET', jobKey,\n 'state', 'completed',\n 'returnvalue', returnvalue,\n 'finishedOn', tostring(timestamp)\n )\n markOrderingDone(jobKey, jobId)\n releaseGroupSlotAndPromote(jobKey, jobId, timestamp)\n emitEvent(eventsKey, 'completed', jobId, {'returnvalue', returnvalue})\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n\n -- Retention cleanup\n if removeMode == 'true' then\n redis.call('ZREM', completedKey, jobId)\n redis.call('DEL', jobKey)\n elseif removeMode == 'count' and removeCount > 0 then\n local total = redis.call('ZCARD', completedKey)\n if total > removeCount then\n local excess = redis.call('ZRANGE', completedKey, 0, math.min(total - removeCount, 1000) - 1)\n if #excess > 0 then removeExcessJobs(completedKey, prefix, excess) end\n end\n elseif removeMode == 'age_count' then\n if removeAge > 0 then\n local cutoff = timestamp - (removeAge * 1000)\n local old = redis.call('ZRANGEBYSCORE', completedKey, '0', string.format('%.0f', cutoff), 'LIMIT', 0, 1000)\n if #old > 0 then removeExcessJobs(completedKey, prefix, old) end\n end\n if removeCount > 0 then\n local total = redis.call('ZCARD', completedKey)\n if total > removeCount then\n local excess = redis.call('ZRANGE', completedKey, 0, math.min(total - removeCount, 1000) - 1)\n if #excess > 0 then removeExcessJobs(completedKey, prefix, excess) end\n end\n end\n end\n\n -- Parent deps\n if depsMember ~= '' and parentId ~= '' and #keys >= 8 then\n local parentDepsKey = keys[5]\n local parentJobKey = keys[6]\n local parentStreamKey = keys[7]\n local parentEventsKey = keys[8]\n local doneCount = redis.call('HINCRBY', 
parentJobKey, 'depsCompleted', 1)\n local totalDeps = redis.call('SCARD', parentDepsKey)\n if totalDeps - doneCount <= 0 then\n redis.call('HSET', parentJobKey, 'state', 'waiting')\n redis.call('XADD', parentStreamKey, '*', 'jobId', parentId)\n emitEvent(parentEventsKey, 'active', parentId, nil)\n end\n end\n\n -- Phase 2: Fetch next job (non-blocking XREADGROUP)\n local nextEntries = redis.call('XREADGROUP', 'GROUP', group, consumer, 'COUNT', 1, 'STREAMS', streamKey, '>')\n if not nextEntries or #nextEntries == 0 then\n return cjson.encode({completed = jobId, next = false})\n end\n local streamData = nextEntries[1]\n local entries = streamData[2]\n if not entries or #entries == 0 then\n return cjson.encode({completed = jobId, next = false})\n end\n local nextEntry = entries[1]\n local nextEntryId = nextEntry[1]\n local nextFields = nextEntry[2]\n local nextJobId = nil\n for i = 1, #nextFields, 2 do\n if nextFields[i] == 'jobId' then\n nextJobId = nextFields[i + 1]\n break\n end\n end\n if not nextJobId then\n return cjson.encode({completed = jobId, next = false})\n end\n\n -- Phase 3: Activate next job (same as moveToActive)\n local nextJobKey = prefix .. 'job:' .. nextJobId\n local nextExists = redis.call('EXISTS', nextJobKey)\n if nextExists == 0 then\n return cjson.encode({completed = jobId, next = false, nextEntryId = nextEntryId})\n end\n local revoked = redis.call('HGET', nextJobKey, 'revoked')\n if revoked == '1' then\n return cjson.encode({completed = jobId, next = 'REVOKED', nextJobId = nextJobId, nextEntryId = nextEntryId})\n end\n local nextGroupKey = redis.call('HGET', nextJobKey, 'groupKey')\n if nextGroupKey and nextGroupKey ~= '' then\n local nextGroupHashKey = prefix .. 'group:' .. 
nextGroupKey\n -- Load all group fields in one call\n local nGrpFields = redis.call('HGETALL', nextGroupHashKey)\n local nGrp = {}\n for nf = 1, #nGrpFields, 2 do nGrp[nGrpFields[nf]] = nGrpFields[nf + 1] end\n local nextMaxConc = tonumber(nGrp.maxConcurrency) or 0\n local nextActive = tonumber(nGrp.active) or 0\n -- Concurrency gate first (avoids burning rate/token slots on parked jobs)\n if nextMaxConc > 0 and nextActive >= nextMaxConc then\n redis.call('XACK', streamKey, group, nextEntryId)\n redis.call('XDEL', streamKey, nextEntryId)\n local nextWaitListKey = prefix .. 'groupq:' .. nextGroupKey\n redis.call('RPUSH', nextWaitListKey, nextJobId)\n redis.call('HSET', nextJobKey, 'state', 'group-waiting')\n return cjson.encode({completed = jobId, next = false})\n end\n -- Token bucket gate (read-only)\n local nextTbCapacity = tonumber(nGrp.tbCapacity) or 0\n local nextTbBlocked = false\n local nextTbDelay = 0\n local nextTbTokens = 0\n local nextJobCostVal = 0\n if nextTbCapacity > 0 then\n nextTbTokens = tbRefill(nextGroupHashKey, nGrp, tonumber(timestamp))\n nextJobCostVal = tonumber(redis.call('HGET', nextJobKey, 'cost')) or 1000\n -- DLQ guard: cost > capacity\n if nextJobCostVal > nextTbCapacity then\n redis.call('XACK', streamKey, group, nextEntryId)\n redis.call('XDEL', streamKey, nextEntryId)\n redis.call('ZADD', prefix .. 'failed', tonumber(timestamp), nextJobId)\n redis.call('HSET', nextJobKey,\n 'state', 'failed',\n 'failedReason', 'cost exceeds token bucket capacity',\n 'finishedOn', tostring(timestamp))\n emitEvent(prefix .. 
'events', 'failed', nextJobId, {'failedReason', 'cost exceeds token bucket capacity'})\n return cjson.encode({completed = jobId, next = false})\n end\n if nextTbTokens < nextJobCostVal then\n nextTbBlocked = true\n local nextTbRefillRateVal = math.max(tonumber(nGrp.tbRefillRate) or 0, 1)\n nextTbDelay = math.ceil((nextJobCostVal - nextTbTokens) * 1000 / nextTbRefillRateVal)\n end\n end\n -- Sliding window gate (read-only)\n local nextRateMax = tonumber(nGrp.rateMax) or 0\n local nextRlBlocked = false\n local nextRlDelay = 0\n if nextRateMax > 0 then\n local nextRateDuration = tonumber(nGrp.rateDuration) or 0\n local nextRateWindowStart = tonumber(nGrp.rateWindowStart) or 0\n local nextRateCount = tonumber(nGrp.rateCount) or 0\n if nextRateDuration > 0 and timestamp - nextRateWindowStart < nextRateDuration and nextRateCount >= nextRateMax then\n nextRlBlocked = true\n nextRlDelay = (nextRateWindowStart + nextRateDuration) - timestamp\n end\n end\n -- If ANY gate blocked: park + register\n if nextTbBlocked or nextRlBlocked then\n redis.call('XACK', streamKey, group, nextEntryId)\n redis.call('XDEL', streamKey, nextEntryId)\n local nextWaitListKey = prefix .. 'groupq:' .. nextGroupKey\n redis.call('RPUSH', nextWaitListKey, nextJobId)\n redis.call('HSET', nextJobKey, 'state', 'group-waiting')\n local nextMaxDelay = math.max(nextTbDelay, nextRlDelay)\n local rateLimitedKey = prefix .. 
'ratelimited'\n redis.call('ZADD', rateLimitedKey, tonumber(timestamp) + nextMaxDelay, nextGroupKey)\n return cjson.encode({completed = jobId, next = false})\n end\n -- All gates passed: mutate state\n if nextTbCapacity > 0 then\n redis.call('HINCRBY', nextGroupHashKey, 'tbTokens', -nextJobCostVal)\n end\n if nextRateMax > 0 then\n local nextRateDuration = tonumber(nGrp.rateDuration) or 0\n if nextRateDuration > 0 then\n local nextRateWindowStart = tonumber(nGrp.rateWindowStart) or 0\n if timestamp - nextRateWindowStart >= nextRateDuration then\n redis.call('HSET', nextGroupHashKey, 'rateWindowStart', tostring(timestamp), 'rateCount', '1')\n else\n redis.call('HINCRBY', nextGroupHashKey, 'rateCount', 1)\n end\n end\n end\n redis.call('HINCRBY', nextGroupHashKey, 'active', 1)\n end\n redis.call('HSET', nextJobKey, 'state', 'active', 'processedOn', tostring(timestamp), 'lastActive', tostring(timestamp))\n local nextHash = redis.call('HGETALL', nextJobKey)\n return cjson.encode({completed = jobId, next = nextHash, nextJobId = nextJobId, nextEntryId = nextEntryId})\nend)\n\nredis.register_function('glidemq_fail', function(keys, args)\n local streamKey = keys[1]\n local failedKey = keys[2]\n local scheduledKey = keys[3]\n local eventsKey = keys[4]\n local jobKey = keys[5]\n local jobId = args[1]\n local entryId = args[2]\n local failedReason = args[3]\n local timestamp = tonumber(args[4])\n local maxAttempts = tonumber(args[5]) or 0\n local backoffDelay = tonumber(args[6]) or 0\n local group = args[7]\n local removeMode = args[8] or '0'\n local removeCount = tonumber(args[9]) or 0\n local removeAge = tonumber(args[10]) or 0\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n local attemptsMade = redis.call('HINCRBY', jobKey, 'attemptsMade', 1)\n if maxAttempts > 0 and attemptsMade < maxAttempts then\n local retryAt = timestamp + backoffDelay\n local priority = tonumber(redis.call('HGET', jobKey, 'priority')) or 0\n local score = 
priority * PRIORITY_SHIFT + retryAt\n redis.call('ZADD', scheduledKey, score, jobId)\n redis.call('HSET', jobKey,\n 'state', 'delayed',\n 'failedReason', failedReason,\n 'processedOn', tostring(timestamp)\n )\n releaseGroupSlotAndPromote(jobKey, jobId, timestamp)\n emitEvent(eventsKey, 'retrying', jobId, {\n 'failedReason', failedReason,\n 'attemptsMade', tostring(attemptsMade),\n 'delay', tostring(backoffDelay)\n })\n return 'retrying'\n else\n redis.call('ZADD', failedKey, timestamp, jobId)\n redis.call('HSET', jobKey,\n 'state', 'failed',\n 'failedReason', failedReason,\n 'finishedOn', tostring(timestamp),\n 'processedOn', tostring(timestamp)\n )\n markOrderingDone(jobKey, jobId)\n releaseGroupSlotAndPromote(jobKey, jobId, timestamp)\n emitEvent(eventsKey, 'failed', jobId, {'failedReason', failedReason})\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n if removeMode == 'true' then\n redis.call('ZREM', failedKey, jobId)\n redis.call('DEL', jobKey)\n elseif removeMode == 'count' and removeCount > 0 then\n local total = redis.call('ZCARD', failedKey)\n if total > removeCount then\n local excess = redis.call('ZRANGE', failedKey, 0, math.min(total - removeCount, 1000) - 1)\n if #excess > 0 then removeExcessJobs(failedKey, prefix, excess) end\n end\n elseif removeMode == 'age_count' then\n if removeAge > 0 then\n local cutoff = timestamp - (removeAge * 1000)\n local old = redis.call('ZRANGEBYSCORE', failedKey, '0', string.format('%.0f', cutoff), 'LIMIT', 0, 1000)\n if #old > 0 then removeExcessJobs(failedKey, prefix, old) end\n end\n if removeCount > 0 then\n local total = redis.call('ZCARD', failedKey)\n if total > removeCount then\n local excess = redis.call('ZRANGE', failedKey, 0, math.min(total - removeCount, 1000) - 1)\n if #excess > 0 then removeExcessJobs(failedKey, prefix, excess) end\n end\n end\n end\n return 'failed'\n end\nend)\n\nredis.register_function('glidemq_reclaimStalled', function(keys, args)\n local streamKey = keys[1]\n 
local eventsKey = keys[2]\n local group = args[1]\n local consumer = args[2]\n local minIdleMs = tonumber(args[3])\n local maxStalledCount = tonumber(args[4]) or 1\n local timestamp = tonumber(args[5])\n local failedKey = args[6]\n local result = redis.call('XAUTOCLAIM', streamKey, group, consumer, minIdleMs, '0-0')\n local entries = result[2]\n if not entries or #entries == 0 then\n return 0\n end\n local prefix = string.sub(streamKey, 1, #streamKey - 6)\n local count = 0\n for i = 1, #entries do\n local entry = entries[i]\n local entryId = entry[1]\n local fields = entry[2]\n local jobId = nil\n if type(fields) == 'table' then\n for j = 1, #fields, 2 do\n if fields[j] == 'jobId' then\n jobId = fields[j + 1]\n break\n end\n end\n end\n if jobId then\n local jobKey = prefix .. 'job:' .. jobId\n local lastActive = tonumber(redis.call('HGET', jobKey, 'lastActive'))\n if lastActive and (timestamp - lastActive) < minIdleMs then\n count = count + 1\n else\n local stalledCount = redis.call('HINCRBY', jobKey, 'stalledCount', 1)\n if stalledCount > maxStalledCount then\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n redis.call('ZADD', failedKey, timestamp, jobId)\n redis.call('HSET', jobKey,\n 'state', 'failed',\n 'failedReason', 'job stalled more than maxStalledCount',\n 'finishedOn', tostring(timestamp)\n )\n markOrderingDone(jobKey, jobId)\n releaseGroupSlotAndPromote(jobKey, jobId, timestamp)\n emitEvent(eventsKey, 'failed', jobId, {\n 'failedReason', 'job stalled more than maxStalledCount'\n })\n else\n redis.call('HSET', jobKey, 'state', 'active')\n emitEvent(eventsKey, 'stalled', jobId, nil)\n end\n count = count + 1\n end\n end\n end\n return count\nend)\n\nredis.register_function('glidemq_pause', function(keys, args)\n local metaKey = keys[1]\n local eventsKey = keys[2]\n redis.call('HSET', metaKey, 'paused', '1')\n emitEvent(eventsKey, 'paused', '0', nil)\n return 1\nend)\n\nredis.register_function('glidemq_resume', 
function(keys, args)\n local metaKey = keys[1]\n local eventsKey = keys[2]\n redis.call('HSET', metaKey, 'paused', '0')\n emitEvent(eventsKey, 'resumed', '0', nil)\n return 1\nend)\n\n-- glidemq_dedup: dedup-guarded enqueue; simple mode skips while a prior job is unfinished, throttle skips within ttlMs of the stored timestamp, debounce removes a still-delayed/prioritized duplicate and enqueues a fresh job.\nredis.register_function('glidemq_dedup', function(keys, args)\n local dedupKey = keys[1]\n local idKey = keys[2]\n local streamKey = keys[3]\n local scheduledKey = keys[4]\n local eventsKey = keys[5]\n local dedupId = args[1]\n local ttlMs = tonumber(args[2]) or 0\n local mode = args[3]\n local jobName = args[4]\n local jobData = args[5]\n local jobOpts = args[6]\n local timestamp = tonumber(args[7])\n local delay = tonumber(args[8]) or 0\n local priority = tonumber(args[9]) or 0\n local parentId = args[10] or ''\n local maxAttempts = tonumber(args[11]) or 0\n local orderingKey = args[12] or ''\n local groupConcurrency = tonumber(args[13]) or 0\n local groupRateMax = tonumber(args[14]) or 0\n local groupRateDuration = tonumber(args[15]) or 0\n local tbCapacity = tonumber(args[16]) or 0\n local tbRefillRate = tonumber(args[17]) or 0\n local jobCost = tonumber(args[18]) or 0\n local prefix = string.sub(idKey, 1, #idKey - 2)\n local existing = redis.call('HGET', dedupKey, dedupId)\n if mode == 'simple' then\n if existing then\n local sep = string.find(existing, ':')\n if sep then\n local existingJobId = string.sub(existing, 1, sep - 1)\n local jobKey = prefix .. 'job:' .. existingJobId\n local state = redis.call('HGET', jobKey, 'state')\n if state and state ~= 'completed' and state ~= 'failed' then\n return 'skipped'\n end\n end\n end\n elseif mode == 'throttle' then\n if existing and ttlMs > 0 then\n local sep = string.find(existing, ':')\n if sep then\n local storedTs = tonumber(string.sub(existing, sep + 1))\n if storedTs and (timestamp - storedTs) < ttlMs then\n return 'skipped'\n end\n end\n end\n elseif mode == 'debounce' then\n if existing then\n local sep = string.find(existing, ':')\n if sep then\n local existingJobId = string.sub(existing, 1, sep - 1)\n local jobKey = prefix .. 
'job:' .. existingJobId\n local state = redis.call('HGET', jobKey, 'state')\n if state == 'delayed' or state == 'prioritized' then\n redis.call('ZREM', scheduledKey, existingJobId)\n markOrderingDone(jobKey, existingJobId)\n redis.call('DEL', jobKey)\n emitEvent(eventsKey, 'removed', existingJobId, nil)\n elseif state and state ~= 'completed' and state ~= 'failed' then\n return 'skipped'\n end\n end\n end\n end\n local jobId = redis.call('INCR', idKey)\n local jobIdStr = tostring(jobId)\n local jobKey = prefix .. 'job:' .. jobIdStr\n local useGroupConcurrency = (orderingKey ~= '' and (groupConcurrency > 1 or groupRateMax > 0 or tbCapacity > 0))\n local orderingSeq = 0\n if orderingKey ~= '' and not useGroupConcurrency then\n local orderingMetaKey = prefix .. 'ordering'\n orderingSeq = redis.call('HINCRBY', orderingMetaKey, orderingKey, 1)\n end\n if useGroupConcurrency then\n local groupHashKey = prefix .. 'group:' .. orderingKey\n local curMax = tonumber(redis.call('HGET', groupHashKey, 'maxConcurrency')) or 0\n if curMax ~= groupConcurrency then\n redis.call('HSET', groupHashKey, 'maxConcurrency', tostring(groupConcurrency))\n end\n if curMax == 0 and groupConcurrency <= 1 then\n redis.call('HSET', groupHashKey, 'maxConcurrency', '1')\n end\n if groupRateMax > 0 then\n local curRateMax = tonumber(redis.call('HGET', groupHashKey, 'rateMax')) or 0\n if curRateMax ~= groupRateMax then\n redis.call('HSET', groupHashKey, 'rateMax', tostring(groupRateMax))\n end\n local curRateDuration = tonumber(redis.call('HGET', groupHashKey, 'rateDuration')) or 0\n if curRateDuration ~= groupRateDuration then\n redis.call('HSET', groupHashKey, 'rateDuration', tostring(groupRateDuration))\n end\n else\n local oldRateMax = tonumber(redis.call('HGET', groupHashKey, 'rateMax')) or 0\n if oldRateMax > 0 then\n redis.call('HDEL', groupHashKey, 'rateMax', 'rateDuration', 'rateWindowStart', 'rateCount')\n end\n end\n -- Upsert token bucket fields on group hash\n if tbCapacity > 0 then\n 
local curTbCap = tonumber(redis.call('HGET', groupHashKey, 'tbCapacity')) or 0\n if curTbCap ~= tbCapacity then\n redis.call('HSET', groupHashKey, 'tbCapacity', tostring(tbCapacity))\n end\n local curTbRate = tonumber(redis.call('HGET', groupHashKey, 'tbRefillRate')) or 0\n if curTbRate ~= tbRefillRate then\n redis.call('HSET', groupHashKey, 'tbRefillRate', tostring(tbRefillRate))\n end\n -- Initialize tokens on first setup\n if curTbCap == 0 then\n redis.call('HSET', groupHashKey,\n 'tbTokens', tostring(tbCapacity),\n 'tbLastRefill', tostring(timestamp),\n 'tbRefillRemainder', '0')\n end\n -- Validate cost <= capacity at enqueue\n -- Validate cost (explicit or default 1000 millitokens) against capacity\n local effectiveCost = (jobCost > 0) and jobCost or 1000\n if effectiveCost > tbCapacity then\n return 'ERR:COST_EXCEEDS_CAPACITY'\n end\n else\n -- Clear stale tb fields\n local oldTbCap = tonumber(redis.call('HGET', groupHashKey, 'tbCapacity')) or 0\n if oldTbCap > 0 then\n redis.call('HDEL', groupHashKey, 'tbCapacity', 'tbRefillRate', 'tbTokens', 'tbLastRefill', 'tbRefillRemainder')\n end\n end\n end\n local hashFields = {\n 'id', jobIdStr,\n 'name', jobName,\n 'data', jobData,\n 'opts', jobOpts,\n 'timestamp', tostring(timestamp),\n 'attemptsMade', '0',\n 'delay', tostring(delay),\n 'priority', tostring(priority),\n 'maxAttempts', tostring(maxAttempts)\n }\n if useGroupConcurrency then\n hashFields[#hashFields + 1] = 'groupKey'\n hashFields[#hashFields + 1] = orderingKey\n elseif orderingKey ~= '' then\n hashFields[#hashFields + 1] = 'orderingKey'\n hashFields[#hashFields + 1] = orderingKey\n hashFields[#hashFields + 1] = 'orderingSeq'\n hashFields[#hashFields + 1] = tostring(orderingSeq)\n end\n if jobCost > 0 then\n hashFields[#hashFields + 1] = 'cost'\n hashFields[#hashFields + 1] = tostring(jobCost)\n end\n if parentId ~= '' then\n hashFields[#hashFields + 1] = 'parentId'\n hashFields[#hashFields + 1] = parentId\n end\n if delay > 0 or priority > 0 then\n 
hashFields[#hashFields + 1] = 'state'\n hashFields[#hashFields + 1] = delay > 0 and 'delayed' or 'prioritized'\n else\n hashFields[#hashFields + 1] = 'state'\n hashFields[#hashFields + 1] = 'waiting'\n end\n redis.call('HSET', jobKey, unpack(hashFields))\n if delay > 0 then\n local score = priority * PRIORITY_SHIFT + (timestamp + delay)\n redis.call('ZADD', scheduledKey, score, jobIdStr)\n elseif priority > 0 then\n local score = priority * PRIORITY_SHIFT\n redis.call('ZADD', scheduledKey, score, jobIdStr)\n else\n redis.call('XADD', streamKey, '*', 'jobId', jobIdStr)\n end\n redis.call('HSET', dedupKey, dedupId, jobIdStr .. ':' .. tostring(timestamp))\n emitEvent(eventsKey, 'added', jobIdStr, {'name', jobName})\n return jobIdStr\nend)\n\n-- glidemq_rateLimit: fixed-window queue limiter; returns 0 when allowed (counting this call, resetting an expired window) or the ms remaining in the current window when the limit is reached.\nredis.register_function('glidemq_rateLimit', function(keys, args)\n local rateKey = keys[1]\n local metaKey = keys[2]\n local maxPerWindow = tonumber(args[1])\n local windowDuration = tonumber(args[2])\n local now = tonumber(args[3])\n -- Fallback: read rate limit config from meta if not provided inline\n if maxPerWindow <= 0 then\n maxPerWindow = tonumber(redis.call('HGET', metaKey, 'rateLimitMax')) or 0\n windowDuration = tonumber(redis.call('HGET', metaKey, 'rateLimitDuration')) or 0\n if maxPerWindow <= 0 then return 0 end\n end\n local windowStart = tonumber(redis.call('HGET', rateKey, 'windowStart')) or 0\n local count = tonumber(redis.call('HGET', rateKey, 'count')) or 0\n if now - windowStart >= windowDuration then\n redis.call('HSET', rateKey, 'windowStart', tostring(now), 'count', '1')\n return 0\n end\n if count >= maxPerWindow then\n local delayMs = windowDuration - (now - windowStart)\n return delayMs\n end\n redis.call('HSET', rateKey, 'count', tostring(count + 1))\n return 0\nend)\n\n-- glidemq_promoteRateLimited: for up to 100 groups whose registered delay expired, drop tombstoned head jobs, fail head jobs whose cost exceeds bucket capacity, re-register groups still short of tokens, and push promotable jobs back onto the stream.\nredis.register_function('glidemq_promoteRateLimited', function(keys, args)\n local rateLimitedKey = keys[1]\n local streamKey = keys[2]\n local now = tonumber(args[1])\n -- Derive prefix from the server-validated key instead of 
caller-supplied arg\n local prefix = string.sub(rateLimitedKey, 1, #rateLimitedKey - #'ratelimited')\n local expired = redis.call('ZRANGEBYSCORE', rateLimitedKey, '0', string.format('%.0f', now), 'LIMIT', 0, 100)\n if not expired or #expired == 0 then return 0 end\n local promoted = 0\n for i = 1, #expired do\n local gk = expired[i]\n redis.call('ZREM', rateLimitedKey, gk)\n local groupHashKey = prefix .. 'group:' .. gk\n local waitListKey = prefix .. 'groupq:' .. gk\n -- Load all group fields in one call for rate limit + token bucket checks\n local prGrpFields = redis.call('HGETALL', groupHashKey)\n local prGrp = {}\n for pf = 1, #prGrpFields, 2 do prGrp[prGrpFields[pf]] = prGrpFields[pf + 1] end\n local rateMax = tonumber(prGrp.rateMax) or 0\n local maxConc = tonumber(prGrp.maxConcurrency) or 0\n local active = tonumber(prGrp.active) or 0\n -- Token bucket pre-check: peek head job cost before promoting\n local prTbCap = tonumber(prGrp.tbCapacity) or 0\n local tbCheckPassed = true\n if prTbCap > 0 then\n local prTbTokens = tbRefill(groupHashKey, prGrp, now)\n local headJobId = redis.call('LINDEX', waitListKey, 0)\n if headJobId then\n local headJobKey = prefix .. 'job:' .. headJobId\n -- Tombstone guard\n if redis.call('EXISTS', headJobKey) == 0 then\n redis.call('LPOP', waitListKey)\n tbCheckPassed = false\n end\n if tbCheckPassed then\n local headCost = tonumber(redis.call('HGET', headJobKey, 'cost')) or 1000\n -- DLQ guard: cost > capacity\n if headCost > prTbCap then\n redis.call('LPOP', waitListKey)\n redis.call('ZADD', prefix .. 'failed', now, headJobId)\n redis.call('HSET', headJobKey,\n 'state', 'failed',\n 'failedReason', 'cost exceeds token bucket capacity',\n 'finishedOn', tostring(now))\n emitEvent(prefix .. 
'events', 'failed', headJobId, {'failedReason', 'cost exceeds token bucket capacity'})\n tbCheckPassed = false\n end\n if tbCheckPassed and prTbTokens < headCost then\n -- Not enough tokens: re-register with calculated delay\n local prTbRate = math.max(tonumber(prGrp.tbRefillRate) or 0, 1)\n local prTbDelay = math.ceil((headCost - prTbTokens) * 1000 / prTbRate)\n redis.call('ZADD', rateLimitedKey, now + prTbDelay, gk)\n tbCheckPassed = false\n end\n end\n end\n end\n if tbCheckPassed then\n -- Promote up to min(rateMax, available concurrency) jobs.\n -- Do NOT touch rateCount/rateWindowStart here - moveToActive handles\n -- window reset and counting when the worker picks up the promoted jobs.\n local canPromote = 1000\n if rateMax > 0 then\n canPromote = math.min(canPromote, rateMax)\n end\n if maxConc > 0 then\n canPromote = math.min(canPromote, math.max(0, maxConc - active))\n end\n for j = 1, canPromote do\n local nextJobId = redis.call('LPOP', waitListKey)\n if not nextJobId then break end\n redis.call('XADD', streamKey, '*', 'jobId', nextJobId)\n local nextJobKey = prefix .. 'job:' .. 
nextJobId\n redis.call('HSET', nextJobKey, 'state', 'waiting')\n promoted = promoted + 1\n end\n end\n end\n return promoted\nend)\n\n-- glidemq_checkConcurrency: remaining global-concurrency slots derived from the XPENDING summary count for the consumer group; -1 means no global limit configured.\nredis.register_function('glidemq_checkConcurrency', function(keys, args)\n local metaKey = keys[1]\n local streamKey = keys[2]\n local group = args[1]\n local gc = tonumber(redis.call('HGET', metaKey, 'globalConcurrency')) or 0\n if gc <= 0 then\n return -1\n end\n local pending = redis.call('XPENDING', streamKey, group)\n local pendingCount = tonumber(pending[1]) or 0\n local remaining = gc - pendingCount\n if remaining <= 0 then\n return 0\n end\n return remaining\nend)\n\n-- glidemq_moveToActive: gate a claimed job through group concurrency, token-bucket and sliding-window checks (read-only first, mutations only after all gates pass); park it as group-waiting when blocked, else mark active and return the job hash as JSON.\nredis.register_function('glidemq_moveToActive', function(keys, args)\n local jobKey = keys[1]\n local streamKey = keys[2] or ''\n local timestamp = args[1]\n local entryId = args[2] or ''\n local group = args[3] or ''\n local jobId = args[4] or ''\n local exists = redis.call('EXISTS', jobKey)\n if exists == 0 then\n return ''\n end\n local revoked = redis.call('HGET', jobKey, 'revoked')\n if revoked == '1' then\n return 'REVOKED'\n end\n local groupKey = redis.call('HGET', jobKey, 'groupKey')\n if groupKey and groupKey ~= '' then\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n local groupHashKey = prefix .. 'group:' .. groupKey\n -- Load all group fields in one call\n local grpFields = redis.call('HGETALL', groupHashKey)\n local grp = {}\n for f = 1, #grpFields, 2 do grp[grpFields[f]] = grpFields[f + 1] end\n local maxConc = tonumber(grp.maxConcurrency) or 0\n local active = tonumber(grp.active) or 0\n -- Concurrency gate (checked first to avoid burning rate/token slots on parked jobs)\n if maxConc > 0 and active >= maxConc then\n if streamKey ~= '' and entryId ~= '' and group ~= '' then\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n end\n local waitListKey = prefix .. 'groupq:' .. 
groupKey\n redis.call('RPUSH', waitListKey, jobId)\n redis.call('HSET', jobKey, 'state', 'group-waiting')\n return 'GROUP_FULL'\n end\n -- Token bucket gate (read-only)\n local tbCapacity = tonumber(grp.tbCapacity) or 0\n local tbBlocked = false\n local tbDelay = 0\n local tbTokens = 0\n local jobCostVal = 0\n if tbCapacity > 0 then\n tbTokens = tbRefill(groupHashKey, grp, tonumber(timestamp))\n jobCostVal = tonumber(redis.call('HGET', jobKey, 'cost')) or 1000\n -- DLQ guard: cost > capacity\n if jobCostVal > tbCapacity then\n if streamKey ~= '' and entryId ~= '' and group ~= '' then\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n end\n redis.call('ZADD', prefix .. 'failed', tonumber(timestamp), jobId)\n redis.call('HSET', jobKey,\n 'state', 'failed',\n 'failedReason', 'cost exceeds token bucket capacity',\n 'finishedOn', timestamp)\n emitEvent(prefix .. 'events', 'failed', jobId, {'failedReason', 'cost exceeds token bucket capacity'})\n return 'ERR:COST_EXCEEDS_CAPACITY'\n end\n if tbTokens < jobCostVal then\n tbBlocked = true\n local tbRefillRateVal = tonumber(grp.tbRefillRate) or 0\n if tbRefillRateVal <= 0 then tbRefillRateVal = 1 end\n tbDelay = math.ceil((jobCostVal - tbTokens) * 1000 / tbRefillRateVal)\n end\n end\n -- Sliding window gate (read-only)\n local rateMax = tonumber(grp.rateMax) or 0\n local rlBlocked = false\n local rlDelay = 0\n if rateMax > 0 then\n local rateDuration = tonumber(grp.rateDuration) or 0\n local rateWindowStart = tonumber(grp.rateWindowStart) or 0\n local rateCount = tonumber(grp.rateCount) or 0\n local now = tonumber(timestamp)\n if rateDuration > 0 and now - rateWindowStart < rateDuration and rateCount >= rateMax then\n rlBlocked = true\n rlDelay = (rateWindowStart + rateDuration) - now\n end\n end\n -- If ANY gate blocked: park + register\n if tbBlocked or rlBlocked then\n if streamKey ~= '' and entryId ~= '' and group ~= '' then\n redis.call('XACK', streamKey, group, entryId)\n 
redis.call('XDEL', streamKey, entryId)\n end\n local waitListKey = prefix .. 'groupq:' .. groupKey\n redis.call('RPUSH', waitListKey, jobId)\n redis.call('HSET', jobKey, 'state', 'group-waiting')\n local maxDelay = math.max(tbDelay, rlDelay)\n local rateLimitedKey = prefix .. 'ratelimited'\n redis.call('ZADD', rateLimitedKey, tonumber(timestamp) + maxDelay, groupKey)\n if tbBlocked then return 'GROUP_TOKEN_LIMITED' end\n return 'GROUP_RATE_LIMITED'\n end\n -- All gates passed: mutate state\n if tbCapacity > 0 then\n redis.call('HINCRBY', groupHashKey, 'tbTokens', -jobCostVal)\n end\n if rateMax > 0 then\n local rateDuration = tonumber(grp.rateDuration) or 0\n if rateDuration > 0 then\n local rateWindowStart = tonumber(grp.rateWindowStart) or 0\n local now = tonumber(timestamp)\n if now - rateWindowStart >= rateDuration then\n redis.call('HSET', groupHashKey, 'rateWindowStart', tostring(now), 'rateCount', '1')\n else\n redis.call('HINCRBY', groupHashKey, 'rateCount', 1)\n end\n end\n end\n redis.call('HINCRBY', groupHashKey, 'active', 1)\n end\n redis.call('HSET', jobKey, 'state', 'active', 'processedOn', timestamp, 'lastActive', timestamp)\n local fields = redis.call('HGETALL', jobKey)\n return cjson.encode(fields)\nend)\n\n-- glidemq_deferActive: ack and delete the claimed stream entry, then re-add the job to the stream as waiting; returns 0 when the job hash no longer exists.\nredis.register_function('glidemq_deferActive', function(keys, args)\n local streamKey = keys[1]\n local jobKey = keys[2]\n local jobId = args[1]\n local entryId = args[2]\n local group = args[3]\n local exists = redis.call('EXISTS', jobKey)\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n if exists == 0 then\n return 0\n end\n redis.call('XADD', streamKey, '*', 'jobId', jobId)\n redis.call('HSET', jobKey, 'state', 'waiting')\n return 1\nend)\n\n-- glidemq_addFlow: atomically create a waiting-children parent plus its children (8 args and 4 keys per child; group/ordering/cost setup per job, child costs pre-validated against capacity) and record the deps set; returns parent and child ids as JSON.\nredis.register_function('glidemq_addFlow', function(keys, args)\n local parentIdKey = keys[1]\n local parentStreamKey = keys[2]\n local parentScheduledKey = keys[3]\n local parentEventsKey = keys[4]\n local parentName = args[1]\n local parentData = args[2]\n local 
parentOpts = args[3]\n local timestamp = tonumber(args[4])\n local parentDelay = tonumber(args[5]) or 0\n local parentPriority = tonumber(args[6]) or 0\n local parentMaxAttempts = tonumber(args[7]) or 0\n local numChildren = tonumber(args[8])\n local parentJobId = redis.call('INCR', parentIdKey)\n local parentJobIdStr = tostring(parentJobId)\n local parentPrefix = string.sub(parentIdKey, 1, #parentIdKey - 2)\n local parentJobKey = parentPrefix .. 'job:' .. parentJobIdStr\n local depsKey = parentPrefix .. 'deps:' .. parentJobIdStr\n local parentOrderingKey = extractOrderingKeyFromOpts(parentOpts)\n local parentGroupConc = extractGroupConcurrencyFromOpts(parentOpts)\n local parentRateMax, parentRateDuration = extractGroupRateLimitFromOpts(parentOpts)\n local parentTbCapacity, parentTbRefillRate = extractTokenBucketFromOpts(parentOpts)\n local parentCost = extractCostFromOpts(parentOpts)\n local parentUseGroup = (parentOrderingKey ~= '' and (parentGroupConc > 1 or parentRateMax > 0 or parentTbCapacity > 0))\n local parentOrderingSeq = 0\n if parentOrderingKey ~= '' and not parentUseGroup then\n local parentOrderingMetaKey = parentPrefix .. 'ordering'\n parentOrderingSeq = redis.call('HINCRBY', parentOrderingMetaKey, parentOrderingKey, 1)\n end\n local parentHash = {\n 'id', parentJobIdStr,\n 'name', parentName,\n 'data', parentData,\n 'opts', parentOpts,\n 'timestamp', tostring(timestamp),\n 'attemptsMade', '0',\n 'delay', tostring(parentDelay),\n 'priority', tostring(parentPriority),\n 'maxAttempts', tostring(parentMaxAttempts),\n 'state', 'waiting-children'\n }\n if parentUseGroup then\n parentHash[#parentHash + 1] = 'groupKey'\n parentHash[#parentHash + 1] = parentOrderingKey\n local groupHashKey = parentPrefix .. 'group:' .. 
parentOrderingKey\n redis.call('HSET', groupHashKey, 'maxConcurrency', tostring(parentGroupConc > 1 and parentGroupConc or 1))\n redis.call('HSETNX', groupHashKey, 'active', '0')\n if parentRateMax > 0 then\n redis.call('HSET', groupHashKey, 'rateMax', tostring(parentRateMax))\n redis.call('HSET', groupHashKey, 'rateDuration', tostring(parentRateDuration))\n end\n if parentTbCapacity > 0 then\n if parentCost > 0 and parentCost > parentTbCapacity then\n return 'ERR:COST_EXCEEDS_CAPACITY'\n end\n redis.call('HSET', groupHashKey, 'tbCapacity', tostring(parentTbCapacity), 'tbRefillRate', tostring(parentTbRefillRate))\n redis.call('HSETNX', groupHashKey, 'tbTokens', tostring(parentTbCapacity))\n redis.call('HSETNX', groupHashKey, 'tbLastRefill', tostring(timestamp))\n redis.call('HSETNX', groupHashKey, 'tbRefillRemainder', '0')\n end\n elseif parentOrderingKey ~= '' then\n parentHash[#parentHash + 1] = 'orderingKey'\n parentHash[#parentHash + 1] = parentOrderingKey\n parentHash[#parentHash + 1] = 'orderingSeq'\n parentHash[#parentHash + 1] = tostring(parentOrderingSeq)\n end\n if parentCost > 0 then\n parentHash[#parentHash + 1] = 'cost'\n parentHash[#parentHash + 1] = tostring(parentCost)\n end\n redis.call('HSET', parentJobKey, unpack(parentHash))\n -- Pre-validate all children's cost vs capacity before any child writes\n local childArgOffset = 8\n local childKeyOffset = 4\n for i = 1, numChildren do\n local base = childArgOffset + (i - 1) * 8\n local preChildOpts = args[base + 3]\n local preChildTbCap, _ = extractTokenBucketFromOpts(preChildOpts)\n if preChildTbCap > 0 then\n local preChildCost = extractCostFromOpts(preChildOpts)\n local preEffective = (preChildCost > 0) and preChildCost or 1000\n if preEffective > preChildTbCap then\n return 'ERR:COST_EXCEEDS_CAPACITY'\n end\n end\n end\n local childIds = {}\n for i = 1, numChildren do\n local base = childArgOffset + (i - 1) * 8\n local childName = args[base + 1]\n local childData = args[base + 2]\n local childOpts 
= args[base + 3]\n local childDelay = tonumber(args[base + 4]) or 0\n local childPriority = tonumber(args[base + 5]) or 0\n local childMaxAttempts = tonumber(args[base + 6]) or 0\n local childQueuePrefix = args[base + 7]\n local childParentQueue = args[base + 8]\n local ckBase = childKeyOffset + (i - 1) * 4\n local childIdKey = keys[ckBase + 1]\n local childStreamKey = keys[ckBase + 2]\n local childScheduledKey = keys[ckBase + 3]\n local childEventsKey = keys[ckBase + 4]\n local childJobId = redis.call('INCR', childIdKey)\n local childJobIdStr = tostring(childJobId)\n local childPrefix = string.sub(childIdKey, 1, #childIdKey - 2)\n local childJobKey = childPrefix .. 'job:' .. childJobIdStr\n local childOrderingKey = extractOrderingKeyFromOpts(childOpts)\n local childGroupConc = extractGroupConcurrencyFromOpts(childOpts)\n local childRateMax, childRateDuration = extractGroupRateLimitFromOpts(childOpts)\n local childTbCapacity, childTbRefillRate = extractTokenBucketFromOpts(childOpts)\n local childCost = extractCostFromOpts(childOpts)\n local childUseGroup = (childOrderingKey ~= '' and (childGroupConc > 1 or childRateMax > 0 or childTbCapacity > 0))\n local childOrderingSeq = 0\n if childOrderingKey ~= '' and not childUseGroup then\n local childOrderingMetaKey = childPrefix .. 'ordering'\n childOrderingSeq = redis.call('HINCRBY', childOrderingMetaKey, childOrderingKey, 1)\n end\n local childHash = {\n 'id', childJobIdStr,\n 'name', childName,\n 'data', childData,\n 'opts', childOpts,\n 'timestamp', tostring(timestamp),\n 'attemptsMade', '0',\n 'delay', tostring(childDelay),\n 'priority', tostring(childPriority),\n 'maxAttempts', tostring(childMaxAttempts),\n 'parentId', parentJobIdStr,\n 'parentQueue', childParentQueue\n }\n if childUseGroup then\n childHash[#childHash + 1] = 'groupKey'\n childHash[#childHash + 1] = childOrderingKey\n local childGroupHashKey = childPrefix .. 'group:' .. 
childOrderingKey\n redis.call('HSETNX', childGroupHashKey, 'maxConcurrency', tostring(childGroupConc > 1 and childGroupConc or 1))\n redis.call('HSETNX', childGroupHashKey, 'active', '0')\n if childRateMax > 0 then\n redis.call('HSET', childGroupHashKey, 'rateMax', tostring(childRateMax))\n redis.call('HSET', childGroupHashKey, 'rateDuration', tostring(childRateDuration))\n end\n if childTbCapacity > 0 then\n redis.call('HSET', childGroupHashKey, 'tbCapacity', tostring(childTbCapacity), 'tbRefillRate', tostring(childTbRefillRate))\n redis.call('HSETNX', childGroupHashKey, 'tbTokens', tostring(childTbCapacity))\n redis.call('HSETNX', childGroupHashKey, 'tbLastRefill', tostring(timestamp))\n redis.call('HSETNX', childGroupHashKey, 'tbRefillRemainder', '0')\n end\n elseif childOrderingKey ~= '' then\n childHash[#childHash + 1] = 'orderingKey'\n childHash[#childHash + 1] = childOrderingKey\n childHash[#childHash + 1] = 'orderingSeq'\n childHash[#childHash + 1] = tostring(childOrderingSeq)\n end\n if childCost > 0 then\n childHash[#childHash + 1] = 'cost'\n childHash[#childHash + 1] = tostring(childCost)\n end\n if childDelay > 0 or childPriority > 0 then\n childHash[#childHash + 1] = 'state'\n childHash[#childHash + 1] = childDelay > 0 and 'delayed' or 'prioritized'\n else\n childHash[#childHash + 1] = 'state'\n childHash[#childHash + 1] = 'waiting'\n end\n redis.call('HSET', childJobKey, unpack(childHash))\n local depsMember = childQueuePrefix .. ':' .. 
childJobIdStr\n redis.call('SADD', depsKey, depsMember)\n if childDelay > 0 then\n local score = childPriority * PRIORITY_SHIFT + (timestamp + childDelay)\n redis.call('ZADD', childScheduledKey, score, childJobIdStr)\n elseif childPriority > 0 then\n local score = childPriority * PRIORITY_SHIFT\n redis.call('ZADD', childScheduledKey, score, childJobIdStr)\n else\n redis.call('XADD', childStreamKey, '*', 'jobId', childJobIdStr)\n end\n emitEvent(childEventsKey, 'added', childJobIdStr, {'name', childName})\n childIds[#childIds + 1] = childJobIdStr\n end\n local extraDepsOffset = childArgOffset + numChildren * 8\n local numExtraDeps = tonumber(args[extraDepsOffset + 1]) or 0\n for i = 1, numExtraDeps do\n local extraMember = args[extraDepsOffset + 1 + i]\n redis.call('SADD', depsKey, extraMember)\n end\n emitEvent(parentEventsKey, 'added', parentJobIdStr, {'name', parentName})\n local result = {parentJobIdStr}\n for i = 1, #childIds do\n result[#result + 1] = childIds[i]\n end\n return cjson.encode(result)\nend)\n\n-- glidemq_completeChild: count one finished dependency via HINCRBY; when the count reaches the deps set size, move the parent to waiting and push it onto its stream. Returns remaining deps.\nredis.register_function('glidemq_completeChild', function(keys, args)\n local depsKey = keys[1]\n local parentJobKey = keys[2]\n local parentStreamKey = keys[3]\n local parentEventsKey = keys[4]\n local depsMember = args[1]\n local parentId = args[2]\n local doneCount = redis.call('HINCRBY', parentJobKey, 'depsCompleted', 1)\n local totalDeps = redis.call('SCARD', depsKey)\n local remaining = totalDeps - doneCount\n if remaining <= 0 then\n redis.call('HSET', parentJobKey, 'state', 'waiting')\n redis.call('XADD', parentStreamKey, '*', 'jobId', parentId)\n emitEvent(parentEventsKey, 'active', parentId, nil)\n end\n return remaining\nend)\n\n-- glidemq_removeJob: delete a job from every state structure (scheduled/completed/failed zsets, job and log keys), first releasing its group slot or group wait-list entry.\nredis.register_function('glidemq_removeJob', function(keys, args)\n local jobKey = keys[1]\n local streamKey = keys[2]\n local scheduledKey = keys[3]\n local completedKey = keys[4]\n local failedKey = keys[5]\n local eventsKey = keys[6]\n local logKey = keys[7]\n local jobId = args[1]\n local exists = redis.call('EXISTS', 
jobKey)\n if exists == 0 then\n return 0\n end\n local state = redis.call('HGET', jobKey, 'state')\n local groupKey = redis.call('HGET', jobKey, 'groupKey')\n if groupKey and groupKey ~= '' then\n if state == 'active' then\n releaseGroupSlotAndPromote(jobKey, jobId, 0)\n elseif state == 'group-waiting' then\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n local waitListKey = prefix .. 'groupq:' .. groupKey\n redis.call('LREM', waitListKey, 1, jobId)\n end\n end\n redis.call('ZREM', scheduledKey, jobId)\n redis.call('ZREM', completedKey, jobId)\n redis.call('ZREM', failedKey, jobId)\n markOrderingDone(jobKey, jobId)\n redis.call('DEL', jobKey)\n redis.call('DEL', logKey)\n emitEvent(eventsKey, 'removed', jobId, nil)\n return 1\nend)\n\n-- glidemq_clean: delete up to limit jobs scored at or below cutoff from the given state zset, removing their job/log/deps keys and batching ZREM in chunks of 1000; returns the removed ids.\nredis.register_function('glidemq_clean', function(keys, args)\n local setKey = keys[1]\n local eventsKey = keys[2]\n local idKey = keys[3]\n local cutoff = tonumber(args[1])\n local limit = tonumber(args[2])\n if not limit or limit <= 0 then return {} end\n local prefix = string.sub(idKey, 1, #idKey - 2)\n local ids = redis.call('ZRANGEBYSCORE', setKey, '-inf', string.format('%.0f', cutoff), 'LIMIT', 0, limit)\n if #ids == 0 then\n return {}\n end\n for i = 1, #ids do\n redis.call('DEL', prefix .. 'job:' .. ids[i], prefix .. 'log:' .. ids[i], prefix .. 'deps:' .. 
ids[i])\n end\n for i = 1, #ids, 1000 do\n redis.call('ZREM', setKey, unpack(ids, i, math.min(i + 999, #ids)))\n end\n emitEvent(eventsKey, 'cleaned', tostring(#ids), nil)\n return ids\nend)\n\n-- glidemq_revoke: set the revoked flag; group-waiting and waiting/delayed/prioritized jobs are failed immediately with reason revoked (removing any stream/wait-list entry), otherwise only the flag is left for the worker to observe.\nredis.register_function('glidemq_revoke', function(keys, args)\n local jobKey = keys[1]\n local streamKey = keys[2]\n local scheduledKey = keys[3]\n local failedKey = keys[4]\n local eventsKey = keys[5]\n local jobId = args[1]\n local timestamp = tonumber(args[2])\n local group = args[3]\n local exists = redis.call('EXISTS', jobKey)\n if exists == 0 then\n return 'not_found'\n end\n redis.call('HSET', jobKey, 'revoked', '1')\n local state = redis.call('HGET', jobKey, 'state')\n if state == 'group-waiting' then\n local gk = redis.call('HGET', jobKey, 'groupKey')\n if gk and gk ~= '' then\n local prefix = string.sub(jobKey, 1, #jobKey - #('job:' .. jobId))\n local waitListKey = prefix .. 'groupq:' .. gk\n redis.call('LREM', waitListKey, 1, jobId)\n end\n redis.call('ZADD', failedKey, timestamp, jobId)\n redis.call('HSET', jobKey,\n 'state', 'failed',\n 'failedReason', 'revoked',\n 'finishedOn', tostring(timestamp)\n )\n emitEvent(eventsKey, 'revoked', jobId, nil)\n return 'revoked'\n end\n if state == 'waiting' or state == 'delayed' or state == 'prioritized' then\n redis.call('ZREM', scheduledKey, jobId)\n local entries = redis.call('XRANGE', streamKey, '-', '+')\n for i = 1, #entries do\n local entryId = entries[i][1]\n local fields = entries[i][2]\n for j = 1, #fields, 2 do\n if fields[j] == 'jobId' and fields[j+1] == jobId then\n redis.call('XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n break\n end\n end\n end\n redis.call('ZADD', failedKey, timestamp, jobId)\n redis.call('HSET', jobKey,\n 'state', 'failed',\n 'failedReason', 'revoked',\n 'finishedOn', tostring(timestamp)\n )\n markOrderingDone(jobKey, jobId)\n emitEvent(eventsKey, 'revoked', jobId, nil)\n return 'revoked'\n end\n emitEvent(eventsKey, 'revoked', jobId, nil)\n return 
'flagged'\nend)\n\n-- glidemq_changePriority: re-score a waiting/prioritized/delayed job under the new priority (score = priority * PRIORITY_SHIFT, plus the preserved timestamp part for delayed jobs), moving it between stream and scheduled zset as needed.\nredis.register_function('glidemq_changePriority', function(keys, args)\n local jobKey = keys[1]\n local streamKey = keys[2]\n local scheduledKey = keys[3]\n local eventsKey = keys[4]\n local jobId = args[1]\n local newPriority = tonumber(args[2])\n if newPriority == nil or newPriority < 0 then\n return 'error:invalid_priority'\n end\n local group = args[3]\n local exists = redis.call('EXISTS', jobKey)\n if exists == 0 then\n return 'error:not_found'\n end\n local state = redis.call('HGET', jobKey, 'state')\n if state == 'waiting' then\n if newPriority == 0 then\n return 'no_op'\n end\n local cursor = '-'\n local found = false\n while not found do\n local entries = redis.call('XRANGE', streamKey, cursor, '+', 'COUNT', 1000)\n if #entries == 0 then break end\n for i = 1, #entries do\n local entryId = entries[i][1]\n local fields = entries[i][2]\n for j = 1, #fields, 2 do\n if fields[j] == 'jobId' and fields[j+1] == jobId then\n pcall(redis.call, 'XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n found = true\n break\n end\n end\n if found then break end\n end\n if not found then\n local lastId = entries[#entries][1]\n local dashPos = lastId:find('-')\n cursor = lastId:sub(1, dashPos) .. 
tostring(tonumber(lastId:sub(dashPos + 1)) + 1)\n end\n end\n if not found then\n return 'error:not_in_stream'\n end\n redis.call('ZADD', scheduledKey, string.format('%.0f', newPriority * PRIORITY_SHIFT), jobId)\n redis.call('HSET', jobKey, 'state', 'prioritized', 'priority', tostring(newPriority))\n emitEvent(eventsKey, 'priority-changed', jobId, {'priority', tostring(newPriority)})\n return 'ok'\n elseif state == 'prioritized' then\n if newPriority == 0 then\n redis.call('ZREM', scheduledKey, jobId)\n redis.call('XADD', streamKey, '*', 'jobId', jobId)\n redis.call('HSET', jobKey, 'state', 'waiting', 'priority', '0')\n else\n redis.call('ZADD', scheduledKey, string.format('%.0f', newPriority * PRIORITY_SHIFT), jobId)\n redis.call('HSET', jobKey, 'priority', tostring(newPriority))\n end\n emitEvent(eventsKey, 'priority-changed', jobId, {'priority', tostring(newPriority)})\n return 'ok'\n elseif state == 'delayed' then\n local rawScore = redis.call('ZSCORE', scheduledKey, jobId)\n if rawScore == false then\n return 'error:not_in_scheduled'\n end\n local oldScore = tonumber(rawScore) or 0\n local oldTimestamp = oldScore % PRIORITY_SHIFT\n local newScore = newPriority * PRIORITY_SHIFT + oldTimestamp\n redis.call('ZREM', scheduledKey, jobId)\n redis.call('ZADD', scheduledKey, string.format('%.0f', newScore), jobId)\n redis.call('HSET', jobKey, 'priority', tostring(newPriority))\n emitEvent(eventsKey, 'priority-changed', jobId, {'priority', tostring(newPriority)})\n return 'ok'\n else\n return 'error:invalid_state'\n end\nend)\n\n-- glidemq_changeDelay: re-schedule a delayed/waiting/prioritized job with a new delay, preserving the priority component of its zset score; delay 0 on a delayed job demotes it to prioritized or waiting.\nredis.register_function('glidemq_changeDelay', function(keys, args)\n local jobKey = keys[1]\n local streamKey = keys[2]\n local scheduledKey = keys[3]\n local eventsKey = keys[4]\n local jobId = args[1]\n local newDelay = tonumber(args[2])\n if newDelay == nil or newDelay < 0 then\n return 'error:invalid_delay'\n end\n local now = tonumber(args[3])\n local group = args[4]\n local exists = redis.call('EXISTS', jobKey)\n if exists == 0 then\n 
return 'error:not_found'\n end\n local state = redis.call('HGET', jobKey, 'state')\n if state == 'delayed' then\n if newDelay == 0 then\n local rawScore = redis.call('ZSCORE', scheduledKey, jobId)\n if rawScore == false then\n return 'error:not_in_scheduled'\n end\n local oldScore = tonumber(rawScore) or 0\n local priority = math.floor(oldScore / PRIORITY_SHIFT)\n if priority > 0 then\n redis.call('ZADD', scheduledKey, 'XX', string.format('%.0f', priority * PRIORITY_SHIFT), jobId)\n redis.call('HSET', jobKey, 'state', 'prioritized', 'delay', '0')\n else\n redis.call('ZREM', scheduledKey, jobId)\n redis.call('XADD', streamKey, '*', 'jobId', jobId)\n redis.call('HSET', jobKey, 'state', 'waiting', 'delay', '0')\n end\n else\n local rawScore = redis.call('ZSCORE', scheduledKey, jobId)\n if rawScore == false then\n return 'error:not_in_scheduled'\n end\n local oldScore = tonumber(rawScore) or 0\n local priority = math.floor(oldScore / PRIORITY_SHIFT)\n local newScore = priority * PRIORITY_SHIFT + (now + newDelay)\n redis.call('ZADD', scheduledKey, 'XX', string.format('%.0f', newScore), jobId)\n redis.call('HSET', jobKey, 'delay', tostring(newDelay))\n end\n emitEvent(eventsKey, 'delay-changed', jobId, {'delay', tostring(newDelay)})\n return 'ok'\n elseif state == 'waiting' then\n if newDelay == 0 then\n return 'no_op'\n end\n local priority = tonumber(redis.call('HGET', jobKey, 'priority')) or 0\n local cursor = '-'\n local found = false\n while not found do\n local entries = redis.call('XRANGE', streamKey, cursor, '+', 'COUNT', 1000)\n if #entries == 0 then break end\n for i = 1, #entries do\n local entryId = entries[i][1]\n local fields = entries[i][2]\n for j = 1, #fields, 2 do\n if fields[j] == 'jobId' and fields[j+1] == jobId then\n pcall(redis.call, 'XACK', streamKey, group, entryId)\n redis.call('XDEL', streamKey, entryId)\n found = true\n break\n end\n end\n if found then break end\n end\n if not found then\n cursor = '(' .. 
entries[#entries][1]\n end\n end\n if not found then\n return 'error:not_in_stream'\n end\n local newScore = priority * PRIORITY_SHIFT + (now + newDelay)\n redis.call('ZADD', scheduledKey, string.format('%.0f', newScore), jobId)\n redis.call('HSET', jobKey, 'state', 'delayed', 'delay', tostring(newDelay))\n emitEvent(eventsKey, 'delay-changed', jobId, {'delay', tostring(newDelay)})\n return 'ok'\n elseif state == 'prioritized' then\n if newDelay == 0 then\n return 'no_op'\n end\n local rawScore = redis.call('ZSCORE', scheduledKey, jobId)\n if rawScore == false then\n return 'error:not_in_scheduled'\n end\n local oldScore = tonumber(rawScore) or 0\n local priority = math.floor(oldScore / PRIORITY_SHIFT)\n local newScore = priority * PRIORITY_SHIFT + (now + newDelay)\n redis.call('ZADD', scheduledKey, 'XX', string.format('%.0f', newScore), jobId)\n redis.call('HSET', jobKey, 'state', 'delayed', 'delay', tostring(newDelay))\n emitEvent(eventsKey, 'delay-changed', jobId, {'delay', tostring(newDelay)})\n return 'ok'\n else\n return 'error:invalid_state'\n end\nend)\n\n-- glidemq_promoteJob: move a delayed job straight to waiting (scheduled zset to stream, delay reset to 0).\nredis.register_function('glidemq_promoteJob', function(keys, args)\n local jobKey = keys[1]\n local streamKey = keys[2]\n local scheduledKey = keys[3]\n local eventsKey = keys[4]\n local jobId = args[1]\n local exists = redis.call('EXISTS', jobKey)\n if exists == 0 then\n return 'error:not_found'\n end\n local state = redis.call('HGET', jobKey, 'state')\n if state ~= 'delayed' then\n return 'error:not_delayed'\n end\n redis.call('ZREM', scheduledKey, jobId)\n redis.call('XADD', streamKey, '*', 'jobId', jobId)\n redis.call('HSET', jobKey, 'state', 'waiting', 'delay', '0')\n emitEvent(eventsKey, 'promoted', jobId, nil)\n return 'ok'\nend)\n\n-- glidemq_searchByName: scan a state key for jobs whose name matches the filter, up to limit (default 100); body continues below this chunk.\nredis.register_function('glidemq_searchByName', function(keys, args)\n local stateKey = keys[1]\n local stateType = args[1]\n local nameFilter = args[2]\n local limit = tonumber(args[3]) or 100\n local prefix = args[4]\n local matched = {}\n if stateType == 'zset' then\n 
local members = redis.call('ZRANGE', stateKey, 0, -1)\n for i = 1, #members do\n if #matched >= limit then break end\n local jobId = members[i]\n local jobKey = prefix .. 'job:' .. jobId\n local name = redis.call('HGET', jobKey, 'name')\n if name == nameFilter then\n matched[#matched + 1] = jobId\n end\n end\n elseif stateType == 'stream' then\n local entries = redis.call('XRANGE', stateKey, '-', '+')\n for i = 1, #entries do\n if #matched >= limit then break end\n local fields = entries[i][2]\n local jobId = nil\n for j = 1, #fields, 2 do\n if fields[j] == 'jobId' then\n jobId = fields[j + 1]\n break\n end\n end\n if jobId then\n local jobKey = prefix .. 'job:' .. jobId\n local name = redis.call('HGET', jobKey, 'name')\n if name == nameFilter then\n matched[#matched + 1] = jobId\n end\n end\n end\n end\n return matched\nend)\n\nredis.register_function('glidemq_drain', function(keys, args)\n local streamKey = keys[1]\n local scheduledKey = keys[2]\n local eventsKey = keys[3]\n local idKey = keys[4]\n local drainDelayed = args[1] == '1'\n local group = args[2]\n local prefix = string.sub(idKey, 1, #idKey - 2)\n local removed = 0\n\n -- Build set of active entry IDs from PEL via paginated XPENDING\n local activeSet = {}\n local ok, pending = pcall(redis.call, 'XPENDING', streamKey, group, '-', '+', '10000')\n if ok and pending and #pending > 0 then\n for i = 1, #pending do\n activeSet[pending[i][1]] = true\n end\n -- Page through remaining PEL entries if there were exactly 10000\n while #pending == 10000 do\n local lastId = pending[#pending][1]\n local dashPos = lastId:find('-')\n local seq = tonumber(lastId:sub(dashPos + 1))\n local nextStart = lastId:sub(1, dashPos) .. 
tostring(seq + 1)\n ok, pending = pcall(redis.call, 'XPENDING', streamKey, group, nextStart, '+', '10000')\n if ok and pending and #pending > 0 then\n for i = 1, #pending do\n activeSet[pending[i][1]] = true\n end\n else\n break\n end\n end\n end\n\n -- Paginated XRANGE to avoid loading entire stream into memory\n local cursor = '-'\n while true do\n local entries = redis.call('XRANGE', streamKey, cursor, '+', 'COUNT', 1000)\n if #entries == 0 then break end\n\n local toDelete = {}\n for i = 1, #entries do\n local entryId = entries[i][1]\n if not activeSet[entryId] then\n toDelete[#toDelete + 1] = entryId\n local fields = entries[i][2]\n for j = 1, #fields, 2 do\n if fields[j] == 'jobId' and fields[j + 1] ~= '' then\n local jobId = fields[j + 1]\n redis.call('DEL', prefix .. 'job:' .. jobId, prefix .. 'log:' .. jobId, prefix .. 'deps:' .. jobId)\n removed = removed + 1\n break\n end\n end\n end\n end\n if #toDelete > 0 then\n for i = 1, #toDelete, 1000 do\n redis.call('XDEL', streamKey, unpack(toDelete, i, math.min(i + 999, #toDelete)))\n end\n end\n\n -- Advance cursor past the last entry\n local lastId = entries[#entries][1]\n local dashPos = lastId:find('-')\n local seq = tonumber(lastId:sub(dashPos + 1))\n cursor = lastId:sub(1, dashPos) .. tostring(seq + 1)\n end\n\n -- Optionally drain delayed/scheduled jobs\n if drainDelayed then\n local offset = 0\n while true do\n local scheduled = redis.call('ZRANGE', scheduledKey, offset, offset + 999)\n if #scheduled == 0 then break end\n local batch = {}\n for j = 1, #scheduled do\n local jobId = scheduled[j]\n batch[#batch + 1] = prefix .. 'job:' .. jobId\n batch[#batch + 1] = prefix .. 'log:' .. jobId\n batch[#batch + 1] = prefix .. 'deps:' .. 
jobId\n end\n redis.call('DEL', unpack(batch))\n removed = removed + #scheduled\n offset = offset + 1000\n end\n redis.call('DEL', scheduledKey)\n end\n\n if removed > 0 then\n emitEvent(eventsKey, 'drained', tostring(removed), nil)\n end\n return removed\nend)\n\nredis.register_function('glidemq_retryJobs', function(keys, args)\n local failedKey = keys[1]\n local scheduledKey = keys[2]\n local eventsKey = keys[3]\n local idKey = keys[4]\n local count = tonumber(args[1]) or 0\n local timestamp = tonumber(args[2])\n if not timestamp then return redis.error_reply('ERR invalid timestamp') end\n local prefix = string.sub(idKey, 1, #idKey - 2)\n local retried = 0\n\n while true do\n if count > 0 and retried >= count then break end\n local batchSize = 1000\n if count > 0 then\n batchSize = math.min(1000, count - retried)\n end\n local ids = redis.call('ZRANGE', failedKey, 0, batchSize - 1)\n if #ids == 0 then break end\n redis.call('ZREM', failedKey, unpack(ids))\n for i = 1, #ids do\n local jobId = ids[i]\n local jobKey = prefix .. 'job:' .. jobId\n if redis.call('EXISTS', jobKey) == 1 then\n local priority = tonumber(redis.call('HGET', jobKey, 'priority')) or 0\n local score = priority * PRIORITY_SHIFT + timestamp\n redis.call('ZADD', scheduledKey, score, jobId)\n redis.call('HSET', jobKey,\n 'state', 'delayed',\n 'attemptsMade', '0',\n 'failedReason', '',\n 'finishedOn', ''\n )\n retried = retried + 1\n end\n end\n end\n if retried > 0 then\n emitEvent(eventsKey, 'retried', tostring(retried), nil)\n end\n return retried\nend)\n";
7
7
  export type QueueKeys = ReturnType<typeof import('../utils').buildKeys>;
8
8
  /**
9
9
  * Add a job to the queue atomically.
10
10
  * Returns the new job ID (string).
11
11
  */
12
+ /**
13
+ * Build the keys and args arrays for glidemq_addJob, shared by addJob() and Batch callers.
14
+ */
15
+ export declare function addJobArgs(k: QueueKeys, jobName: string, data: string, opts: string, timestamp: number, delay: number, priority: number, parentId: string, maxAttempts: number, orderingKey?: string, groupConcurrency?: number, groupRateMax?: number, groupRateDuration?: number, tbCapacity?: number, tbRefillRate?: number, jobCost?: number): {
16
+ keys: string[];
17
+ args: string[];
18
+ };
12
19
  export declare function addJob(client: Client, k: QueueKeys, jobName: string, data: string, opts: string, timestamp: number, delay: number, priority: number, parentId: string, maxAttempts: number, orderingKey?: string, groupConcurrency?: number, groupRateMax?: number, groupRateDuration?: number, tbCapacity?: number, tbRefillRate?: number, jobCost?: number): Promise<string>;
13
20
  /**
14
21
  * Add a job with deduplication. Checks the dedup hash and either skips or adds the job.
@@ -123,6 +130,29 @@ export declare function deferActive(client: Client, k: QueueKeys, jobId: string,
123
130
  * Returns 1 if removed, 0 if not found.
124
131
  */
125
132
  export declare function removeJob(client: Client, k: QueueKeys, jobId: string): Promise<number>;
133
+ /**
134
+ * Bulk-remove old completed or failed jobs by age.
135
+ * Removes job hashes, log keys, and ZSet entries for jobs older than cutoff.
136
+ * Returns an array of removed job IDs.
137
+ */
138
+ export declare function cleanJobs(client: Client, k: QueueKeys, type: 'completed' | 'failed', grace: number, limit: number, timestamp: number): Promise<string[]>;
139
+ /**
140
+ * Drain the queue: remove all waiting jobs from the stream (skipping active ones).
141
+ * Optionally also remove all delayed/scheduled jobs.
142
+ * Deletes associated job/log/deps hashes. Emits 'drained' event.
143
+ * Returns the number of removed jobs.
144
+ */
145
+ export declare function drainQueue(client: Client, k: QueueKeys, delayed: boolean, group?: string): Promise<number>;
146
+ /**
147
+ * Bulk retry failed jobs.
148
+ * Moves jobs from the failed ZSet to the scheduled ZSet for re-processing.
149
+ * The promote cycle picks them up immediately (score = priority * PRIORITY_SHIFT + now).
150
+ * Resets attemptsMade, failedReason, and finishedOn on each job hash.
151
+ * Emits a single 'retried' event with the total count.
152
+ * @param count - Maximum number of jobs to retry. 0 means all.
153
+ * @returns The number of jobs retried.
154
+ */
155
+ export declare function retryJobs(client: Client, k: QueueKeys, count: number, timestamp: number): Promise<number>;
126
156
  /**
127
157
  * Revoke a job. Sets 'revoked' flag on the job hash.
128
158
  * If the job is waiting/delayed/prioritized, removes from stream/scheduled and moves to failed.
@@ -130,6 +160,24 @@ export declare function removeJob(client: Client, k: QueueKeys, jobId: string):
130
160
  * Returns 'revoked' (moved to failed), 'flagged' (flag set, job is active), or 'not_found'.
131
161
  */
132
162
  export declare function revokeJob(client: Client, k: QueueKeys, jobId: string, timestamp: number, group?: string): Promise<string>;
163
+ /**
164
+ * Change the priority of a job after enqueue.
165
+ * Handles waiting, prioritized, and delayed states. Returns 'ok', 'no_op',
166
+ * or an error string for invalid states.
167
+ */
168
+ export declare function changePriority(client: Client, k: QueueKeys, jobId: string, newPriority: number, group?: string): Promise<string>;
169
+ /**
170
+ * Change the delay of a job after enqueue.
171
+ * Handles delayed, waiting, and prioritized states. Returns 'ok', 'no_op',
172
+ * or an error string for invalid states.
173
+ */
174
+ export declare function changeDelay(client: Client, k: QueueKeys, jobId: string, newDelay: number, group?: string): Promise<string>;
175
+ /**
176
+ * Promote a delayed job to waiting immediately.
177
+ * Removes from the scheduled ZSet, adds to the stream, sets state to 'waiting'.
178
+ * Returns 'ok', 'error:not_found', or 'error:not_delayed'.
179
+ */
180
+ export declare function promoteJob(client: Client, k: QueueKeys, jobId: string): Promise<string>;
133
181
  /**
134
182
  * Search for jobs by name within a specific state structure.
135
183
  * For ZSet states (completed, failed, delayed): iterates members and checks name.
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/functions/index.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,UAAU,CAAC;AACvC,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,mBAAmB,CAAC;AAEzD,eAAO,MAAM,YAAY,YAAY,CAAC;AACtC,eAAO,MAAM,eAAe,OAAO,CAAC;AAGpC,eAAO,MAAM,cAAc,YAAY,CAAC;AAIxC,eAAO,MAAM,cAAc,kjhEAynD1B,CAAC;AAIF,MAAM,MAAM,SAAS,GAAG,UAAU,CAAC,cAAc,UAAU,EAAE,SAAS,CAAC,CAAC;AAIxE;;;GAGG;AACH,wBAAsB,MAAM,CAC1B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,OAAO,EAAE,MAAM,EACf,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,MAAM,EACZ,SAAS,EAAE,MAAM,EACjB,KAAK,EAAE,MAAM,EACb,QAAQ,EAAE,MAAM,EAChB,QAAQ,EAAE,MAAM,EAChB,WAAW,EAAE,MAAM,EACnB,WAAW,GAAE,MAAW,EACxB,gBAAgB,GAAE,MAAU,EAC5B,YAAY,GAAE,MAAU,EACxB,iBAAiB,GAAE,MAAU,EAC7B,UAAU,GAAE,MAAU,EACtB,YAAY,GAAE,MAAU,EACxB,OAAO,GAAE,MAAU,GAClB,OAAO,CAAC,MAAM,CAAC,CAuBjB;AAED;;;GAGG;AACH,wBAAsB,KAAK,CACzB,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,OAAO,EAAE,MAAM,EACf,KAAK,EAAE,MAAM,EACb,IAAI,EAAE,MAAM,EACZ,OAAO,EAAE,MAAM,EACf,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,MAAM,EACZ,SAAS,EAAE,MAAM,EACjB,KAAK,EAAE,MAAM,EACb,QAAQ,EAAE,MAAM,EAChB,QAAQ,EAAE,MAAM,EAChB,WAAW,EAAE,MAAM,EACnB,WAAW,GAAE,MAAW,EACxB,gBAAgB,GAAE,MAAU,EAC5B,YAAY,GAAE,MAAU,EACxB,iBAAiB,GAAE,MAAU,EAC7B,UAAU,GAAE,MAAU,EACtB,YAAY,GAAE,MAAU,EACxB,OAAO,GAAE,MAAU,GAClB,OAAO,CAAC,MAAM,CAAC,CA0BjB;AAED;;;GAGG;AACH,wBAAsB,OAAO,CAC3B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,SAAS,EAAE,MAAM,GAChB,OAAO,CAAC,MAAM,CAAC,CAOjB;AAoBD;;;;;GAKG;AACH,wBAAsB,WAAW,CAC/B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,EACf,WAAW,EAAE,MAAM,EACnB,SAAS,EAAE,MAAM,EACjB,KAAK,GAAE,MAAuB,EAC9B,gBAAgB,CAAC,EAAE,OAAO,GAAG,MAAM,GAAG;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAA;CAAE,EACpE,UAAU,CAAC,EAAE;IAAE,UAAU,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,MAAM,CAAC;IAAC,UAAU,EAAE,SAAS,CAAA;CAAE,GAC3E,OAAO,CAAC,eAAe,CAAC,CA6B1B;AAED;;;;;;;;GAQG;AACH,MAAM,WAAW,sBAAsB;IACrC,SAAS,EAAE,MAAM,CAAC;IAClB,IAAI,EAAE,KAAK,GAAG,SAAS,GAAG,MAAM,EAAE,CAAC;IACnC,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,WAAW,CAAC,EAAE,MA
AM,CAAC;CACtB;AAED,wBAAsB,oBAAoB,CACxC,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,EACf,WAAW,EAAE,MAAM,EACnB,SAAS,EAAE,MAAM,EACjB,KAAK,EAAE,MAAM,EACb,QAAQ,EAAE,MAAM,EAChB,gBAAgB,CAAC,EAAE,OAAO,GAAG,MAAM,GAAG;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAA;CAAE,EACpE,UAAU,CAAC,EAAE;IAAE,UAAU,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,MAAM,CAAC;IAAC,UAAU,EAAE,SAAS,CAAA;CAAE,GAC3E,OAAO,CAAC,sBAAsB,CAAC,CAmCjC;AAED;;;;GAIG;AACH,wBAAsB,OAAO,CAC3B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,EACf,YAAY,EAAE,MAAM,EACpB,SAAS,EAAE,MAAM,EACjB,WAAW,EAAE,MAAM,EACnB,YAAY,EAAE,MAAM,EACpB,KAAK,GAAE,MAAuB,EAC9B,YAAY,CAAC,EAAE,OAAO,GAAG,MAAM,GAAG;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAA;CAAE,GAC/D,OAAO,CAAC,MAAM,CAAC,CAmBjB;AAED;;;GAGG;AACH,wBAAsB,cAAc,CAClC,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,QAAQ,EAAE,MAAM,EAChB,SAAS,EAAE,MAAM,EACjB,eAAe,EAAE,MAAM,EACvB,SAAS,EAAE,MAAM,EACjB,KAAK,GAAE,MAAuB,GAC7B,OAAO,CAAC,MAAM,CAAC,CAcjB;AAED;;GAEG;AACH,wBAAsB,KAAK,CACzB,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,GACX,OAAO,CAAC,IAAI,CAAC,CAMf;AAED;;GAEG;AACH,wBAAsB,MAAM,CAC1B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,GACX,OAAO,CAAC,IAAI,CAAC,CAMf;AAED;;;GAGG;AACH,wBAAsB,SAAS,CAC7B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,YAAY,EAAE,MAAM,EACpB,cAAc,EAAE,MAAM,EACtB,SAAS,EAAE,MAAM,GAChB,OAAO,CAAC,MAAM,CAAC,CAWjB;AAED;;;;GAIG;AACH,wBAAsB,gBAAgB,CACpC,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,GAAE,MAAuB,GAC7B,OAAO,CAAC,MAAM,CAAC,CAOjB;AAED;;;;;;;;;;;;;;GAcG;AACH,wBAAsB,YAAY,CAChC,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EAAE,MAAM,EACb,SAAS,EAAE,MAAM,EACjB,SAAS,GAAE,MAAW,EACtB,OAAO,GAAE,MAAW,EACpB,KAAK,GAAE,MAAW,GACjB,OAAO,CAAC,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,GAAG,SAAS,GAAG,YAAY,GAAG,oBAAoB,GAAG,qBAAqB,GAAG,2BAA2B,GAAG,IAAI,CAAC,CA0BhJ;AAED;;;;GAIG;AACH,wBAAsB,kBAAkB,CACtC,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,SAAS,EAAE,MAAM,GAChB,OAAO,CAAC,MAAM,CAAC,CAOjB;AAED;;;;GAIG;AACH,wBAAsB,WAAW,CAC/B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EAAE,MAAM,EACb,O
AAO,EAAE,MAAM,EACf,KAAK,GAAE,MAAuB,GAC7B,OAAO,CAAC,IAAI,CAAC,CAMf;AAED;;;GAGG;AACH,wBAAsB,SAAS,CAC7B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EAAE,MAAM,GACZ,OAAO,CAAC,MAAM,CAAC,CAOjB;AAED;;;;;GAKG;AACH,wBAAsB,SAAS,CAC7B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EAAE,MAAM,EACb,SAAS,EAAE,MAAM,EACjB,KAAK,GAAE,MAAuB,GAC7B,OAAO,CAAC,MAAM,CAAC,CAOjB;AAED;;;;;GAKG;AACH,wBAAsB,YAAY,CAChC,MAAM,EAAE,MAAM,EACd,QAAQ,EAAE,MAAM,EAChB,SAAS,EAAE,MAAM,GAAG,QAAQ,EAC5B,UAAU,EAAE,MAAM,EAClB,KAAK,EAAE,MAAM,EACb,SAAS,EAAE,MAAM,GAChB,OAAO,CAAC,MAAM,EAAE,CAAC,CAWnB;AAED;;;GAGG;AACH,wBAAsB,OAAO,CAC3B,MAAM,EAAE,MAAM,EACd,UAAU,EAAE,SAAS,EACrB,UAAU,EAAE,MAAM,EAClB,UAAU,EAAE,MAAM,EAClB,UAAU,EAAE,MAAM,EAClB,SAAS,EAAE,MAAM,EACjB,WAAW,EAAE,MAAM,EACnB,cAAc,EAAE,MAAM,EACtB,iBAAiB,EAAE,MAAM,EACzB,QAAQ,EAAE;IACR,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,KAAK,EAAE,MAAM,CAAC;IACd,QAAQ,EAAE,MAAM,CAAC;IACjB,WAAW,EAAE,MAAM,CAAC;IACpB,IAAI,EAAE,SAAS,CAAC;IAChB,WAAW,EAAE,MAAM,CAAC;IACpB,eAAe,EAAE,MAAM,CAAC;CACzB,EAAE,EACH,SAAS,GAAE,MAAM,EAAO,GACvB,OAAO,CAAC,MAAM,EAAE,CAAC,CAwCnB;AAED;;;GAGG;AACH,wBAAsB,aAAa,CACjC,MAAM,EAAE,MAAM,EACd,UAAU,EAAE,SAAS,EACrB,QAAQ,EAAE,MAAM,EAChB,UAAU,EAAE,MAAM,GACjB,OAAO,CAAC,MAAM,CAAC,CAOjB"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/functions/index.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,UAAU,CAAC;AACvC,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,mBAAmB,CAAC;AAEzD,eAAO,MAAM,YAAY,YAAY,CAAC;AACtC,eAAO,MAAM,eAAe,OAAO,CAAC;AAGpC,eAAO,MAAM,cAAc,YAAY,CAAC;AAIxC,eAAO,MAAM,cAAc,6w7EAg+D1B,CAAC;AAIF,MAAM,MAAM,SAAS,GAAG,UAAU,CAAC,cAAc,UAAU,EAAE,SAAS,CAAC,CAAC;AAIxE;;;GAGG;AACH;;GAEG;AACH,wBAAgB,UAAU,CACxB,CAAC,EAAE,SAAS,EACZ,OAAO,EAAE,MAAM,EACf,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,MAAM,EACZ,SAAS,EAAE,MAAM,EACjB,KAAK,EAAE,MAAM,EACb,QAAQ,EAAE,MAAM,EAChB,QAAQ,EAAE,MAAM,EAChB,WAAW,EAAE,MAAM,EACnB,WAAW,GAAE,MAAW,EACxB,gBAAgB,GAAE,MAAU,EAC5B,YAAY,GAAE,MAAU,EACxB,iBAAiB,GAAE,MAAU,EAC7B,UAAU,GAAE,MAAU,EACtB,YAAY,GAAE,MAAU,EACxB,OAAO,GAAE,MAAU,GAClB;IAAE,IAAI,EAAE,MAAM,EAAE,CAAC;IAAC,IAAI,EAAE,MAAM,EAAE,CAAA;CAAE,CAqBpC;AAED,wBAAsB,MAAM,CAC1B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,OAAO,EAAE,MAAM,EACf,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,MAAM,EACZ,SAAS,EAAE,MAAM,EACjB,KAAK,EAAE,MAAM,EACb,QAAQ,EAAE,MAAM,EAChB,QAAQ,EAAE,MAAM,EAChB,WAAW,EAAE,MAAM,EACnB,WAAW,GAAE,MAAW,EACxB,gBAAgB,GAAE,MAAU,EAC5B,YAAY,GAAE,MAAU,EACxB,iBAAiB,GAAE,MAAU,EAC7B,UAAU,GAAE,MAAU,EACtB,YAAY,GAAE,MAAU,EACxB,OAAO,GAAE,MAAU,GAClB,OAAO,CAAC,MAAM,CAAC,CAqBjB;AAED;;;GAGG;AACH,wBAAsB,KAAK,CACzB,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,OAAO,EAAE,MAAM,EACf,KAAK,EAAE,MAAM,EACb,IAAI,EAAE,MAAM,EACZ,OAAO,EAAE,MAAM,EACf,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,MAAM,EACZ,SAAS,EAAE,MAAM,EACjB,KAAK,EAAE,MAAM,EACb,QAAQ,EAAE,MAAM,EAChB,QAAQ,EAAE,MAAM,EAChB,WAAW,EAAE,MAAM,EACnB,WAAW,GAAE,MAAW,EACxB,gBAAgB,GAAE,MAAU,EAC5B,YAAY,GAAE,MAAU,EACxB,iBAAiB,GAAE,MAAU,EAC7B,UAAU,GAAE,MAAU,EACtB,YAAY,GAAE,MAAU,EACxB,OAAO,GAAE,MAAU,GAClB,OAAO,CAAC,MAAM,CAAC,CA0BjB;AAED;;;GAGG;AACH,wBAAsB,OAAO,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,EAAE,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC,CAG9F;AAsBD;;;;;GAKG;AACH,wBAAsB,WAAW,CAC/B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,EACf,WAAW,EAAE,MAAM,EACnB,
SAAS,EAAE,MAAM,EACjB,KAAK,GAAE,MAAuB,EAC9B,gBAAgB,CAAC,EAAE,OAAO,GAAG,MAAM,GAAG;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAA;CAAE,EACpE,UAAU,CAAC,EAAE;IAAE,UAAU,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,MAAM,CAAC;IAAC,UAAU,EAAE,SAAS,CAAA;CAAE,GAC3E,OAAO,CAAC,eAAe,CAAC,CAwB1B;AAED;;;;;;;;GAQG;AACH,MAAM,WAAW,sBAAsB;IACrC,SAAS,EAAE,MAAM,CAAC;IAClB,IAAI,EAAE,KAAK,GAAG,SAAS,GAAG,MAAM,EAAE,CAAC;IACnC,SAAS,CAAC,EAAE,MAAM,CAAC;IACnB,WAAW,CAAC,EAAE,MAAM,CAAC;CACtB;AAED,wBAAsB,oBAAoB,CACxC,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,EACf,WAAW,EAAE,MAAM,EACnB,SAAS,EAAE,MAAM,EACjB,KAAK,EAAE,MAAM,EACb,QAAQ,EAAE,MAAM,EAChB,gBAAgB,CAAC,EAAE,OAAO,GAAG,MAAM,GAAG;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAA;CAAE,EACpE,UAAU,CAAC,EAAE;IAAE,UAAU,EAAE,MAAM,CAAC;IAAC,QAAQ,EAAE,MAAM,CAAC;IAAC,UAAU,EAAE,SAAS,CAAA;CAAE,GAC3E,OAAO,CAAC,sBAAsB,CAAC,CAmDjC;AAED;;;;GAIG;AACH,wBAAsB,OAAO,CAC3B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,EACf,YAAY,EAAE,MAAM,EACpB,SAAS,EAAE,MAAM,EACjB,WAAW,EAAE,MAAM,EACnB,YAAY,EAAE,MAAM,EACpB,KAAK,GAAE,MAAuB,EAC9B,YAAY,CAAC,EAAE,OAAO,GAAG,MAAM,GAAG;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,KAAK,EAAE,MAAM,CAAA;CAAE,GAC/D,OAAO,CAAC,MAAM,CAAC,CAmBjB;AAED;;;GAGG;AACH,wBAAsB,cAAc,CAClC,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,QAAQ,EAAE,MAAM,EAChB,SAAS,EAAE,MAAM,EACjB,eAAe,EAAE,MAAM,EACvB,SAAS,EAAE,MAAM,EACjB,KAAK,GAAE,MAAuB,GAC7B,OAAO,CAAC,MAAM,CAAC,CAOjB;AAED;;GAEG;AACH,wBAAsB,KAAK,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,GAAG,OAAO,CAAC,IAAI,CAAC,CAEvE;AAED;;GAEG;AACH,wBAAsB,MAAM,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,GAAG,OAAO,CAAC,IAAI,CAAC,CAExE;AAED;;;GAGG;AACH,wBAAsB,SAAS,CAC7B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,YAAY,EAAE,MAAM,EACpB,cAAc,EAAE,MAAM,EACtB,SAAS,EAAE,MAAM,GAChB,OAAO,CAAC,MAAM,CAAC,CAOjB;AAED;;;;GAIG;AACH,wBAAsB,gBAAgB,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,EAAE,KAAK,GAAE,MAAuB,GAAG,OAAO,CAAC,MAAM,CAAC,CAGpH;AAED;;;;;;;;;;;;;;GAcG;AACH,wBAAsB,YAAY,CAChC,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EA
AE,MAAM,EACb,SAAS,EAAE,MAAM,EACjB,SAAS,GAAE,MAAW,EACtB,OAAO,GAAE,MAAW,EACpB,KAAK,GAAE,MAAW,GACjB,OAAO,CACN,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,GACtB,SAAS,GACT,YAAY,GACZ,oBAAoB,GACpB,qBAAqB,GACrB,2BAA2B,GAC3B,IAAI,CACP,CAsBA;AAED;;;;GAIG;AACH,wBAAsB,kBAAkB,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,EAAE,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC,CAGzG;AAED;;;;GAIG;AACH,wBAAsB,WAAW,CAC/B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,EACf,KAAK,GAAE,MAAuB,GAC7B,OAAO,CAAC,IAAI,CAAC,CAEf;AAED;;;GAGG;AACH,wBAAsB,SAAS,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,EAAE,KAAK,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC,CAO5F;AAED;;;;GAIG;AACH,wBAAsB,SAAS,CAC7B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,IAAI,EAAE,WAAW,GAAG,QAAQ,EAC5B,KAAK,EAAE,MAAM,EACb,KAAK,EAAE,MAAM,EACb,SAAS,EAAE,MAAM,GAChB,OAAO,CAAC,MAAM,EAAE,CAAC,CAQnB;AAED;;;;;GAKG;AACH,wBAAsB,UAAU,CAC9B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,OAAO,EAAE,OAAO,EAChB,KAAK,GAAE,MAAuB,GAC7B,OAAO,CAAC,MAAM,CAAC,CAOjB;AAED;;;;;;;;GAQG;AACH,wBAAsB,SAAS,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,EAAE,KAAK,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC,CAO/G;AAED;;;;;GAKG;AACH,wBAAsB,SAAS,CAC7B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EAAE,MAAM,EACb,SAAS,EAAE,MAAM,EACjB,KAAK,GAAE,MAAuB,GAC7B,OAAO,CAAC,MAAM,CAAC,CAOjB;AAED;;;;GAIG;AACH,wBAAsB,cAAc,CAClC,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EAAE,MAAM,EACb,WAAW,EAAE,MAAM,EACnB,KAAK,GAAE,MAAuB,GAC7B,OAAO,CAAC,MAAM,CAAC,CAOjB;AAED;;;;GAIG;AACH,wBAAsB,WAAW,CAC/B,MAAM,EAAE,MAAM,EACd,CAAC,EAAE,SAAS,EACZ,KAAK,EAAE,MAAM,EACb,QAAQ,EAAE,MAAM,EAChB,KAAK,GAAE,MAAuB,GAC7B,OAAO,CAAC,MAAM,CAAC,CAOjB;AAED;;;;GAIG;AACH,wBAAsB,UAAU,CAAC,MAAM,EAAE,MAAM,EAAE,CAAC,EAAE,SAAS,EAAE,KAAK,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC,CAG7F;AAED;;;;;GAKG;AACH,wBAAsB,YAAY,CAChC,MAAM,EAAE,MAAM,EACd,QAAQ,EAAE,MAAM,EAChB,SAAS,EAAE,MAAM,GAAG,QAAQ,EAC5B,UAAU,EAAE,MAAM,EAClB,KAAK,EAAE,MAAM,EACb,SAAS,EAAE,MAAM,GAChB,OAAO,CAAC,MAAM,EAAE,CAAC,CAWnB;AAED;;;GAGG;AACH,wBAAsB,OAAO,CAC3B,MAAM,EAAE,MAAM,EACd,UAAU,EAAE,SA
AS,EACrB,UAAU,EAAE,MAAM,EAClB,UAAU,EAAE,MAAM,EAClB,UAAU,EAAE,MAAM,EAClB,SAAS,EAAE,MAAM,EACjB,WAAW,EAAE,MAAM,EACnB,cAAc,EAAE,MAAM,EACtB,iBAAiB,EAAE,MAAM,EACzB,QAAQ,EAAE;IACR,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,IAAI,EAAE,MAAM,CAAC;IACb,KAAK,EAAE,MAAM,CAAC;IACd,QAAQ,EAAE,MAAM,CAAC;IACjB,WAAW,EAAE,MAAM,CAAC;IACpB,IAAI,EAAE,SAAS,CAAC;IAChB,WAAW,EAAE,MAAM,CAAC;IACpB,eAAe,EAAE,MAAM,CAAC;CACzB,EAAE,EACH,SAAS,GAAE,MAAM,EAAO,GACvB,OAAO,CAAC,MAAM,EAAE,CAAC,CAmCnB;AAED;;;GAGG;AACH,wBAAsB,aAAa,CACjC,MAAM,EAAE,MAAM,EACd,UAAU,EAAE,SAAS,EACrB,QAAQ,EAAE,MAAM,EAChB,UAAU,EAAE,MAAM,GACjB,OAAO,CAAC,MAAM,CAAC,CAOjB"}