groupmq-plus 1.1.0
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between package versions.
- package/LICENSE +59 -0
- package/README.md +722 -0
- package/dist/index.cjs +2567 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +1300 -0
- package/dist/index.d.ts +1300 -0
- package/dist/index.js +2557 -0
- package/dist/index.js.map +1 -0
- package/dist/lua/change-delay.lua +62 -0
- package/dist/lua/check-stalled.lua +86 -0
- package/dist/lua/clean-status.lua +64 -0
- package/dist/lua/cleanup-poisoned-group.lua +46 -0
- package/dist/lua/cleanup.lua +46 -0
- package/dist/lua/complete-and-reserve-next-with-metadata.lua +221 -0
- package/dist/lua/complete-with-metadata.lua +190 -0
- package/dist/lua/complete.lua +51 -0
- package/dist/lua/dead-letter.lua +86 -0
- package/dist/lua/enqueue-batch.lua +149 -0
- package/dist/lua/enqueue-flow.lua +107 -0
- package/dist/lua/enqueue.lua +154 -0
- package/dist/lua/get-active-count.lua +6 -0
- package/dist/lua/get-active-jobs.lua +6 -0
- package/dist/lua/get-delayed-count.lua +5 -0
- package/dist/lua/get-delayed-jobs.lua +5 -0
- package/dist/lua/get-unique-groups-count.lua +13 -0
- package/dist/lua/get-unique-groups.lua +15 -0
- package/dist/lua/get-waiting-count.lua +11 -0
- package/dist/lua/get-waiting-jobs.lua +15 -0
- package/dist/lua/heartbeat.lua +22 -0
- package/dist/lua/is-empty.lua +35 -0
- package/dist/lua/promote-delayed-jobs.lua +40 -0
- package/dist/lua/promote-delayed-one.lua +44 -0
- package/dist/lua/promote-staged.lua +70 -0
- package/dist/lua/record-job-result.lua +143 -0
- package/dist/lua/remove.lua +55 -0
- package/dist/lua/reserve-atomic.lua +114 -0
- package/dist/lua/reserve-batch.lua +141 -0
- package/dist/lua/reserve.lua +161 -0
- package/dist/lua/retry.lua +53 -0
- package/package.json +92 -0
@@ -0,0 +1,51 @@
-- Complete a job by removing from processing and unlocking the group
-- Does NOT record job metadata - that's handled separately by record-job-result.lua
-- argv: ns, jobId, groupId
local ns = KEYS[1]
local jobId = ARGV[1]
local gid = ARGV[2]

-- Remove from processing
redis.call("DEL", ns .. ":processing:" .. jobId)
redis.call("ZREM", ns .. ":processing", jobId)

-- Check if this job holds the lock
local lockKey = ns .. ":lock:" .. gid
local val = redis.call("GET", lockKey)
if val == jobId then
  redis.call("DEL", lockKey)

  -- Check if there are more jobs in this group
  local gZ = ns .. ":g:" .. gid
  local jobCount = redis.call("ZCARD", gZ)
  if jobCount == 0 then
    -- Remove empty group zset and from groups tracking set
    redis.call("DEL", gZ)
    redis.call("SREM", ns .. ":groups", gid)
    -- Remove from ready queue
    redis.call("ZREM", ns .. ":ready", gid)
    -- Clean up any buffering state (shouldn't exist but be safe)
    redis.call("DEL", ns .. ":buffer:" .. gid)
    redis.call("ZREM", ns .. ":buffering", gid)
  else
    -- Group has more jobs, re-add to ready set
    -- Note: If the group was buffering, it will be handled by the buffering logic
    -- If it's not buffering, add to ready immediately
    local groupBufferKey = ns .. ":buffer:" .. gid
    local isBuffering = redis.call("EXISTS", groupBufferKey)

    if isBuffering == 0 then
      -- Not buffering, add to ready immediately
      local nextHead = redis.call("ZRANGE", gZ, 0, 0, "WITHSCORES")
      if nextHead and #nextHead >= 2 then
        local nextScore = tonumber(nextHead[2])
        local readyKey = ns .. ":ready"
        redis.call("ZADD", readyKey, nextScore, gid)
      end
    end
    -- If buffering, the scheduler will promote when ready
  end

  return 1
end
return 0
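Judging by its header comment and the +51 line count in the file list above, this hunk is dist/lua/complete.lua. As a rough sketch only (assuming ioredis; the command name, namespace, and IDs below are illustrative, not part of the package's documented API), the script can be registered and invoked directly like this:

import { readFileSync } from "node:fs";
import { join } from "node:path";
import Redis from "ioredis";

const redis = new Redis();
const completeLua = readFileSync(
  join("node_modules", "groupmq-plus", "dist", "lua", "complete.lua"),
  "utf8",
);

// KEYS[1] = namespace, ARGV = [jobId, groupId]
redis.defineCommand("gmqComplete", { numberOfKeys: 1, lua: completeLua });

async function completeJob(ns: string, jobId: string, groupId: string) {
  // Returns 1 when the job held the group lock (lock released, group re-queued
  // or cleaned up), 0 when it did not.
  return (redis as any).gmqComplete(ns, jobId, groupId);
}

The compiled dist/index entry points presumably wrap these scripts behind the package's own queue/worker API, so calling them by hand like this is mainly useful for debugging or experimentation.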
@@ -0,0 +1,86 @@
-- argv: ns, jobId, groupId
local ns = KEYS[1]
local jobId = ARGV[1]
local groupId = ARGV[2]
local gZ = ns .. ":g:" .. groupId
local readyKey = ns .. ":ready"

local jobKey = ns .. ":job:" .. jobId

-- [FLOW SUPPORT: Get parentId before any cleanup]
local parentId = redis.call("HGET", jobKey, "parentId")

-- Remove job from group
redis.call("ZREM", gZ, jobId)

-- Remove from processing if it's there
redis.call("DEL", ns .. ":processing:" .. jobId)
redis.call("ZREM", ns .. ":processing", jobId)

-- No counter operations - use ZCARD for counts

-- Remove idempotence mapping to allow reuse
redis.call("DEL", ns .. ":unique:" .. jobId)

-- BullMQ-style: Remove from group active list if present
local groupActiveKey = ns .. ":g:" .. groupId .. ":active"
redis.call("LREM", groupActiveKey, 1, jobId)

-- Check if group is now empty or should be removed from ready queue
local jobCount = redis.call("ZCARD", gZ)
if jobCount == 0 then
  -- Group is empty, remove from ready queue and clean up
  redis.call("ZREM", readyKey, groupId)
  redis.call("DEL", gZ)
  redis.call("DEL", groupActiveKey)
  redis.call("SREM", ns .. ":groups", groupId)
else
  -- Group still has jobs, update ready queue with new head
  local head = redis.call("ZRANGE", gZ, 0, 0, "WITHSCORES")
  if head and #head >= 2 then
    local headScore = tonumber(head[2])
    redis.call("ZADD", readyKey, headScore, groupId)
  end
end

-- Optionally store in dead letter queue (uncomment if needed)
-- redis.call("LPUSH", ns .. ":dead", jobId)

-- [FLOW SUPPORT: Update parent if this job is a child in a flow]
if parentId then
  local parentKey = ns .. ":job:" .. parentId
  -- 1. Store error result in flow:results hash
  local flowResultsKey = ns .. ":flow:results:" .. parentId
  redis.call("HSET", flowResultsKey, jobId, '{"error":"dead-lettered"}')

  -- 2. Decrement remaining counter
  local remaining = redis.call("HINCRBY", parentKey, "flowRemaining", -1)

  -- 3. If all children done, move parent to waiting
  if remaining <= 0 then
    local parentStatus = redis.call("HGET", parentKey, "status")
    if parentStatus == "waiting-children" then
      redis.call("HSET", parentKey, "status", "waiting")

      local parentGroupId = redis.call("HGET", parentKey, "groupId")
      local parentScore = tonumber(redis.call("HGET", parentKey, "score"))
      if not parentScore then
        parentScore = tonumber(redis.call("TIME")[1]) * 1000
      end

      local pGZ = ns .. ":g:" .. parentGroupId
      redis.call("ZADD", pGZ, parentScore, parentId)
      redis.call("SADD", ns .. ":groups", parentGroupId)

      -- Add to ready if head
      local pHead = redis.call("ZRANGE", pGZ, 0, 0, "WITHSCORES")
      if pHead and #pHead >= 2 then
        local pHeadScore = tonumber(pHead[2])
        redis.call("ZADD", readyKey, pHeadScore, parentGroupId)
      end
    end
  end
end

return 1
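This +86 hunk appears to be dist/lua/dead-letter.lua: it removes the job from its group, drops the idempotence mapping, and, if the job is a child in a flow, records an error result and decrements the parent's flowRemaining counter. A minimal invocation sketch via a raw EVAL (assuming ioredis; the namespace and IDs are illustrative):

import Redis from "ioredis";
import { readFileSync } from "node:fs";

const redis = new Redis();
const deadLetterLua = readFileSync(
  "node_modules/groupmq-plus/dist/lua/dead-letter.lua",
  "utf8",
);

// KEYS[1] = namespace, ARGV = [jobId, groupId]; the script always returns 1.
await redis.eval(deadLetterLua, 1, "gmq:example", "job-123", "group-A");

Note that the LPUSH into ns:dead is commented out in the script itself, so dead-lettered payloads are only reachable through the per-job hash unless that line is enabled.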
@@ -0,0 +1,149 @@
-- Batch enqueue multiple jobs atomically
-- argv: ns, jobsJson, keepCompleted, clientTimestamp, orderingDelayMs
local ns = KEYS[1]
local jobsJson = ARGV[1]
local keepCompleted = tonumber(ARGV[2]) or 0
local clientTimestamp = tonumber(ARGV[3])
local orderingDelayMs = tonumber(ARGV[4]) or 0

local jobs = cjson.decode(jobsJson)

-- Get Redis server time
local timeResult = redis.call("TIME")
local now = tonumber(timeResult[1]) * 1000 + math.floor(tonumber(timeResult[2]) / 1000)

-- Keys
local stageKey = ns .. ":stage"
local readyKey = ns .. ":ready"
local delayedKey = ns .. ":delayed"
local groupsKey = ns .. ":groups"
local timerKey = ns .. ":stage:timer"

local baseEpoch = 1704067200000
local daysSinceEpoch = math.floor(clientTimestamp / 86400000)
local seqKey = ns .. ":seq:" .. daysSinceEpoch

-- Track groups that need ready queue updates
local groupsToUpdate = {}
local results = {}

-- Process all jobs in batch
for i, job in ipairs(jobs) do
  local jobId = job.jobId
  local groupId = job.groupId
  local data = job.data
  local maxAttempts = tonumber(job.maxAttempts)
  local orderMs = tonumber(job.orderMs) or clientTimestamp
  local delayUntil = job.delayMs and (now + tonumber(job.delayMs)) or 0

  -- Idempotence check
  local uniqueKey = ns .. ":unique:" .. jobId
  local uniqueSet = redis.call("SET", uniqueKey, jobId, "NX")

  if uniqueSet then
    -- Generate sequence and score
    local seq = redis.call("INCR", seqKey)
    local relativeMs = orderMs - baseEpoch
    local score = relativeMs * 1000 + seq

    -- Create job hash
    local jobKey = ns .. ":job:" .. jobId
    redis.call("HMSET", jobKey,
      "id", jobId,
      "groupId", groupId,
      "data", data,
      "attempts", "0",
      "maxAttempts", tostring(maxAttempts),
      "seq", tostring(seq),
      "timestamp", tostring(clientTimestamp),
      "orderMs", tostring(orderMs),
      "score", tostring(score),
      "delayUntil", tostring(delayUntil)
    )

    -- Add to groups set
    redis.call("SADD", groupsKey, groupId)

    -- Add to group sorted set
    local gZ = ns .. ":g:" .. groupId
    redis.call("ZADD", gZ, score, jobId)

    -- Determine job placement
    local jobStatus = "waiting"

    if delayUntil > 0 and delayUntil > now then
      -- Delayed job
      redis.call("ZADD", delayedKey, delayUntil, jobId)
      jobStatus = "delayed"
      redis.call("HSET", jobKey, "status", jobStatus)
    elseif orderMs and orderingDelayMs > 0 then
      -- Staged job (ordering)
      local releaseAt = orderMs + orderingDelayMs
      redis.call("ZADD", stageKey, releaseAt, jobId)
      jobStatus = "staged"
      redis.call("HSET", jobKey, "status", jobStatus)
    else
      -- Ready to process
      jobStatus = "waiting"
      redis.call("HSET", jobKey, "status", jobStatus)
      -- Mark group for ready queue update (batch later)
      groupsToUpdate[groupId] = true
    end

    -- Store job metadata to return
    table.insert(results, {
      jobId,
      groupId,
      data,
      "0", -- attempts
      tostring(maxAttempts),
      tostring(clientTimestamp),
      tostring(orderMs),
      tostring(delayUntil),
      jobStatus,
    })
  else
    -- Job ID already exists (idempotence) - fetch existing job data
    local jobKey = ns .. ":job:" .. jobId
    local jobData = redis.call("HMGET", jobKey, "id", "groupId", "data", "attempts", "maxAttempts", "timestamp", "orderMs", "delayUntil", "status")
    if jobData[1] then
      table.insert(results, jobData)
    else
      -- Shouldn't happen but handle gracefully
      table.insert(results, {
        jobId,
        groupId,
        data,
        "0",
        tostring(maxAttempts),
        tostring(clientTimestamp),
        tostring(orderMs),
        tostring(delayUntil),
        "waiting",
      })
    end
  end
end

-- Batch update ready queue for all affected groups
for groupId, _ in pairs(groupsToUpdate) do
  local gZ = ns .. ":g:" .. groupId
  local head = redis.call("ZRANGE", gZ, 0, 0, "WITHSCORES")
  if head and #head >= 2 then
    local headScore = tonumber(head[2])
    redis.call("ZADD", readyKey, headScore, groupId)
  end
end

-- Update staging timer if needed
if orderingDelayMs > 0 then
  local currentHead = redis.call("ZRANGE", stageKey, 0, 0, "WITHSCORES")
  if currentHead and #currentHead >= 2 then
    local headReleaseAt = tonumber(currentHead[2])
    local ttlMs = math.max(1, headReleaseAt - now)
    redis.call("SET", timerKey, "1", "PX", ttlMs)
  end
end

return results
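This hunk matches dist/lua/enqueue-batch.lua by its header comment and +149 line count. The batch is passed as a single JSON array, and each element carries the fields the script reads (jobId, groupId, data, maxAttempts, and optional orderMs / delayMs). A sketch of assembling that payload and calling the script directly (assuming ioredis; namespace and values are illustrative):

import Redis from "ioredis";
import { readFileSync } from "node:fs";

const redis = new Redis();
const enqueueBatchLua = readFileSync(
  "node_modules/groupmq-plus/dist/lua/enqueue-batch.lua",
  "utf8",
);

const jobs = [
  { jobId: "job-1", groupId: "group-A", data: JSON.stringify({ n: 1 }), maxAttempts: 3 },
  { jobId: "job-2", groupId: "group-A", data: JSON.stringify({ n: 2 }), maxAttempts: 3, delayMs: 5000 },
];

// KEYS[1] = namespace; ARGV = [jobsJson, keepCompleted, clientTimestamp, orderingDelayMs]
const results = await redis.eval(
  enqueueBatchLua,
  1,
  "gmq:example",
  JSON.stringify(jobs),
  0,          // keepCompleted
  Date.now(), // clientTimestamp
  0,          // orderingDelayMs (0 = no staging)
);
// Each entry: [jobId, groupId, data, attempts, maxAttempts, timestamp, orderMs, delayUntil, status]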
@@ -0,0 +1,107 @@
-- Atomic Flow Creation
-- KEYS: [ns]
-- ARGV: [parentId, parentGroupId, parentData, parentMaxAttempts, parentOrderMs, now, ...childrenArgs]
-- childrenArgs: [id, groupId, data, maxAttempts, orderMs, delay, ...] (6 fields per child)

local ns = KEYS[1]
local parentId = ARGV[1]
local parentGroupId = ARGV[2]
local parentData = ARGV[3]
local parentMaxAttempts = ARGV[4]
local parentOrderMs = tonumber(ARGV[5])
local now = tonumber(ARGV[6])

local baseEpoch = 1704067200000
local parentKey = ns .. ":job:" .. parentId
local uniqueKey = ns .. ":unique:" .. parentId

-- Check idempotence for parent
if redis.call("EXISTS", uniqueKey) == 1 then
  return nil -- Already exists
end
redis.call("SET", uniqueKey, parentId)

local childrenCount = (#ARGV - 6) / 6

-- 1. Setup Parent Job
-- Status is 'waiting-children', NOT 'waiting'. It is NOT added to ready queue yet.
local parentRelativeMs = parentOrderMs - baseEpoch
local parentDaysSinceEpoch = math.floor(parentOrderMs / 86400000)
local parentSeqKey = ns .. ":seq:" .. parentDaysSinceEpoch
local parentSeq = redis.call("INCR", parentSeqKey)
local parentScore = parentRelativeMs * 1000 + parentSeq

redis.call("HMSET", parentKey,
  "id", parentId,
  "groupId", parentGroupId,
  "data", parentData,
  "attempts", "0",
  "maxAttempts", parentMaxAttempts,
  "timestamp", tostring(now),
  "orderMs", tostring(parentOrderMs),
  "score", tostring(parentScore),
  "seq", tostring(parentSeq),
  "status", "waiting-children",
  "flowRemaining", tostring(childrenCount)
)

-- 2. Setup Children Jobs
local results = {}

for i = 0, childrenCount - 1 do
  local offset = 6 + (i * 6)
  local childId = ARGV[offset + 1]
  local childGroupId = ARGV[offset + 2]
  local childData = ARGV[offset + 3]
  local childMaxAttempts = ARGV[offset + 4]
  local childOrderMs = tonumber(ARGV[offset + 5])
  local childDelay = tonumber(ARGV[offset + 6])

  local childKey = ns .. ":job:" .. childId

  -- Generate score (replicate enqueue.lua logic)
  local relativeMs = childOrderMs - baseEpoch
  local daysSinceEpoch = math.floor(childOrderMs / 86400000)
  local seqKey = ns .. ":seq:" .. daysSinceEpoch
  local seq = redis.call("INCR", seqKey)
  local score = relativeMs * 1000 + seq

  -- Create Child Hash
  redis.call("HMSET", childKey,
    "id", childId,
    "groupId", childGroupId,
    "parentId", parentId, -- The link to parent
    "data", childData,
    "attempts", "0",
    "maxAttempts", childMaxAttempts,
    "timestamp", tostring(now),
    "orderMs", tostring(childOrderMs),
    "score", tostring(score),
    "seq", tostring(seq),
    "status", "waiting"
  )

  redis.call("SET", ns .. ":unique:" .. childId, childId)

  -- Handle delay or immediate waiting
  if childDelay > 0 then
    local delayUntil = now + childDelay
    redis.call("HSET", childKey, "delayUntil", tostring(delayUntil), "status", "delayed")
    redis.call("ZADD", ns .. ":delayed", delayUntil, childId)
  else
    -- Add to group ZSET
    local gZ = ns .. ":g:" .. childGroupId
    redis.call("ZADD", gZ, score, childId)

    -- Update ready queue if this is the new head
    local head = redis.call("ZRANGE", gZ, 0, 0, "WITHSCORES")
    if head and #head >= 2 then
      local headScore = tonumber(head[2])
      redis.call("ZADD", ns .. ":ready", headScore, childGroupId)
    end
  end

  table.insert(results, childId)
end

return results
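This +107 hunk looks like dist/lua/enqueue-flow.lua. The parent occupies the first six ARGV slots and each child appends exactly six more, in the order given by the header comment. A sketch of flattening a parent-plus-children flow into that argument list (assuming ioredis; IDs and payloads are illustrative):

import Redis from "ioredis";
import { readFileSync } from "node:fs";

const redis = new Redis();
const enqueueFlowLua = readFileSync(
  "node_modules/groupmq-plus/dist/lua/enqueue-flow.lua",
  "utf8",
);

const now = Date.now();
const children = [
  { id: "child-1", groupId: "group-A", data: "{}", maxAttempts: 3, orderMs: now, delay: 0 },
  { id: "child-2", groupId: "group-B", data: "{}", maxAttempts: 3, orderMs: now, delay: 1000 },
];

// ARGV = [parentId, parentGroupId, parentData, parentMaxAttempts, parentOrderMs, now,
//         ...(id, groupId, data, maxAttempts, orderMs, delay) per child]
const childIds = await redis.eval(
  enqueueFlowLua,
  1,
  "gmq:example",
  "parent-1", "group-A", "{}", 3, now, now,
  ...children.flatMap((c) => [c.id, c.groupId, c.data, c.maxAttempts, c.orderMs, c.delay]),
);
// Returns the child IDs, or null when "parent-1" already exists (idempotence check).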
@@ -0,0 +1,154 @@
-- argv: ns, groupId, dataJson, maxAttempts, orderMs, delayUntil, jobId, keepCompleted, clientTimestamp, orderingDelayMs
local ns = KEYS[1]
local groupId = ARGV[1]
local data = ARGV[2]
local maxAttempts = tonumber(ARGV[3])
local orderMs = tonumber(ARGV[4])
local delayUntil = tonumber(ARGV[5])
local jobId = ARGV[6]
local keepCompleted = tonumber(ARGV[7]) or 0
local clientTimestamp = tonumber(ARGV[8])
local orderingDelayMs = tonumber(ARGV[9]) or 0

local readyKey = ns .. ":ready"
local delayedKey = ns .. ":delayed"
local stageKey = ns .. ":stage"
local timerKey = ns .. ":stage:timer"
local jobKey = ns .. ":job:" .. jobId
local groupsKey = ns .. ":groups"

-- Idempotence: ensure unique jobId per queue namespace with stale-key recovery
local uniqueKey = ns .. ":unique:" .. jobId
local uniqueSet = redis.call("SET", uniqueKey, jobId, "NX")
if not uniqueSet then
  -- Duplicate detected. Check for stale unique mapping
  local exists = redis.call("EXISTS", jobKey)
  if exists == 0 then
    -- Job doesn't exist but unique key does (stale), clean up and proceed
    redis.call("DEL", uniqueKey)
    redis.call("SET", uniqueKey, jobId)
  else
    -- Job exists, check its status and location
    local gid = redis.call("HGET", jobKey, "groupId")
    local inProcessing = redis.call("ZSCORE", ns .. ":processing", jobId)
    local inDelayed = redis.call("ZSCORE", ns .. ":delayed", jobId)
    local inGroup = nil
    if gid then
      inGroup = redis.call("ZSCORE", ns .. ":g:" .. gid, jobId)
    end
    if (not inProcessing) and (not inDelayed) and (not inGroup) then
      if keepCompleted == 0 then
        redis.call("DEL", jobKey)
        redis.call("DEL", uniqueKey)
        redis.call("SET", uniqueKey, jobId)
      else
        -- Job hash exists and we're keeping completed jobs, ensure unique key exists
        redis.call("SET", uniqueKey, jobId)
        return jobId
      end
    else
      if keepCompleted == 0 then
        local status = redis.call("HGET", jobKey, "status")
        if status == "completed" then
          redis.call("DEL", jobKey)
          redis.call("DEL", uniqueKey)
          redis.call("SET", uniqueKey, jobId)
        else
          -- Job is still active, ensure unique key exists
          redis.call("SET", uniqueKey, jobId)
          return jobId
        end
      end
      local activeAgain = redis.call("ZSCORE", ns .. ":processing", jobId)
      local delayedAgain = redis.call("ZSCORE", ns .. ":delayed", jobId)
      local inGroupAgain = nil
      if gid then
        inGroupAgain = redis.call("ZSCORE", ns .. ":g:" .. gid, jobId)
      end
      local jobStillExists = redis.call("EXISTS", jobKey)
      if jobStillExists == 1 and (activeAgain or delayedAgain or inGroupAgain) then
        return jobId
      end
    end
  end
end

local gZ = ns .. ":g:" .. groupId

if not orderMs then
  orderMs = tonumber(redis.call("TIME")[1]) * 1000
end
local baseEpoch = 1704067200000
local relativeMs = orderMs - baseEpoch

-- Use date-based sequence key to auto-reset daily (prevents max int overflow)
local daysSinceEpoch = math.floor(orderMs / 86400000)
local seqKey = ns .. ":seq:" .. daysSinceEpoch
local seq = redis.call("INCR", seqKey)
local score = relativeMs * 1000 + seq

-- Get Redis server time for buffering logic (to be consistent with server time)
local timeResult = redis.call("TIME")
local now = tonumber(timeResult[1]) * 1000 + math.floor(tonumber(timeResult[2]) / 1000)

-- Use client timestamp for the job hash so timing calculations are accurate from client perspective
local timestamp = clientTimestamp or now

redis.call("HMSET", jobKey,
  "id", jobId,
  "groupId", groupId,
  "data", data,
  "attempts", "0",
  "maxAttempts", tostring(maxAttempts),
  "seq", tostring(seq),
  "timestamp", tostring(timestamp),
  "orderMs", tostring(orderMs),
  "score", tostring(score),
  "delayUntil", tostring(delayUntil)
)

-- Track group membership (idempotent)
redis.call("SADD", groupsKey, groupId)

-- Determine job status and placement
local jobStatus = "waiting"

if delayUntil > 0 and delayUntil > now then
  -- Job is delayed, add to delayed set and group set
  redis.call("ZADD", gZ, score, jobId)
  redis.call("ZADD", delayedKey, delayUntil, jobId)
  jobStatus = "delayed"
  redis.call("HSET", jobKey, "status", jobStatus)
elseif orderMs and orderingDelayMs > 0 then
  -- Job should be staged for ordering (orderMs provided and orderingDelayMs > 0)
  -- NOTE: Do NOT add to group ZSET yet - only to staging
  local releaseAt = orderMs + orderingDelayMs
  redis.call("ZADD", stageKey, releaseAt, jobId)
  jobStatus = "staged"
  redis.call("HSET", jobKey, "status", jobStatus)

  -- Update/set timer to earliest staged job
  local currentHead = redis.call("ZRANGE", stageKey, 0, 0, "WITHSCORES")
  if currentHead and #currentHead >= 2 then
    local headReleaseAt = tonumber(currentHead[2])
    -- Set timer to expire when the earliest job is ready
    local ttlMs = math.max(1, headReleaseAt - now)
    redis.call("SET", timerKey, "1", "PX", ttlMs)
  end
else
  -- Job is not delayed and not staged, add to group set and make ready
  redis.call("ZADD", gZ, score, jobId)
  jobStatus = "waiting"
  redis.call("HSET", jobKey, "status", jobStatus)
  local head = redis.call("ZRANGE", gZ, 0, 0, "WITHSCORES")
  if head and #head >= 2 then
    local headScore = tonumber(head[2])
    redis.call("ZADD", readyKey, headScore, groupId)
  end
end

-- Return job data to avoid race condition where job might be processed & cleaned up
-- before getJob() is called
return {jobId, groupId, data, "0", tostring(maxAttempts), tostring(timestamp), tostring(orderMs), tostring(delayUntil), jobStatus}
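This +154 hunk is the single-job dist/lua/enqueue.lua. The namespace goes in KEYS[1] and the remaining nine parameters in ARGV, in the order of the header comment; on success it returns the freshly written job fields so the caller does not have to race a follow-up read. A call sketch (assuming ioredis; values are illustrative):

import Redis from "ioredis";
import { readFileSync } from "node:fs";

const redis = new Redis();
const enqueueLua = readFileSync(
  "node_modules/groupmq-plus/dist/lua/enqueue.lua",
  "utf8",
);

const now = Date.now();
// ARGV = [groupId, dataJson, maxAttempts, orderMs, delayUntil, jobId,
//         keepCompleted, clientTimestamp, orderingDelayMs]
const job = await redis.eval(
  enqueueLua,
  1,
  "gmq:example",
  "group-A",                    // groupId
  JSON.stringify({ hello: 1 }), // data
  3,                            // maxAttempts
  now,                          // orderMs
  0,                            // delayUntil (0 = not delayed)
  "job-123",                    // jobId
  0,                            // keepCompleted
  now,                          // clientTimestamp
  0,                            // orderingDelayMs
);
// New job: [jobId, groupId, data, attempts, maxAttempts, timestamp, orderMs, delayUntil, status]
// Duplicate that is still live: just the existing jobId string.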
@@ -0,0 +1,13 @@
-- argv: ns
local ns = KEYS[1]
local groupsKey = ns .. ":groups"
local groupIds = redis.call("SMEMBERS", groupsKey)
local count = 0
for _, groupId in ipairs(groupIds) do
  local gZ = ns .. ":g:" .. groupId
  local jobCount = redis.call("ZCARD", gZ)
  if jobCount > 0 then
    count = count + 1
  end
end
return count
@@ -0,0 +1,15 @@
-- argv: ns
local ns = KEYS[1]
local groupsKey = ns .. ":groups"
local groupIds = redis.call("SMEMBERS", groupsKey)
local groups = {}
for _, groupId in ipairs(groupIds) do
  local gZ = ns .. ":g:" .. groupId
  local jobCount = redis.call("ZCARD", gZ)
  if jobCount > 0 then
    table.insert(groups, groupId)
  end
end
return groups
@@ -0,0 +1,11 @@
local ns = KEYS[1]
local groupsKey = ns .. ":groups"
local groupIds = redis.call("SMEMBERS", groupsKey)
local total = 0
for _, gid in ipairs(groupIds) do
  local gk = ns .. ":g:" .. gid
  total = total + (redis.call("ZCARD", gk) or 0)
end
return total
@@ -0,0 +1,15 @@
-- argv: ns
local ns = KEYS[1]
local groupsKey = ns .. ":groups"
local groupIds = redis.call("SMEMBERS", groupsKey)
local jobs = {}
for _, gid in ipairs(groupIds) do
  local gZ = ns .. ":g:" .. gid
  local groupJobs = redis.call("ZRANGE", gZ, 0, -1)
  for _, jobId in ipairs(groupJobs) do
    table.insert(jobs, jobId)
  end
end
return jobs
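The four short hunks above are read-only introspection scripts; by their line counts they match get-unique-groups-count.lua, get-unique-groups.lua, get-waiting-count.lua, and get-waiting-jobs.lua. Each walks the ns:groups set and aggregates ZCARD/ZRANGE over the per-group ZSETs. A combined sketch (assuming ioredis; the namespace is illustrative):

import Redis from "ioredis";
import { readFileSync } from "node:fs";

const redis = new Redis();
const luaDir = "node_modules/groupmq-plus/dist/lua";
const load = (file: string) => readFileSync(`${luaDir}/${file}`, "utf8");

const ns = "gmq:example";

// All four scripts take only KEYS[1] = namespace.
const groupCount = await redis.eval(load("get-unique-groups-count.lua"), 1, ns);
const groups = await redis.eval(load("get-unique-groups.lua"), 1, ns);
const waitingCount = await redis.eval(load("get-waiting-count.lua"), 1, ns);
const waitingJobIds = await redis.eval(load("get-waiting-jobs.lua"), 1, ns);

console.log({ groupCount, groups, waitingCount, waitingJobIds });

Since each call does an SMEMBERS over all groups plus a per-group ZCARD or ZRANGE, the cost grows with the number of groups; these appear intended for inspection and dashboards rather than hot paths.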
@@ -0,0 +1,22 @@
-- argv: ns, jobId, groupId, extendMs
local ns = KEYS[1]
local jobId = ARGV[1]
local gid = ARGV[2]
local extendMs = tonumber(ARGV[3])

-- BullMQ-style: only extend processing deadline, no group lock
local procKey = ns .. ":processing:" .. jobId
local exists = redis.call("EXISTS", procKey)
if exists == 1 then
  local now = tonumber(redis.call("TIME")[1]) * 1000
  local newDeadline = now + extendMs
  redis.call("HSET", procKey, "deadlineAt", tostring(newDeadline))

  -- Also update the processing ZSET score
  local processingKey = ns .. ":processing"
  redis.call("ZADD", processingKey, newDeadline, jobId)
  return 1
end
return 0
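This final +22 hunk is dist/lua/heartbeat.lua: it pushes the job's processing deadline forward by extendMs, both in the per-job processing hash and in the ns:processing ZSET (presumably consulted by check-stalled.lua). A sketch of a worker keeping a long-running job alive while its handler executes (assuming ioredis; the interval, extension, and helper name are illustrative):

import Redis from "ioredis";
import { readFileSync } from "node:fs";

const redis = new Redis();
const heartbeatLua = readFileSync(
  "node_modules/groupmq-plus/dist/lua/heartbeat.lua",
  "utf8",
);

async function withHeartbeat<T>(
  ns: string,
  jobId: string,
  groupId: string,
  work: () => Promise<T>,
): Promise<T> {
  // Extend the processing deadline by 30s every 10s while the handler runs.
  const timer = setInterval(() => {
    // KEYS[1] = namespace, ARGV = [jobId, groupId, extendMs]; returns 1 while the
    // processing key still exists, 0 once the job is no longer being processed.
    redis.eval(heartbeatLua, 1, ns, jobId, groupId, 30_000).catch(() => {});
  }, 10_000);
  try {
    return await work();
  } finally {
    clearInterval(timer);
  }
}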