groupmq-plus 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/LICENSE +59 -0
  2. package/README.md +722 -0
  3. package/dist/index.cjs +2567 -0
  4. package/dist/index.cjs.map +1 -0
  5. package/dist/index.d.cts +1300 -0
  6. package/dist/index.d.ts +1300 -0
  7. package/dist/index.js +2557 -0
  8. package/dist/index.js.map +1 -0
  9. package/dist/lua/change-delay.lua +62 -0
  10. package/dist/lua/check-stalled.lua +86 -0
  11. package/dist/lua/clean-status.lua +64 -0
  12. package/dist/lua/cleanup-poisoned-group.lua +46 -0
  13. package/dist/lua/cleanup.lua +46 -0
  14. package/dist/lua/complete-and-reserve-next-with-metadata.lua +221 -0
  15. package/dist/lua/complete-with-metadata.lua +190 -0
  16. package/dist/lua/complete.lua +51 -0
  17. package/dist/lua/dead-letter.lua +86 -0
  18. package/dist/lua/enqueue-batch.lua +149 -0
  19. package/dist/lua/enqueue-flow.lua +107 -0
  20. package/dist/lua/enqueue.lua +154 -0
  21. package/dist/lua/get-active-count.lua +6 -0
  22. package/dist/lua/get-active-jobs.lua +6 -0
  23. package/dist/lua/get-delayed-count.lua +5 -0
  24. package/dist/lua/get-delayed-jobs.lua +5 -0
  25. package/dist/lua/get-unique-groups-count.lua +13 -0
  26. package/dist/lua/get-unique-groups.lua +15 -0
  27. package/dist/lua/get-waiting-count.lua +11 -0
  28. package/dist/lua/get-waiting-jobs.lua +15 -0
  29. package/dist/lua/heartbeat.lua +22 -0
  30. package/dist/lua/is-empty.lua +35 -0
  31. package/dist/lua/promote-delayed-jobs.lua +40 -0
  32. package/dist/lua/promote-delayed-one.lua +44 -0
  33. package/dist/lua/promote-staged.lua +70 -0
  34. package/dist/lua/record-job-result.lua +143 -0
  35. package/dist/lua/remove.lua +55 -0
  36. package/dist/lua/reserve-atomic.lua +114 -0
  37. package/dist/lua/reserve-batch.lua +141 -0
  38. package/dist/lua/reserve.lua +161 -0
  39. package/dist/lua/retry.lua +53 -0
  40. package/package.json +92 -0
package/dist/lua/reserve.lua ADDED
@@ -0,0 +1,161 @@
+ -- argv: ns, nowEpochMs, vtMs, scanLimit
+ local ns = KEYS[1]
+ local now = tonumber(ARGV[1])
+ local vt = tonumber(ARGV[2])
+ local scanLimit = tonumber(ARGV[3]) or 20
+
+ local readyKey = ns .. ":ready"
+
+ -- Respect paused state
+ if redis.call("GET", ns .. ":paused") then
+   return nil
+ end
+
+ -- STALLED JOB RECOVERY WITH THROTTLING
+ -- Check for stalled jobs periodically to avoid overhead in hot path
+ -- This ensures stalled jobs are recovered even in high-load systems
+ -- Check interval is adaptive: 1/4 of jobTimeout (to check 4x during visibility window), max 5s
+ local processingKey = ns .. ":processing"
+ local stalledCheckKey = ns .. ":stalled:lastcheck"
+ local lastCheck = tonumber(redis.call("GET", stalledCheckKey)) or 0
+ local stalledCheckInterval = math.min(math.floor(vt / 4), 5000)
+
+ local shouldCheckStalled = (now - lastCheck) >= stalledCheckInterval
+
+ -- Get available groups
+ local groups = redis.call("ZRANGE", readyKey, 0, scanLimit - 1, "WITHSCORES")
+
+ -- Check for stalled jobs if: queue is empty OR it's time for periodic check
+ if (not groups or #groups == 0) or shouldCheckStalled then
+   if shouldCheckStalled then
+     redis.call("SET", stalledCheckKey, tostring(now))
+   end
+
+   local expiredJobs = redis.call("ZRANGEBYSCORE", processingKey, 0, now)
+   for _, jobId in ipairs(expiredJobs) do
+     local procKey = ns .. ":processing:" .. jobId
+     local procData = redis.call("HMGET", procKey, "groupId", "deadlineAt")
+     local gid = procData[1]
+     local deadlineAt = tonumber(procData[2])
+     if gid and deadlineAt and now > deadlineAt then
+       local jobKey = ns .. ":job:" .. jobId
+       local jobScore = redis.call("HGET", jobKey, "score")
+       if jobScore then
+         local gZ = ns .. ":g:" .. gid
+         redis.call("ZADD", gZ, tonumber(jobScore), jobId)
+         local head = redis.call("ZRANGE", gZ, 0, 0, "WITHSCORES")
+         if head and #head >= 2 then
+           local headScore = tonumber(head[2])
+           redis.call("ZADD", readyKey, headScore, gid)
+         end
+         redis.call("DEL", ns .. ":lock:" .. gid)
+         redis.call("DEL", procKey)
+         redis.call("ZREM", processingKey, jobId)
+       end
+     end
+   end
+
+   -- Refresh groups after recovery (only if we didn't have any before)
+   if not groups or #groups == 0 then
+     groups = redis.call("ZRANGE", readyKey, 0, scanLimit - 1, "WITHSCORES")
+   end
+ end
+
+ if not groups or #groups == 0 then
+   return nil
+ end
+
+ local chosenGid = nil
+ local chosenIndex = nil
+ local headJobId = nil
+ local job = nil
+
+ -- Try to atomically acquire a group and its head job
+ -- BullMQ-style: use per-group active list instead of group locks
+ for i = 1, #groups, 2 do
+   local gid = groups[i]
+   local gZ = ns .. ":g:" .. gid
+   local groupActiveKey = ns .. ":g:" .. gid .. ":active"
+   local configKey = ns .. ":config:" .. gid
+
+   -- [PHASE 2 MODIFICATION START]
+   -- Check concurrency limit
+   local activeCount = redis.call("LLEN", groupActiveKey)
+   local limit = tonumber(redis.call("HGET", configKey, "concurrency")) or 1
+
+   if activeCount < limit then
+     -- Group has capacity, try to get head job
+     local head = redis.call("ZRANGE", gZ, 0, 0, "WITHSCORES")
+     if head and #head >= 2 then
+       local headJobId = head[1]
+       local headJobKey = ns .. ":job:" .. headJobId
+
+       -- Skip if head job is delayed (will be promoted later)
+       local jobStatus = redis.call("HGET", headJobKey, "status")
+       if jobStatus ~= "delayed" then
+         -- Pop the job and push to active list atomically
+         local zpop = redis.call("ZPOPMIN", gZ, 1)
+         if zpop and #zpop > 0 then
+           headJobId = zpop[1]
+           -- Read the popped job (use headJobId to avoid races)
+           headJobKey = ns .. ":job:" .. headJobId
+           job = redis.call("HMGET", headJobKey, "id","groupId","data","attempts","maxAttempts","seq","timestamp","orderMs","score")
+
+           -- Push to group active list
+           redis.call("LPUSH", groupActiveKey, headJobId)
+
+           chosenGid = gid
+           chosenIndex = (i + 1) / 2 - 1
+           -- Mark job as processing for accurate stalled detection and idempotency
+           redis.call("HSET", headJobKey, "status", "processing")
+           break
+         end
+       end
+     end
+   end
+   -- [PHASE 2 MODIFICATION END]
+ end
+
+ if not chosenGid or not job then
+   return nil
+ end
+
+ local id, groupId, payload, attempts, maxAttempts, seq, enq, orderMs, score = job[1], job[2], job[3], job[4], job[5], job[6], job[7], job[8], job[9]
+
+ -- Validate job data exists (handle corrupted/missing job hash)
+ if not id or id == false then
+   -- Job hash is missing/corrupted, clean up group active list
+   local groupActiveKey = ns .. ":g:" .. chosenGid .. ":active"
+   redis.call("LREM", groupActiveKey, 1, headJobId)
+
+   -- Re-add next job to ready queue if exists
+   local gZ = ns .. ":g:" .. chosenGid
+   local nextHead = redis.call("ZRANGE", gZ, 0, 0, "WITHSCORES")
+   if nextHead and #nextHead >= 2 then
+     local nextScore = tonumber(nextHead[2])
+     redis.call("ZADD", readyKey, nextScore, chosenGid)
+   end
+
+   return nil
+ end
+
+ -- Remove the group from ready queue
+ redis.call("ZREMRANGEBYRANK", readyKey, chosenIndex, chosenIndex)
+
+ local procKey = ns .. ":processing:" .. id
+ local deadline = now + vt
+ redis.call("HSET", procKey, "groupId", chosenGid, "deadlineAt", tostring(deadline))
+
+ local processingKey2 = ns .. ":processing"
+ redis.call("ZADD", processingKey2, deadline, id)
+
+ local gZ = ns .. ":g:" .. chosenGid
+ local nextHead = redis.call("ZRANGE", gZ, 0, 0, "WITHSCORES")
+ if nextHead and #nextHead >= 2 then
+   local nextScore = tonumber(nextHead[2])
+   redis.call("ZADD", readyKey, nextScore, chosenGid)
+ end
+
+ return id .. "|||" .. groupId .. "|||" .. payload .. "|||" .. attempts .. "|||" .. maxAttempts .. "|||" .. seq .. "|||" .. enq .. "|||" .. orderMs .. "|||" .. score .. "|||" .. deadline
+
+
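For orientation, below is a minimal TypeScript sketch of how a worker loop could drive reserve.lua directly through ioredis (the package's peer dependency). The groupmq-plus client API itself is not part of this diff, so the helper name reserveNext, the namespace argument, and the node_modules path are assumptions; only the KEYS/ARGV contract and the "|||"-delimited return format come from the script above.

// Illustrative sketch only: not the package's real API.
import { readFileSync } from "node:fs";
import Redis from "ioredis";

const redis = new Redis();
const reserveLua = readFileSync(
  "node_modules/groupmq-plus/dist/lua/reserve.lua", // assumed install path
  "utf8"
);

interface ReservedJob {
  id: string;
  groupId: string;
  data: string;
  attempts: number;
  maxAttempts: number;
  deadlineAt: number;
}

async function reserveNext(ns: string, vtMs: number): Promise<ReservedJob | null> {
  // KEYS[1] = namespace prefix; ARGV = nowEpochMs, vtMs, scanLimit (per the script header)
  const raw = (await redis.eval(
    reserveLua,
    1,
    ns,
    Date.now().toString(),
    vtMs.toString(),
    "20"
  )) as string | null;
  if (!raw) return null; // paused, nothing ready, or scanned groups at their concurrency limit

  // The script returns ten "|||"-separated fields:
  // id, groupId, data, attempts, maxAttempts, seq, timestamp, orderMs, score, deadline
  const [id, groupId, data, attempts, maxAttempts, , , , , deadline] = raw.split("|||");
  return {
    id,
    groupId,
    data,
    attempts: Number(attempts),
    maxAttempts: Number(maxAttempts),
    deadlineAt: Number(deadline),
  };
}

A caller would process the returned job before deadlineAt (now + vtMs); otherwise the throttled stalled-job recovery at the top of the script re-queues it for its group.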
package/dist/lua/retry.lua ADDED
@@ -0,0 +1,53 @@
+ -- argv: ns, jobId, backoffMs
+ local ns = KEYS[1]
+ local jobId = ARGV[1]
+ local backoffMs = tonumber(ARGV[2]) or 0
+
+ local jobKey = ns .. ":job:" .. jobId
+ local gid = redis.call("HGET", jobKey, "groupId")
+ local attempts = tonumber(redis.call("HINCRBY", jobKey, "attempts", 1))
+ local maxAttempts = tonumber(redis.call("HGET", jobKey, "maxAttempts"))
+
+ redis.call("DEL", ns .. ":processing:" .. jobId)
+ redis.call("ZREM", ns .. ":processing", jobId)
+
+ -- BullMQ-style: Remove from group active list
+ local groupActiveKey = ns .. ":g:" .. gid .. ":active"
+ redis.call("LREM", groupActiveKey, 1, jobId)
+
+ if attempts > maxAttempts then
+   return -1
+ end
+
+ local score = tonumber(redis.call("HGET", jobKey, "score"))
+ local gZ = ns .. ":g:" .. gid
+
+ -- Re-add job to group
+ redis.call("ZADD", gZ, score, jobId)
+
+ -- If backoffMs > 0, delay the retry
+ if backoffMs > 0 then
+   local now = tonumber(redis.call("TIME")[1]) * 1000
+   local delayUntil = now + backoffMs
+
+   -- Move to delayed set
+   local delayedKey = ns .. ":delayed"
+   redis.call("ZADD", delayedKey, delayUntil, jobId)
+   redis.call("HSET", jobKey, "runAt", tostring(delayUntil), "status", "delayed")
+
+   -- Don't add to ready yet - will be added when promoted
+   -- (delayed jobs block their group)
+ else
+   -- No backoff - immediate retry
+   redis.call("HSET", jobKey, "status", "waiting")
+
+   -- Add group to ready queue
+   local head = redis.call("ZRANGE", gZ, 0, 0, "WITHSCORES")
+   if head and #head >= 2 then
+     local headScore = tonumber(head[2])
+     local readyKey = ns .. ":ready"
+     redis.call("ZADD", readyKey, headScore, gid)
+   end
+ end
+
+ return attempts
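Similarly, a hedged sketch of calling retry.lua after a failed handler. The exponential-backoff policy and helper name below are illustrative assumptions; the KEYS/ARGV contract (namespace, then jobId and backoffMs) and the -1 "attempts exhausted" return value are taken from the script above.

// Illustrative sketch only: not the package's real API.
import { readFileSync } from "node:fs";
import Redis from "ioredis";

const redis = new Redis();
const retryLua = readFileSync(
  "node_modules/groupmq-plus/dist/lua/retry.lua", // assumed install path
  "utf8"
);

async function retryJob(ns: string, jobId: string, attemptsSoFar: number): Promise<boolean> {
  // Backoff policy is an assumption for illustration: 1s, 2s, 4s, ... capped at 30s.
  const backoffMs = Math.min(1000 * 2 ** attemptsSoFar, 30_000);

  // KEYS[1] = namespace prefix; ARGV = jobId, backoffMs (per the script header)
  const result = (await redis.eval(retryLua, 1, ns, jobId, backoffMs.toString())) as number;

  if (result === -1) {
    // attempts > maxAttempts: the job was not re-queued; a caller would typically
    // dead-letter it at this point (see dead-letter.lua in the file list).
    return false;
  }

  // The script re-queued the job (delayed when backoffMs > 0, otherwise waiting)
  // and returned the incremented attempt count.
  return true;
}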
package/package.json ADDED
@@ -0,0 +1,92 @@
+ {
+   "name": "groupmq-plus",
+   "version": "1.1.0",
+   "description": "Per-group FIFO queue on Redis with visibility timeouts and retries.",
+   "license": "MIT",
+   "private": false,
+   "type": "module",
+   "main": "./dist/index.cjs",
+   "module": "./dist/index.js",
+   "types": "./dist/index.d.ts",
+   "exports": {
+     ".": {
+       "types": "./dist/index.d.ts",
+       "import": "./dist/index.js",
+       "require": "./dist/index.cjs"
+     },
+     "./package.json": "./package.json"
+   },
+   "author": "OpenPanel.dev <hello@openpanel.dev> (https://openpanel.dev)",
+   "repository": {
+     "type": "git",
+     "url": "https://github.com/Openpanel-dev/groupmq"
+   },
+   "bugs": {
+     "url": "https://github.com/Openpanel-dev/groupmq/issues"
+   },
+   "homepage": "https://github.com/Openpanel-dev/groupmq#readme",
+   "files": [
+     "dist"
+   ],
+   "engines": {
+     "node": ">=18"
+   },
+   "keywords": [
+     "redis",
+     "queue",
+     "fifo",
+     "worker",
+     "node",
+     "typescript"
+   ],
+   "dependencies": {
+     "cron-parser": "^4.9.0"
+   },
+   "devDependencies": {
+     "@biomejs/biome": "^2.2.4",
+     "@bull-board/api": "^6.13.0",
+     "@semantic-release/changelog": "^6.0.3",
+     "@semantic-release/git": "^10.0.1",
+     "@semantic-release/github": "^10.3.5",
+     "@semantic-release/npm": "^11.0.2",
+     "@types/node": "^20.12.12",
+     "@types/pidusage": "^2.0.5",
+     "bullmq": "^5.58.7",
+     "commander": "^14.0.1",
+     "conventional-changelog-conventionalcommits": "^7",
+     "ioredis": "^5.4.1",
+     "jiti": "^2.5.1",
+     "pidusage": "^4.0.1",
+     "pino": "^9.12.0",
+     "pretty-ms": "^9.3.0",
+     "semantic-release": "^23.0.0",
+     "tsdown": "^0.15.4",
+     "typescript": "^5.6.2",
+     "vitest": "^2.0.5",
+     "winston": "^3.18.3"
+   },
+   "peerDependencies": {
+     "ioredis": ">=5"
+   },
+   "scripts": {
+     "build": "tsdown src/index.ts --format esm --format cjs --dts --sourcemap && node scripts/copy-lua.mjs",
+     "prebuild": "pnpm test:lua",
+     "test": "vitest run --no-file-parallelism",
+     "test:lua": "node scripts/validate-lua-scripts.js",
+     "test:many": "for i in {1..200}; do pnpm run test; done",
+     "dev:test": "vitest --watch",
+     "monitor": "jiti cli.ts",
+     "benchmark": "jiti benchmark/benchmark.ts",
+     "benchmark:both": "pnpm benchmark --mq both --jobs 5000 --workers 4 --job-type cpu --multi-process && pnpm format:fix -- benchmark/results",
+     "benchmark:both:empty": "pnpm benchmark --mq both --jobs 5000 --workers 4 --job-type empty --multi-process && pnpm format:fix -- benchmark/results",
+     "benchmark:groupmq": "pnpm benchmark --mq groupmq --jobs 5000 --workers 4 --job-type cpu --multi-process && pnpm format:fix -- benchmark/results",
+     "benchmark:groupmq:dragonfly": "pnpm benchmark --db dragonfly --mq groupmq --jobs 5000 --workers 10 --job-type cpu --multi-process && pnpm format:fix -- benchmark/results",
+     "benchmark:bullmq": "pnpm benchmark --mq bullmq --jobs 5000 --workers 4 --job-type cpu --multi-process && pnpm format:fix -- benchmark/results",
+     "format": "biome format .",
+     "format:fix": "biome format --write",
+     "format:fixall": "biome format --write .",
+     "lint": "biome check .",
+     "lint:fix": "biome check --write --unsafe .",
+     "typecheck": "tsc --noEmit"
+   }
+ }
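Based only on the "exports" map above, ESM consumers resolve ./dist/index.js, require() consumers resolve ./dist/index.cjs, and both share the bundled type declarations. A minimal consumption sketch follows; the package's named exports are not shown in this diff, so none are assumed.

// ESM / TypeScript project ("type": "module" or a bundler) -> ./dist/index.js
import * as groupmq from "groupmq-plus";

// CommonJS project -> ./dist/index.cjs
// const groupmq = require("groupmq-plus");

// Inspect what the package actually exports before relying on any name.
console.log(Object.keys(groupmq));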