omniq-1.0.0-py3-none-any.whl → omniq-1.0.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- omniq/client.py +1 -1
- omniq/core/scripts/ack_fail.lua +119 -0
- omniq/core/scripts/ack_success.lua +90 -0
- omniq/core/scripts/enqueue.lua +96 -0
- omniq/core/scripts/heartbeat.lua +63 -0
- omniq/core/scripts/pause.lua +12 -0
- omniq/core/scripts/promote_delayed.lua +59 -0
- omniq/core/scripts/reap_expired.lua +105 -0
- omniq/core/scripts/reserve.lua +149 -0
- omniq/core/scripts/resume.lua +12 -0
- omniq/scripts.py +3 -3
- {omniq-1.0.0.dist-info → omniq-1.0.2.dist-info}/METADATA +1 -1
- omniq-1.0.2.dist-info/RECORD +23 -0
- omniq-1.0.0.dist-info/RECORD +0 -14
- {omniq-1.0.0.dist-info → omniq-1.0.2.dist-info}/WHEEL +0 -0
- {omniq-1.0.0.dist-info → omniq-1.0.2.dist-info}/top_level.txt +0 -0
omniq/client.py
CHANGED
omniq/core/scripts/ack_fail.lua
ADDED
@@ -0,0 +1,119 @@
-- ACK_FAIL (hybrid; failed kept forever; token-gated)
-- ARGV:
-- 1 base
-- 2 job_id
-- 3 now_ms
-- 4 lease_token
-- 5 error (optional; stored as last_error)

local base = ARGV[1]
local job_id = ARGV[2]
local now_ms = tonumber(ARGV[3] or "0")
local lease_token = ARGV[4]
local err_msg = ARGV[5] -- optional

local DEFAULT_GROUP_LIMIT = 1
local MAX_ERR_BYTES = 4096 -- bound the stored error size

local k_job = base .. ":job:" .. job_id
local k_active = base .. ":active"
local k_delayed = base .. ":delayed"
local k_failed = base .. ":failed"
local k_gready = base .. ":groups:ready"

local function to_i(v)
  if v == false or v == nil or v == '' then return 0 end
  local n = tonumber(v)
  if n == nil then return 0 end
  return math.floor(n)
end

local function dec_floor0(key)
  local v = to_i(redis.call("DECR", key))
  if v < 0 then
    redis.call("SET", key, "0")
    return 0
  end
  return v
end

local function group_limit_for(gid)
  local k_glimit = base .. ":g:" .. gid .. ":limit"
  local lim = to_i(redis.call("GET", k_glimit))
  if lim <= 0 then return DEFAULT_GROUP_LIMIT end
  return lim
end

-- NEW: bounded last_error write (optional)
local function maybe_store_last_error()
  if err_msg == nil or err_msg == "" then return end
  -- best-effort truncate
  if string.len(err_msg) > MAX_ERR_BYTES then
    err_msg = string.sub(err_msg, 1, MAX_ERR_BYTES)
  end
  redis.call("HSET", k_job,
    "last_error", err_msg,
    "last_error_ms", tostring(now_ms)
  )
end

-- token required
if lease_token == nil or lease_token == "" then
  return {"ERR", "TOKEN_REQUIRED"}
end

-- token must match the current owner attempt
local cur_token = redis.call("HGET", k_job, "lease_token") or ""
if cur_token ~= lease_token then
  return {"ERR", "TOKEN_MISMATCH"}
end

-- must still be active (prevents double-ACK and stale workers after reaper/retry)
if redis.call("ZREM", k_active, job_id) ~= 1 then
  return {"ERR", "NOT_ACTIVE"}
end

-- NEW: store error after we confirm active+token (so stale workers can't write errors)
maybe_store_last_error()

-- group bookkeeping (if job is grouped)
local gid = redis.call("HGET", k_job, "gid")
if gid and gid ~= "" then
  local k_ginflight = base .. ":g:" .. gid .. ":inflight"
  local inflight = dec_floor0(k_ginflight)
  local limit = group_limit_for(gid)
  local k_gwait = base .. ":g:" .. gid .. ":wait"
  if inflight < limit and to_i(redis.call("LLEN", k_gwait)) > 0 then
    redis.call("ZADD", k_gready, now_ms, gid)
  end
end

local attempt = to_i(redis.call("HGET", k_job, "attempt"))
local max_attempts = to_i(redis.call("HGET", k_job, "max_attempts"))
if max_attempts <= 0 then max_attempts = 1 end
local backoff_ms = to_i(redis.call("HGET", k_job, "backoff_ms"))

-- terminal failure => move to failed history (kept forever)
if attempt >= max_attempts then
  redis.call("HSET", k_job,
    "state", "failed",
    "updated_ms", tostring(now_ms),
    "lease_token", "",
    "lock_until_ms", ""
  )
  redis.call("LPUSH", k_failed, job_id)
  return {"FAILED"}
end

-- retry path
local due_ms = now_ms + backoff_ms
redis.call("HSET", k_job,
  "state", "delayed",
  "due_ms", tostring(due_ms),
  "updated_ms", tostring(now_ms),
  "lease_token", "",
  "lock_until_ms", ""
)
redis.call("ZADD", k_delayed, due_ms, job_id)

return {"RETRY", tostring(due_ms)}
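A minimal sketch of the worker-side call, assuming a redis-py style client and that the script's SHA was registered with SCRIPT LOAD; the ack_fail_sha, base, job_id, and token names are illustrative, not omniq's public API. The script reads everything from ARGV, so numkeys is 0:

import time

import redis

def ack_fail(r: redis.Redis, ack_fail_sha: str, base: str, job_id: str,
             token: str, error: str = "") -> list:
    # The clock is supplied by the caller; the script never reads server time.
    now_ms = int(time.time() * 1000)
    # Replies: [b"RETRY", due_ms], [b"FAILED"], or
    # [b"ERR", b"TOKEN_REQUIRED" | b"TOKEN_MISMATCH" | b"NOT_ACTIVE"].
    return r.evalsha(ack_fail_sha, 0, base, job_id, now_ms, token, error)

Passing the per-attempt token means a worker whose lease was already reaped gets TOKEN_MISMATCH or NOT_ACTIVE back instead of silently rescheduling a job that someone else now owns.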
omniq/core/scripts/ack_success.lua
ADDED
@@ -0,0 +1,90 @@
-- ACK_SUCCESS (hybrid + retention + token-gated)
-- ARGV:
-- 1 base
-- 2 job_id
-- 3 now_ms
-- 4 lease_token

local base = ARGV[1]
local job_id = ARGV[2]
local now_ms = tonumber(ARGV[3] or "0")
local lease_token = ARGV[4]

local DEFAULT_GROUP_LIMIT = 1

local k_job = base .. ":job:" .. job_id
local k_active = base .. ":active"
local k_completed = base .. ":completed"
local k_gready = base .. ":groups:ready"

local KEEP_COMPLETED = 100

local function to_i(v)
  if v == false or v == nil or v == '' then return 0 end
  local n = tonumber(v)
  if n == nil then return 0 end
  return math.floor(n)
end

local function dec_floor0(key)
  local v = to_i(redis.call("DECR", key))
  if v < 0 then
    redis.call("SET", key, "0")
    return 0
  end
  return v
end

local function group_limit_for(gid)
  local k_glimit = base .. ":g:" .. gid .. ":limit"
  local lim = to_i(redis.call("GET", k_glimit))
  if lim <= 0 then return DEFAULT_GROUP_LIMIT end
  return lim
end

-- token required
if lease_token == nil or lease_token == "" then
  return {"ERR", "TOKEN_REQUIRED"}
end

-- token must match the current owner attempt
local cur_token = redis.call("HGET", k_job, "lease_token") or ""
if cur_token ~= lease_token then
  return {"ERR", "TOKEN_MISMATCH"}
end

-- must still be active (prevents double-ACK and stale workers after reaper/retry)
if redis.call("ZREM", k_active, job_id) ~= 1 then
  return {"ERR", "NOT_ACTIVE"}
end

-- mark completed + clear token
redis.call("HSET", k_job,
  "state", "completed",
  "updated_ms", tostring(now_ms),
  "lease_token", "",
  "lock_until_ms", ""
)

-- group bookkeeping (if job is grouped)
local gid = redis.call("HGET", k_job, "gid")
if gid and gid ~= "" then
  local k_ginflight = base .. ":g:" .. gid .. ":inflight"
  local inflight = dec_floor0(k_ginflight)
  local limit = group_limit_for(gid)
  local k_gwait = base .. ":g:" .. gid .. ":wait"
  if inflight < limit and to_i(redis.call("LLEN", k_gwait)) > 0 then
    redis.call("ZADD", k_gready, now_ms, gid)
  end
end

-- retention list (trim completed list + delete overflow job hashes)
redis.call("LPUSH", k_completed, job_id)
while redis.call("LLEN", k_completed) > KEEP_COMPLETED do
  local old_id = redis.call("RPOP", k_completed)
  if old_id then
    redis.call("DEL", base .. ":job:" .. old_id)
  end
end

return {"OK"}
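Since the script caps the :completed list at KEEP_COMPLETED entries and deletes the overflowed job hashes, reading the success history is a bounded operation. A hedged inspection sketch with redis-py; the key layout comes from the script, while the base prefix is illustrative:

import redis

r = redis.Redis(decode_responses=True)
base = "omniq:default"  # illustrative queue prefix

# Newest first; at most 100 ids survive the trim loop above.
for job_id in r.lrange(f"{base}:completed", 0, -1):
    job = r.hgetall(f"{base}:job:{job_id}")  # {} if already trimmed away
    print(job_id, job.get("state"), job.get("updated_ms"))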
omniq/core/scripts/enqueue.lua
ADDED
@@ -0,0 +1,96 @@
-- ENQUEUE (hybrid: ungrouped by default, groups opt-in)
-- ARGV:
-- 1 base
-- 2 job_id
-- 3 payload
-- 4 max_attempts
-- 5 timeout_ms
-- 6 backoff_ms
-- 7 now_ms
-- 8 due_ms
-- 9 gid (optional; if set/non-empty => grouped job)
-- 10 group_limit (optional; used to initialize the group limit if not set)

local base = ARGV[1]
local job_id = ARGV[2]
local payload = ARGV[3] or ""
local max_attempts = tonumber(ARGV[4] or "1")
local timeout_ms = tonumber(ARGV[5] or "60000")
local backoff_ms = tonumber(ARGV[6] or "5000")
local now_ms = tonumber(ARGV[7] or "0")
local due_ms = tonumber(ARGV[8] or "0")
local gid = ARGV[9]
local group_limit = tonumber(ARGV[10] or "0")

local DEFAULT_GROUP_LIMIT = 1

local k_job = base .. ":job:" .. job_id
local k_delayed = base .. ":delayed"
local k_wait = base .. ":wait"
local k_has_groups = base .. ":has_groups"

local is_grouped = (gid ~= nil and gid ~= "")

-- persist job
if is_grouped then
  redis.call("HSET", k_job,
    "id", job_id,
    "payload", payload,
    "gid", gid,
    "state", "wait",
    "attempt", "0",
    "max_attempts", tostring(max_attempts),
    "timeout_ms", tostring(timeout_ms),
    "backoff_ms", tostring(backoff_ms),
    "created_ms", tostring(now_ms),
    "updated_ms", tostring(now_ms)
  )

  -- mark queue as having groups (inspection only)
  redis.call("SET", k_has_groups, "1")

  -- initialize group limit lazily (first writer wins)
  local k_glimit = base .. ":g:" .. gid .. ":limit"
  if group_limit ~= nil and group_limit > 0 then
    if redis.call("EXISTS", k_glimit) == 0 then
      redis.call("SET", k_glimit, tostring(group_limit))
    end
  end
else
  redis.call("HSET", k_job,
    "id", job_id,
    "payload", payload,
    "state", "wait",
    "attempt", "0",
    "max_attempts", tostring(max_attempts),
    "timeout_ms", tostring(timeout_ms),
    "backoff_ms", tostring(backoff_ms),
    "created_ms", tostring(now_ms),
    "updated_ms", tostring(now_ms)
  )
end

-- route job
if due_ms ~= nil and due_ms > now_ms then
  redis.call("ZADD", k_delayed, due_ms, job_id)
  redis.call("HSET", k_job, "state", "delayed", "due_ms", tostring(due_ms))
else
  if is_grouped then
    local k_gwait = base .. ":g:" .. gid .. ":wait"
    redis.call("RPUSH", k_gwait, job_id)

    -- if group has capacity, put it in the ready set
    local k_ginflight = base .. ":g:" .. gid .. ":inflight"
    local inflight = tonumber(redis.call("GET", k_ginflight) or "0")

    local limit = tonumber(redis.call("GET", base .. ":g:" .. gid .. ":limit") or tostring(DEFAULT_GROUP_LIMIT))
    if inflight < limit then
      local k_gready = base .. ":groups:ready"
      redis.call("ZADD", k_gready, now_ms, gid)
    end
  else
    redis.call("RPUSH", k_wait, job_id)
  end
end

return {"OK", job_id}
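A producer-side sketch, again assuming a redis-py style client and a loaded SHA; enqueue_sha and the wrapper function are illustrative. The ten ARGV slots follow the header above: an empty gid routes the job to the plain :wait list, a non-empty gid opts it into per-group FIFO.

import time
import uuid

import redis

def enqueue(r: redis.Redis, enqueue_sha: str, base: str, payload: bytes, *,
            gid: str = "", group_limit: int = 0, max_attempts: int = 3,
            timeout_ms: int = 60_000, backoff_ms: int = 5_000,
            delay_ms: int = 0) -> list:
    now_ms = int(time.time() * 1000)
    due_ms = now_ms + delay_ms if delay_ms > 0 else 0  # 0 => run immediately
    job_id = uuid.uuid4().hex
    # ARGV order: base, job_id, payload, max_attempts, timeout_ms,
    # backoff_ms, now_ms, due_ms, gid, group_limit.
    return r.evalsha(enqueue_sha, 0, base, job_id, payload, max_attempts,
                     timeout_ms, backoff_ms, now_ms, due_ms, gid, group_limit)

Note that group_limit only initializes :g:<gid>:limit on first write; later enqueues cannot grow or shrink an existing group's limit through this path.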
omniq/core/scripts/heartbeat.lua
ADDED
@@ -0,0 +1,63 @@
-- HEARTBEAT (lease renewal / keep-alive)
-- Extends the lease of an active job, gated by lease_token.
--
-- ARGV:
-- 1 base
-- 2 job_id
-- 3 now_ms
-- 4 lease_token

local base = ARGV[1]
local job_id = ARGV[2]
local now_ms = tonumber(ARGV[3] or "0")
local lease_token = ARGV[4]

local k_job = base .. ":job:" .. job_id
local k_active = base .. ":active"

local function to_i(v)
  if v == false or v == nil or v == '' then return 0 end
  local n = tonumber(v)
  if n == nil then return 0 end
  return math.floor(n)
end

-- token required
if lease_token == nil or lease_token == "" then
  return {"ERR", "TOKEN_REQUIRED"}
end

-- token must match current attempt owner
local cur_token = redis.call("HGET", k_job, "lease_token") or ""
if cur_token ~= lease_token then
  return {"ERR", "TOKEN_MISMATCH"}
end

-- must still be active (prevents extending after reaper/retry)
local cur_score = redis.call("ZSCORE", k_active, job_id)
if not cur_score then
  return {"ERR", "NOT_ACTIVE"}
end

local cur_lock_until = tonumber(cur_score) or 0

-- extend using the job's configured timeout_ms
local timeout_ms = to_i(redis.call("HGET", k_job, "timeout_ms"))
if timeout_ms <= 0 then timeout_ms = 60000 end

-- MONOTONIC EXTENSION:
-- never shorten the lease if now_ms goes backwards
local base_ms = cur_lock_until
if now_ms > base_ms then
  base_ms = now_ms
end

local lock_until = base_ms + timeout_ms

redis.call("HSET", k_job,
  "lock_until_ms", tostring(lock_until),
  "updated_ms", tostring(now_ms)
)
redis.call("ZADD", k_active, lock_until, job_id)

return {"OK", tostring(lock_until)}
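For jobs that outlive their timeout_ms, a worker keeps the lease alive from a side thread. A sketch under the same redis-py assumption (heartbeat_sha is illustrative); the interval should sit well below the job's timeout_ms:

import threading
import time

import redis

def keep_alive(r: redis.Redis, heartbeat_sha: str, base: str, job_id: str,
               token: str, stop: threading.Event,
               interval_s: float = 10.0) -> None:
    while not stop.wait(interval_s):
        res = r.evalsha(heartbeat_sha, 0, base, job_id,
                        int(time.time() * 1000), token)
        if res[0] == b"ERR":
            # NOT_ACTIVE or TOKEN_MISMATCH: the reaper reclaimed this
            # attempt, so stop renewing and let the handler abort.
            break

The monotonic-extension rule above means a client clock that jumps backwards can never shorten an existing lease, only extend it.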
omniq/core/scripts/promote_delayed.lua
ADDED
@@ -0,0 +1,59 @@
-- PROMOTE_DELAYED (hybrid)
-- ARGV:
-- 1 base
-- 2 now_ms
-- 3 max_promote

local base = ARGV[1]
local now_ms = tonumber(ARGV[2] or "0")
local max_promote = tonumber(ARGV[3] or "1000")

local DEFAULT_GROUP_LIMIT = 1

local k_delayed = base .. ":delayed"
local k_wait = base .. ":wait"
local k_gready = base .. ":groups:ready"

local function to_i(v)
  if v == false or v == nil or v == '' then return 0 end
  local n = tonumber(v)
  if n == nil then return 0 end
  return math.floor(n)
end

local function group_limit_for(gid)
  local k_glimit = base .. ":g:" .. gid .. ":limit"
  local lim = to_i(redis.call("GET", k_glimit))
  if lim <= 0 then return DEFAULT_GROUP_LIMIT end
  return lim
end

local ids = redis.call("ZRANGEBYSCORE", k_delayed, "-inf", now_ms, "LIMIT", 0, max_promote)
local promoted = 0

for i = 1, #ids do
  local job_id = ids[i]
  if redis.call("ZREM", k_delayed, job_id) == 1 then
    local k_job = base .. ":job:" .. job_id
    redis.call("HSET", k_job, "state", "wait", "updated_ms", tostring(now_ms))

    local gid = redis.call("HGET", k_job, "gid")
    if gid and gid ~= "" then
      local k_gwait = base .. ":g:" .. gid .. ":wait"
      redis.call("RPUSH", k_gwait, job_id)

      -- if group has capacity, mark it as ready
      local inflight = to_i(redis.call("GET", base .. ":g:" .. gid .. ":inflight"))
      local limit = group_limit_for(gid)
      if inflight < limit then
        redis.call("ZADD", k_gready, now_ms, gid)
      end
    else
      redis.call("RPUSH", k_wait, job_id)
    end

    promoted = promoted + 1
  end
end

return {"OK", tostring(promoted)}
omniq/core/scripts/reap_expired.lua
ADDED
@@ -0,0 +1,105 @@
-- REAP_EXPIRED (stalled recovery)
-- ARGV:
-- 1 base
-- 2 now_ms
-- 3 max_reap

local base = ARGV[1]
local now_ms = tonumber(ARGV[2] or "0")
local max_reap = tonumber(ARGV[3] or "1000")

local DEFAULT_GROUP_LIMIT = 1

local k_active = base .. ":active"
local k_delayed = base .. ":delayed"
local k_failed = base .. ":failed"
local k_gready = base .. ":groups:ready"

local function to_i(v)
  if v == false or v == nil or v == '' then return 0 end
  local n = tonumber(v)
  if n == nil then return 0 end
  return math.floor(n)
end

local function dec_floor0(key)
  local v = to_i(redis.call("DECR", key))
  if v < 0 then
    redis.call("SET", key, "0")
    return 0
  end
  return v
end

local function group_limit_for(gid)
  local k_glimit = base .. ":g:" .. gid .. ":limit"
  local lim = to_i(redis.call("GET", k_glimit))
  if lim <= 0 then return DEFAULT_GROUP_LIMIT end
  return lim
end

local ids = redis.call("ZRANGEBYSCORE", k_active, "-inf", now_ms, "LIMIT", 0, max_reap)
local reaped = 0

for i = 1, #ids do
  local job_id = ids[i]

  -- re-check current score in case lease was extended after scan
  local score = redis.call("ZSCORE", k_active, job_id)
  if score and tonumber(score) and tonumber(score) > now_ms then
    -- still leased; skip
  else
    if redis.call("ZREM", k_active, job_id) == 1 then
      local k_job = base .. ":job:" .. job_id

      -- if job hash missing, nothing else to do
      if redis.call("EXISTS", k_job) == 0 then
        reaped = reaped + 1
      else
        -- IMPORTANT: invalidate attempt ownership
        redis.call("HSET", k_job, "lease_token", "")

        -- group bookkeeping (if grouped)
        local gid = redis.call("HGET", k_job, "gid")
        if gid and gid ~= "" then
          local k_ginflight = base .. ":g:" .. gid .. ":inflight"
          local inflight = dec_floor0(k_ginflight)
          local limit = group_limit_for(gid)
          local k_gwait = base .. ":g:" .. gid .. ":wait"
          if inflight < limit and to_i(redis.call("LLEN", k_gwait)) > 0 then
            redis.call("ZADD", k_gready, now_ms, gid)
          end
        end

        local attempt = to_i(redis.call("HGET", k_job, "attempt"))
        local max_attempts = to_i(redis.call("HGET", k_job, "max_attempts"))
        if max_attempts <= 0 then max_attempts = 1 end
        local backoff_ms = to_i(redis.call("HGET", k_job, "backoff_ms"))

        if attempt >= max_attempts then
          redis.call("HSET", k_job,
            "state", "failed",
            "updated_ms", tostring(now_ms),
            "lease_token", "",
            "lock_until_ms", ""
          )
          redis.call("LPUSH", k_failed, job_id)
        else
          local due_ms = now_ms + backoff_ms
          redis.call("HSET", k_job,
            "state", "delayed",
            "due_ms", tostring(due_ms),
            "updated_ms", tostring(now_ms),
            "lease_token", "",
            "lock_until_ms", ""
          )
          redis.call("ZADD", k_delayed, due_ms, job_id)
        end

        reaped = reaped + 1
      end
    end
  end
end

return {"OK", tostring(reaped)}
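promote_delayed and reap_expired are the two housekeeping scripts, and any process can drive them on a timer. A sketch of one tick, assuming redis-py and SHAs loaded elsewhere; promote_sha, reap_sha, and the batch size are illustrative:

import time

import redis

def maintenance_tick(r: redis.Redis, promote_sha: str, reap_sha: str,
                     base: str, batch: int = 1000) -> tuple[int, int]:
    now_ms = int(time.time() * 1000)
    promoted = r.evalsha(promote_sha, 0, base, now_ms, batch)  # [b"OK", n]
    reaped = r.evalsha(reap_sha, 0, base, now_ms, batch)       # [b"OK", n]
    return int(promoted[1]), int(reaped[1])

Redis runs each script atomically and every move is gated by a ZREM check, so several overlapping tickers cannot double-promote or double-reap a job.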
omniq/core/scripts/reserve.lua
ADDED
@@ -0,0 +1,149 @@
-- RESERVE (hybrid: grouped + ungrouped with starvation-free fairness)
--
-- Returns:
-- ["EMPTY"]
-- or
-- ["JOB", job_id, payload, lock_until_ms, attempt, gid, lease_token]
--
-- ARGV:
-- 1 base
-- 2 now_ms

local base = ARGV[1]
local k_paused = base .. ":paused"

if redis.call("EXISTS", k_paused) == 1 then
  return {"PAUSED"}
end

local now_ms = tonumber(ARGV[2] or "0")

local DEFAULT_GROUP_LIMIT = 1
local MAX_GROUP_POPS = 10

local k_wait = base .. ":wait"
local k_active = base .. ":active"
local k_gready = base .. ":groups:ready"
local k_rr = base .. ":lane:rr"

-- sequence key used to generate lease tokens
local k_token_seq = base .. ":lease:seq"

local function to_i(v)
  if v == false or v == nil or v == '' then return 0 end
  local n = tonumber(v)
  if n == nil then return 0 end
  return math.floor(n)
end

local function new_lease_token(job_id)
  local seq = redis.call("INCR", k_token_seq)
  return redis.sha1hex(job_id .. ":" .. tostring(now_ms) .. ":" .. tostring(seq))
end

local function lease_job(job_id)
  local k_job = base .. ":job:" .. job_id

  local timeout_ms = to_i(redis.call("HGET", k_job, "timeout_ms"))
  if timeout_ms <= 0 then timeout_ms = 60000 end

  local attempt = to_i(redis.call("HGET", k_job, "attempt")) + 1
  local lock_until = now_ms + timeout_ms

  local payload = redis.call("HGET", k_job, "payload") or ""
  local gid = redis.call("HGET", k_job, "gid") or ""

  -- NEW: token per attempt
  local lease_token = new_lease_token(job_id)

  redis.call("HSET", k_job,
    "state", "active",
    "attempt", tostring(attempt),
    "lock_until_ms", tostring(lock_until),
    "lease_token", lease_token,
    "updated_ms", tostring(now_ms)
  )

  redis.call("ZADD", k_active, lock_until, job_id)

  return {"JOB", job_id, payload, tostring(lock_until), tostring(attempt), gid, lease_token}
end

local function try_ungrouped()
  local job_id = redis.call("LPOP", k_wait)
  if not job_id then
    return nil
  end
  return lease_job(job_id)
end

local function group_limit_for(gid)
  local k_glimit = base .. ":g:" .. gid .. ":limit"
  local lim = to_i(redis.call("GET", k_glimit))
  if lim <= 0 then return DEFAULT_GROUP_LIMIT end
  return lim
end

local function try_grouped()
  for _ = 1, MAX_GROUP_POPS do
    local popped = redis.call("ZPOPMIN", k_gready, 1)
    if not popped or #popped == 0 then
      return nil
    end

    local gid = popped[1]
    if not gid or gid == "" then
      -- invalid gid; skip
    else
      local k_gwait = base .. ":g:" .. gid .. ":wait"
      local k_ginflight = base .. ":g:" .. gid .. ":inflight"

      local inflight = to_i(redis.call("GET", k_ginflight))
      local limit = group_limit_for(gid)

      if inflight >= limit then
        -- popped gid but no capacity: re-add with tiny delay to reduce churn
        redis.call("ZADD", k_gready, now_ms + 1, gid)
      else
        local job_id = redis.call("LPOP", k_gwait)
        if not job_id then
          -- group empty; skip (enqueue.lua will re-add gid when new jobs arrive)
        else
          inflight = to_i(redis.call("INCR", k_ginflight))

          if inflight < limit and to_i(redis.call("LLEN", k_gwait)) > 0 then
            redis.call("ZADD", k_gready, now_ms, gid)
          end

          return lease_job(job_id)
        end
      end
    end
  end

  return nil
end

local rr = to_i(redis.call("GET", k_rr))

local res
if rr == 0 then
  res = try_grouped()
  if not res then res = try_ungrouped() end
else
  res = try_ungrouped()
  if not res then res = try_grouped() end
end

if not res then
  return {"EMPTY"}
end

-- flip rr for starvation-free fairness
if rr == 0 then
  redis.call("SET", k_rr, "1")
else
  redis.call("SET", k_rr, "0")
end

return res
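End to end, a worker loop pairs reserve with exactly one of the two ACK scripts, threading the per-attempt lease token through. A sketch under the same redis-py assumption; handle is a placeholder job handler and shas an illustrative dict of loaded script SHAs:

import time

import redis

def handle(payload: bytes) -> None:  # placeholder job handler
    ...

def work_once(r: redis.Redis, shas: dict, base: str) -> bool:
    res = r.evalsha(shas["reserve"], 0, base, int(time.time() * 1000))
    if res[0] in (b"EMPTY", b"PAUSED"):
        return False
    # [b"JOB", job_id, payload, lock_until_ms, attempt, gid, lease_token]
    _, job_id, payload, _lock, _attempt, _gid, token = res
    try:
        handle(payload)
        r.evalsha(shas["ack_success"], 0, base, job_id,
                  int(time.time() * 1000), token)
    except Exception as exc:
        # The script truncates to MAX_ERR_BYTES anyway; pre-trim to match.
        r.evalsha(shas["ack_fail"], 0, base, job_id,
                  int(time.time() * 1000), token, str(exc)[:4096])
    return True

Because reserve flips the :lane:rr bit after every successful lease, a steady stream of grouped jobs cannot starve the plain :wait lane, and vice versa.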
omniq/scripts.py
CHANGED
@@ -17,9 +17,9 @@ class OmniqScripts:
     pause_sha: str
     resume_sha: str
 
-def default_scripts_dir(
-    here = os.path.dirname(
-    return os.path.
+def default_scripts_dir() -> str:
+    here = os.path.dirname(__file__)
+    return os.path.join(here, "core", "scripts")
 
 def load_scripts(r: ScriptLoader, scripts_dir: str) -> OmniqScripts:
     def load_one(name: str) -> str:
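The 1.0.0 wheel shipped this file truncated mid-function (the removed lines above are not even syntactically complete), which is what 1.0.2 fixes. With the restored function, loading follows the signatures visible in this hunk; that a plain redis.Redis satisfies ScriptLoader is an assumption:

import redis

from omniq.scripts import default_scripts_dir, load_scripts

r = redis.Redis()
# Resolves to <package>/core/scripts, where the wheel now ships the .lua files.
scripts = load_scripts(r, default_scripts_dir())
print(scripts.pause_sha, scripts.resume_sha)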
omniq-1.0.2.dist-info/RECORD
ADDED
@@ -0,0 +1,23 @@
omniq/__init__.py,sha256=PoDH4HY02Q_8tshE6OgRkPSWnGpJDvAovNWlV-NeCw0,98
omniq/_ops.py,sha256=oSy3o1-0PRj_B1V8Ga7n7y83NpUYoxF872SDxsLpNZY,7656
omniq/client.py,sha256=6gh1G92aUYpVWtsqTMm5m7wrpXbI5Vu6PJdV2hwGU0E,4635
omniq/clock.py,sha256=YTpVrkx1hyop4GU9DuGXfu23n5KPsgGb_1DINiiV0FU,69
omniq/consumer.py,sha256=z-yFYRIDX6HTD5hf4Xls7EYp5_zRw9Jc_B6RQO8o4Ws,11140
omniq/ids.py,sha256=bGkDDZfYaZvFEnUD02TylWA05UpDYztQwndIscxFjm8,63
omniq/monitor.py,sha256=N0qbGUJfqwUubHLvgMbemswhcQLamwsc4_5OYgqFyr0,4008
omniq/scripts.py,sha256=LfSf7468im9k0gpICx57RAyhYqcFLYKDKN2dQRDmOxw,1246
omniq/transport.py,sha256=4Nj-RoyZG0L0aEbCleNCF1bWQHW7J4yVgPGmebWxGPE,1309
omniq/types.py,sha256=KXj-Z-uPV7lO3fmmyK6QYL8pJiPoYYei8UcPf0v1YUU,743
omniq/core/scripts/ack_fail.lua,sha256=S5PemA7SGUekj-Y36YmpWWXxmYBGEsEfsZfvVBJQYcA,3342
omniq/core/scripts/ack_success.lua,sha256=ksXl4C5RdcHg5BMXxZrP-mW0rmHehiKaF2q9jpl5By8,2349
omniq/core/scripts/enqueue.lua,sha256=ibUeIjmwtbCarI16tNr_AzmqWHxXwqplcY4mXoXeBOY,2822
omniq/core/scripts/heartbeat.lua,sha256=6ZUMJHTx40pwNFX6D_9-TT5vJOSioaDRV02xQszUSKY,1600
omniq/core/scripts/pause.lua,sha256=dRCm8YcgZGzPU2Je9KQmXa3se3eV9pKe4uskO5kGdVQ,184
omniq/core/scripts/promote_delayed.lua,sha256=IX3Lm1KMOt7OH6ZBZi5DW7QzluLdguDC-i7e23OzloM,1604
omniq/core/scripts/reap_expired.lua,sha256=DXAJJm4jMt4ddpAKNVARSf4wG87gwkT5Rq1e6P5URx4,3106
omniq/core/scripts/reserve.lua,sha256=L4Ty_IZATxCabHaiuJNia0BiHX9uIg-oAzdXDw-4_fI,3739
omniq/core/scripts/resume.lua,sha256=o8NtGKkFjQT18bkQ_BkkWo406k0tVUVlyBF0DmFobUc,177
omniq-1.0.2.dist-info/METADATA,sha256=KeAj1sFGox2ZxTAKPWw-UsXNmVo5zmPVwWjmv5iEkYA,177
omniq-1.0.2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
omniq-1.0.2.dist-info/top_level.txt,sha256=SMvOWui1e7OpLJn5BC_QsiZQIqsjhNfURCd7Ru9CuRE,6
omniq-1.0.2.dist-info/RECORD,,
omniq-1.0.0.dist-info/RECORD
DELETED
@@ -1,14 +0,0 @@
omniq/__init__.py,sha256=PoDH4HY02Q_8tshE6OgRkPSWnGpJDvAovNWlV-NeCw0,98
omniq/_ops.py,sha256=oSy3o1-0PRj_B1V8Ga7n7y83NpUYoxF872SDxsLpNZY,7656
omniq/client.py,sha256=qqG3nsfgTjGtap8hKsLJd_9vd0Iui_8aMJdJQhQCdmk,4643
omniq/clock.py,sha256=YTpVrkx1hyop4GU9DuGXfu23n5KPsgGb_1DINiiV0FU,69
omniq/consumer.py,sha256=z-yFYRIDX6HTD5hf4Xls7EYp5_zRw9Jc_B6RQO8o4Ws,11140
omniq/ids.py,sha256=bGkDDZfYaZvFEnUD02TylWA05UpDYztQwndIscxFjm8,63
omniq/monitor.py,sha256=N0qbGUJfqwUubHLvgMbemswhcQLamwsc4_5OYgqFyr0,4008
omniq/scripts.py,sha256=E5fhtc5m6qhEwCvIz__nwSWrH0L3xGkJXRO29ajY6kI,1316
omniq/transport.py,sha256=4Nj-RoyZG0L0aEbCleNCF1bWQHW7J4yVgPGmebWxGPE,1309
omniq/types.py,sha256=KXj-Z-uPV7lO3fmmyK6QYL8pJiPoYYei8UcPf0v1YUU,743
omniq-1.0.0.dist-info/METADATA,sha256=G-imH3HvAVfNKvBhNR9jwq4wFDZEWrZgfoH_8zD4q8M,177
omniq-1.0.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
omniq-1.0.0.dist-info/top_level.txt,sha256=SMvOWui1e7OpLJn5BC_QsiZQIqsjhNfURCd7Ru9CuRE,6
omniq-1.0.0.dist-info/RECORD,,
{omniq-1.0.0.dist-info → omniq-1.0.2.dist-info}/WHEEL
File without changes

{omniq-1.0.0.dist-info → omniq-1.0.2.dist-info}/top_level.txt
File without changes