omniq-1.0.0-py3-none-any.whl
This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- omniq/__init__.py +3 -0
- omniq/_ops.py +231 -0
- omniq/client.py +136 -0
- omniq/clock.py +4 -0
- omniq/consumer.py +318 -0
- omniq/ids.py +4 -0
- omniq/monitor.py +138 -0
- omniq/scripts.py +40 -0
- omniq/transport.py +45 -0
- omniq/types.py +35 -0
- omniq-1.0.0.dist-info/METADATA +7 -0
- omniq-1.0.0.dist-info/RECORD +14 -0
- omniq-1.0.0.dist-info/WHEEL +5 -0
- omniq-1.0.0.dist-info/top_level.txt +1 -0
omniq/__init__.py
ADDED
omniq/_ops.py
ADDED
@@ -0,0 +1,231 @@
+import json
+from dataclasses import dataclass
+from typing import Optional, Any
+
+from .clock import now_ms
+from .ids import new_ulid
+from .types import PayloadT, ReservePaused, ReserveJob, ReserveResult, AckFailResult
+from .transport import RedisLike
+from .scripts import OmniqScripts
+
+@dataclass
+class OmniqOps:
+    r: RedisLike
+    scripts: OmniqScripts
+
+    @staticmethod
+    def queue_base(queue_name: str) -> str:
+        if "{" in queue_name and "}" in queue_name:
+            return queue_name
+        return "{" + queue_name + "}"
+
+    def publish(
+        self,
+        *,
+        queue: str,
+        payload: Any,
+        job_id: Optional[str] = None,
+        max_attempts: int = 3,
+        timeout_ms: int = 30_000,
+        backoff_ms: int = 5_000,
+        due_ms: int = 0,
+        now_ms_override: int = 0,
+        gid: Optional[str] = None,
+        group_limit: int = 0,
+    ) -> str:
+        if not isinstance(payload, (dict, list)):
+            raise TypeError(
+                "publish(payload=...) must be a dict or list (structured JSON). "
+                "Wrap strings as {'text': '...'} or {'value': '...'}."
+            )
+
+        base = self.queue_base(queue)
+        nms = now_ms_override or now_ms()
+
+        jid = job_id or new_ulid()
+
+        payload_s = json.dumps(payload, separators=(",", ":"), ensure_ascii=False)
+
+        gid_s = (gid or "").strip()
+        glimit_s = str(int(group_limit)) if group_limit and group_limit > 0 else "0"
+
+        argv = [
+            base,
+            jid,
+            payload_s,
+            str(int(max_attempts)),
+            str(int(timeout_ms)),
+            str(int(backoff_ms)),
+            str(int(nms)),
+            str(int(due_ms)),
+            gid_s,
+            glimit_s,
+        ]
+
+        res = self.r.evalsha(self.scripts.enqueue_sha, 0, *argv)
+
+        if not isinstance(res, list) or len(res) < 2:
+            raise RuntimeError(f"Unexpected ENQUEUE response: {res}")
+
+        status = str(res[0])
+        out_id = str(res[1])
+
+        if status != "OK":
+            raise RuntimeError(f"ENQUEUE failed: {status}")
+
+        return out_id
+
+    def pause(self, *, queue: str) -> str:
+        base = self.queue_base(queue)
+        res = self.r.evalsha(self.scripts.pause_sha, 0, base)
+        return str(res)
+
+    def resume(self, *, queue: str) -> int:
+        base = self.queue_base(queue)
+        res = self.r.evalsha(self.scripts.resume_sha, 0, base)
+        try:
+            return int(res)
+        except Exception:
+            return 0
+
+    def is_paused(self, *, queue: str) -> bool:
+        base = self.queue_base(queue)
+        return self.r.exists(base + ":paused") == 1
+
+    def reserve(self, *, queue: str, now_ms_override: int = 0) -> ReserveResult:
+        base = self.queue_base(queue)
+        nms = now_ms_override or now_ms()
+
+        res = self.r.evalsha(self.scripts.reserve_sha, 0, base, str(int(nms)))
+
+        if not isinstance(res, list) or len(res) < 1:
+            raise RuntimeError(f"Unexpected RESERVE response: {res}")
+
+        if res[0] == "EMPTY":
+            return None
+
+        if res[0] == "PAUSED":
+            return ReservePaused()
+
+        if res[0] != "JOB" or len(res) < 7:
+            raise RuntimeError(f"Unexpected RESERVE response: {res}")
+
+        return ReserveJob(
+            status="JOB",
+            job_id=str(res[1]),
+            payload=str(res[2]),
+            lock_until_ms=int(res[3]),
+            attempt=int(res[4]),
+            gid=str(res[5] or ""),
+            lease_token=str(res[6] or ""),
+        )
+
+    def heartbeat(self, *, queue: str, job_id: str, lease_token: str, now_ms_override: int = 0) -> int:
+        base = self.queue_base(queue)
+        nms = now_ms_override or now_ms()
+
+        res = self.r.evalsha(self.scripts.heartbeat_sha, 0, base, job_id, str(int(nms)), lease_token)
+
+        if not isinstance(res, list) or len(res) < 1:
+            raise RuntimeError(f"Unexpected HEARTBEAT response: {res}")
+
+        if res[0] == "OK":
+            return int(res[1])
+
+        if res[0] == "ERR":
+            reason = str(res[1]) if len(res) > 1 else "UNKNOWN"
+            raise RuntimeError(f"HEARTBEAT failed: {reason}")
+
+        raise RuntimeError(f"Unexpected HEARTBEAT response: {res}")
+
+    def ack_success(self, *, queue: str, job_id: str, lease_token: str, now_ms_override: int = 0) -> None:
+        base = self.queue_base(queue)
+        nms = now_ms_override or now_ms()
+
+        res = self.r.evalsha(self.scripts.ack_success_sha, 0, base, job_id, str(int(nms)), lease_token)
+
+        if not isinstance(res, list) or len(res) < 1:
+            raise RuntimeError(f"Unexpected ACK_SUCCESS response: {res}")
+
+        if res[0] == "OK":
+            return
+
+        if res[0] == "ERR":
+            reason = str(res[1]) if len(res) > 1 else "UNKNOWN"
+            raise RuntimeError(f"ACK_SUCCESS failed: {reason}")
+
+        raise RuntimeError(f"Unexpected ACK_SUCCESS response: {res}")
+
+    def ack_fail(
+        self,
+        *,
+        queue: str,
+        job_id: str,
+        lease_token: str,
+        error: Optional[str] = None,
+        now_ms_override: int = 0,
+    ) -> AckFailResult:
+        base = self.queue_base(queue)
+        nms = now_ms_override or now_ms()
+
+        if error is None or str(error).strip() == "":
+            res = self.r.evalsha(self.scripts.ack_fail_sha, 0, base, job_id, str(int(nms)), lease_token)
+        else:
+            err_s = str(error)
+            res = self.r.evalsha(self.scripts.ack_fail_sha, 0, base, job_id, str(int(nms)), lease_token, err_s)
+
+        if not isinstance(res, list) or len(res) < 1:
+            raise RuntimeError(f"Unexpected ACK_FAIL response: {res}")
+
+        if res[0] == "RETRY":
+            return ("RETRY", int(res[1]))
+
+        if res[0] == "FAILED":
+            return ("FAILED", None)
+
+        if res[0] == "ERR":
+            reason = str(res[1]) if len(res) > 1 else "UNKNOWN"
+            raise RuntimeError(f"ACK_FAIL failed: {reason}")
+
+        raise RuntimeError(f"Unexpected ACK_FAIL response: {res}")
+
+    def promote_delayed(self, *, queue: str, max_promote: int = 1000, now_ms_override: int = 0) -> int:
+        base = self.queue_base(queue)
+        nms = now_ms_override or now_ms()
+
+        res = self.r.evalsha(self.scripts.promote_delayed_sha, 0, base, str(int(nms)), str(int(max_promote)))
+
+        if not isinstance(res, list) or len(res) < 2 or res[0] != "OK":
+            raise RuntimeError(f"Unexpected PROMOTE_DELAYED response: {res}")
+
+        return int(res[1])
+
+    def reap_expired(self, *, queue: str, max_reap: int = 1000, now_ms_override: int = 0) -> int:
+        base = self.queue_base(queue)
+        nms = now_ms_override or now_ms()
+
+        res = self.r.evalsha(self.scripts.reap_expired_sha, 0, base, str(int(nms)), str(int(max_reap)))
+
+        if not isinstance(res, list) or len(res) < 2 or res[0] != "OK":
+            raise RuntimeError(f"Unexpected REAP_EXPIRED response: {res}")
+
+        return int(res[1])
+
+    def job_timeout_ms(self, *, queue: str, job_id: str, default_ms: int = 60_000) -> int:
+        base = self.queue_base(queue)
+        k_job = base + ":job:" + job_id
+        v = self.r.hget(k_job, "timeout_ms")
+        try:
+            n = int(v) if v is not None and v != "" else 0
+        except Exception:
+            n = 0
+        return n if n > 0 else int(default_ms)
+
+    @staticmethod
+    def paused_backoff_s(poll_interval_s: float) -> float:
+        return max(0.25, float(poll_interval_s) * 10.0)
+
+    @staticmethod
+    def derive_heartbeat_interval_s(timeout_ms: int) -> float:
+        half = max(1.0, (float(timeout_ms) / 1000.0) / 2.0)
+        return max(1.0, min(10.0, half))
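Editor's note: the braces added by queue_base form a Redis Cluster hash tag, so every key derived from the base (:wait, :active, :delayed, :job:<id>, ...) hashes to the same slot, which is what lets the Lua scripts touch them atomically. A minimal sketch of handling the three possible reserve outcomes, assuming an already-constructed ops: OmniqOps:

    from omniq._ops import OmniqOps
    from omniq.types import ReserveJob, ReservePaused

    assert OmniqOps.queue_base("emails") == "{emails}"    # wrapped into a hash tag
    assert OmniqOps.queue_base("{emails}") == "{emails}"  # already tagged: unchanged

    def poll_once(ops: OmniqOps) -> None:
        res = ops.reserve(queue="emails")
        if res is None:                      # EMPTY: nothing ready
            return
        if isinstance(res, ReservePaused):   # queue is paused
            return
        assert isinstance(res, ReserveJob)   # JOB: we now hold a lease
        print(res.job_id, res.attempt, res.lease_token)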
omniq/client.py
ADDED
@@ -0,0 +1,136 @@
+from dataclasses import dataclass
+from typing import Callable, Optional, Any
+
+from ._ops import OmniqOps
+from .consumer import consume as consume_loop
+from .scripts import load_scripts, default_scripts_dir
+from .transport import RedisConnOpts, build_redis_client, RedisLike
+from .types import PayloadT, ReserveResult, AckFailResult
+
+@dataclass
+class OmniqClient:
+    _ops: OmniqOps
+
+    def __init__(
+        self,
+        *,
+        redis: Optional[RedisLike] = None,
+        redis_url: Optional[str] = None,
+        host: Optional[str] = None,
+        port: int = 6379,
+        db: int = 0,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
+        ssl: bool = False,
+        scripts_dir: Optional[str] = None,
+    ):
+        if redis is not None:
+            r = redis
+        else:
+            r = build_redis_client(
+                RedisConnOpts(
+                    redis_url=redis_url,
+                    host=host,
+                    port=port,
+                    db=db,
+                    username=username,
+                    password=password,
+                    ssl=ssl,
+                )
+            )
+
+        if scripts_dir is None:
+            scripts_dir = default_scripts_dir(__file__)
+        scripts = load_scripts(r, scripts_dir)
+
+        self._ops = OmniqOps(r=r, scripts=scripts)
+
+    @staticmethod
+    def queue_base(queue_name: str) -> str:
+        return OmniqOps.queue_base(queue_name)
+
+    def publish(
+        self,
+        *,
+        queue: str,
+        payload: Any,
+        job_id: Optional[str] = None,
+        max_attempts: int = 3,
+        timeout_ms: int = 60_000,
+        backoff_ms: int = 5_000,
+        due_ms: int = 0,
+        gid: Optional[str] = None,
+        group_limit: int = 0,
+    ) -> str:
+        return self._ops.publish(
+            queue=queue,
+            payload=payload,
+            job_id=job_id,
+            max_attempts=max_attempts,
+            timeout_ms=timeout_ms,
+            backoff_ms=backoff_ms,
+            due_ms=due_ms,
+            gid=gid,
+            group_limit=group_limit,
+        )
+
+    def reserve(self, *, queue: str, now_ms_override: int = 0) -> ReserveResult:
+        return self._ops.reserve(queue=queue, now_ms_override=now_ms_override)
+
+    def heartbeat(self, *, queue: str, job_id: str, lease_token: str, now_ms_override: int = 0) -> int:
+        return self._ops.heartbeat(queue=queue, job_id=job_id, lease_token=lease_token, now_ms_override=now_ms_override)
+
+    def ack_success(self, *, queue: str, job_id: str, lease_token: str, now_ms_override: int = 0) -> None:
+        return self._ops.ack_success(queue=queue, job_id=job_id, lease_token=lease_token, now_ms_override=now_ms_override)
+
+    def ack_fail(self, *, queue: str, job_id: str, lease_token: str, now_ms_override: int = 0) -> AckFailResult:
+        return self._ops.ack_fail(queue=queue, job_id=job_id, lease_token=lease_token, now_ms_override=now_ms_override)
+
+    def promote_delayed(self, *, queue: str, max_promote: int = 1000, now_ms_override: int = 0) -> int:
+        return self._ops.promote_delayed(queue=queue, max_promote=max_promote, now_ms_override=now_ms_override)
+
+    def reap_expired(self, *, queue: str, max_reap: int = 1000, now_ms_override: int = 0) -> int:
+        return self._ops.reap_expired(queue=queue, max_reap=max_reap, now_ms_override=now_ms_override)
+
+    def pause(self, *, queue: str) -> str:
+        return self._ops.pause(queue=queue)
+
+    def resume(self, *, queue: str) -> int:
+        return self._ops.resume(queue=queue)
+
+    def is_paused(self, *, queue: str) -> bool:
+        return self._ops.is_paused(queue=queue)
+
+    def consume(
+        self,
+        *,
+        queue: str,
+        handler: Callable[[Any], None],
+        poll_interval_s: float = 0.05,
+        promote_interval_s: float = 1.0,
+        promote_batch: int = 1000,
+        reap_interval_s: float = 1.0,
+        reap_batch: int = 1000,
+        heartbeat_interval_s: Optional[float] = None,
+        verbose: bool = False,
+        logger: Callable[[str], None] = print,
+        drain: bool = True,
+    ) -> None:
+        return consume_loop(
+            self._ops,
+            queue=queue,
+            handler=handler,
+            poll_interval_s=poll_interval_s,
+            promote_interval_s=promote_interval_s,
+            promote_batch=promote_batch,
+            reap_interval_s=reap_interval_s,
+            reap_batch=reap_batch,
+            heartbeat_interval_s=heartbeat_interval_s,
+            verbose=verbose,
+            logger=logger,
+            drain=drain,
+        )
+
+    @property
+    def ops(self) -> OmniqOps:
+        return self._ops
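Editor's note: a typical producer/worker pair built on this client might look like the sketch below. The URL, queue name, payload, and send_email function are illustrative only; the handler receives a JobCtx whose payload field is the decoded JSON, and raising inside the handler routes the job through ack_fail (retry with backoff until max_attempts, then FAILED):

    from omniq.client import OmniqClient

    client = OmniqClient(redis_url="redis://localhost:6379/0")  # hypothetical URL

    # producer: payload must be a dict or list (enforced by OmniqOps.publish)
    job_id = client.publish(queue="emails", payload={"to": "a@example.com"}, max_attempts=5)

    # worker: blocks until SIGINT/SIGTERM; an exception in the handler => ack_fail
    def handle(ctx):
        send_email(ctx.payload)  # hypothetical application function

    client.consume(queue="emails", handler=handle, verbose=True)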
omniq/clock.py
ADDED
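Editor's note: clock.py's body is not rendered in this diff (4 added lines, 69 bytes per the RECORD). Given how _ops calls now_ms(), it is presumably a one-function millisecond clock; a plausible sketch, not the shipped code:

    import time

    def now_ms() -> int:
        # current wall-clock time in whole milliseconds
        return int(time.time() * 1000)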
omniq/consumer.py
ADDED
@@ -0,0 +1,318 @@
+import json
+import threading
+import time
+import signal
+from dataclasses import dataclass
+from typing import Any, Callable, Dict, Optional
+
+from ._ops import OmniqOps
+from .types import JobCtx, ReserveJob
+
+@dataclass
+class StopController:
+    stop: bool = False
+
+@dataclass
+class HeartbeatHandle:
+    stop_evt: threading.Event
+    flags: Dict[str, bool]
+    thread: threading.Thread
+
+def _install_sigterm_handler(ctrl: StopController, logger, verbose: bool) -> None:
+    def on_sigterm(signum, _frame):
+        ctrl.stop = True
+        if verbose:
+            _safe_log(logger, f"[consume] SIGTERM received (stopping...)")
+
+    signal.signal(signal.SIGTERM, on_sigterm)
+
+def start_heartbeater(
+    ops: OmniqOps,
+    *,
+    queue: str,
+    job_id: str,
+    lease_token: str,
+    interval_s: float,
+) -> HeartbeatHandle:
+    stop_evt = threading.Event()
+    flags: Dict[str, bool] = {"lost": False}
+
+    def hb_loop():
+        try:
+            ops.heartbeat(queue=queue, job_id=job_id, lease_token=lease_token)
+        except Exception as e:
+            msg = str(e)
+            if "NOT_ACTIVE" in msg or "TOKEN_MISMATCH" in msg:
+                flags["lost"] = True
+                stop_evt.set()
+                return
+
+        while not stop_evt.wait(interval_s):
+            try:
+                ops.heartbeat(queue=queue, job_id=job_id, lease_token=lease_token)
+            except Exception as e:
+                msg = str(e)
+                if "NOT_ACTIVE" in msg or "TOKEN_MISMATCH" in msg:
+                    flags["lost"] = True
+                    stop_evt.set()
+                    return
+
+    t = threading.Thread(target=hb_loop, daemon=True)
+    t.start()
+    return HeartbeatHandle(stop_evt=stop_evt, flags=flags, thread=t)
+
+
+def _safe_log(logger: Callable[[str], None], msg: str) -> None:
+    try:
+        logger(msg)
+    except Exception:
+        pass
+
+
+def _payload_preview(payload: Any, max_len: int = 300) -> str:
+    try:
+        s = payload if isinstance(payload, str) else json.dumps(payload, ensure_ascii=False)
+    except Exception:
+        s = str(payload)
+    if len(s) > max_len:
+        return s[:max_len] + "…"
+    return s
+
+def consume(
+    ops: OmniqOps,
+    *,
+    queue: str,
+    handler: Callable[[JobCtx], None],
+    poll_interval_s: float = 0.05,
+    promote_interval_s: float = 1.0,
+    promote_batch: int = 1000,
+    reap_interval_s: float = 1.0,
+    reap_batch: int = 1000,
+    heartbeat_interval_s: Optional[float] = None,
+    verbose: bool = False,
+    logger: Callable[[str], None] = print,
+    stop_on_ctrl_c: bool = True,
+    drain: bool = True,
+) -> None:
+    """
+    Shutdown semantics:
+
+    - drain=True:
+        Ctrl+C (SIGINT) requests stop AFTER current job finishes.
+        We override SIGINT so it does NOT raise KeyboardInterrupt (graceful).
+        Second Ctrl+C => hard exit.
+
+    - drain=False:
+        Ctrl+C raises KeyboardInterrupt normally (fast stop).
+        We do NOT override SIGINT.
+        SIGTERM always requests stop; if drain=False we exit ASAP after reserve.
+
+    Notes:
+    - We cannot force-stop user code safely. drain=False relies on KeyboardInterrupt to interrupt sleep/py code.
+    """
+
+    last_promote = 0.0
+    last_reap = 0.0
+
+    # ensure StopController has sigint_count
+    if not hasattr(StopController, "__annotations__") or "sigint_count" not in getattr(StopController, "__annotations__", {}):
+        # fallback: still works without sigint_count, but no "double Ctrl+C" behavior
+        ctrl = StopController(stop=False)  # type: ignore
+        ctrl.sigint_count = 0  # type: ignore
+    else:
+        ctrl = StopController(stop=False, sigint_count=0)  # type: ignore
+
+    if stop_on_ctrl_c:
+        import signal
+
+        # Always handle SIGTERM (docker/k8s stop)
+        def on_sigterm(signum, _frame):
+            ctrl.stop = True
+            if verbose:
+                _safe_log(logger, f"[consume] SIGTERM received; stopping... queue={queue}")
+
+        signal.signal(signal.SIGTERM, on_sigterm)
+
+        if drain:
+            # drain=True: override SIGINT so handler is NOT interrupted.
+            # first Ctrl+C => request stop-after-job
+            # second Ctrl+C => hard exit (KeyboardInterrupt)
+            prev = signal.getsignal(signal.SIGINT)
+
+            def on_sigint(signum, frame):
+                ctrl.sigint_count += 1
+                if ctrl.sigint_count >= 2:
+                    if verbose:
+                        _safe_log(logger, f"[consume] SIGINT x2; hard exit now. queue={queue}")
+                    # restore default and re-raise via default behavior
+                    try:
+                        signal.signal(signal.SIGINT, prev if prev else signal.SIG_DFL)
+                    except Exception:
+                        signal.signal(signal.SIGINT, signal.SIG_DFL)
+                    raise KeyboardInterrupt
+
+                ctrl.stop = True
+                if verbose:
+                    _safe_log(logger, f"[consume] Ctrl+C received; draining current job then exiting. queue={queue}")
+
+            signal.signal(signal.SIGINT, on_sigint)
+        else:
+            # drain=False: do NOT override SIGINT
+            pass
+
+    try:
+        while True:
+            # if stop requested and idle, exit
+            if ctrl.stop:
+                if verbose:
+                    _safe_log(logger, f"[consume] stop requested; exiting (idle). queue={queue}")
+                return
+
+            now_s = time.time()
+
+            if now_s - last_promote >= promote_interval_s:
+                try:
+                    ops.promote_delayed(queue=queue, max_promote=promote_batch)
+                except Exception:
+                    pass
+                last_promote = now_s
+
+            if now_s - last_reap >= reap_interval_s:
+                try:
+                    ops.reap_expired(queue=queue, max_reap=reap_batch)
+                except Exception:
+                    pass
+                last_reap = now_s
+
+            try:
+                res = ops.reserve(queue=queue)
+            except Exception as e:
+                if verbose:
+                    _safe_log(logger, f"[consume] reserve error: {e}")
+                time.sleep(0.2)
+                continue
+
+            if res is None:
+                time.sleep(poll_interval_s)
+                continue
+
+            if getattr(res, "status", "") == "PAUSED":
+                time.sleep(ops.paused_backoff_s(poll_interval_s))
+                continue
+
+            assert isinstance(res, ReserveJob)
+            if not res.lease_token:
+                if verbose:
+                    _safe_log(logger, f"[consume] invalid reserve (missing lease_token) job_id={res.job_id}")
+                time.sleep(0.2)
+                continue
+
+            # if stop requested right after reserve:
+            # - drain=False => exit ASAP (no handler, no ack, stop heartbeats)
+            # - drain=True => process this one job, then exit after ack
+            if ctrl.stop and not drain:
+                if verbose:
+                    _safe_log(logger, f"[consume] stop requested; fast-exit after reserve job_id={res.job_id}")
+                return
+
+            # payload is JSON (contract). keep defensive fallback.
+            try:
+                payload_obj: Any = json.loads(res.payload)
+            except Exception:
+                payload_obj = res.payload
+
+            ctx = JobCtx(
+                queue=queue,
+                job_id=res.job_id,
+                payload_raw=res.payload,
+                payload=payload_obj,
+                attempt=res.attempt,
+                lock_until_ms=res.lock_until_ms,
+                lease_token=res.lease_token,
+                gid=res.gid,
+            )
+
+            if verbose:
+                pv = _payload_preview(ctx.payload)
+                gid_s = ctx.gid or "-"
+                _safe_log(logger, f"[consume] received job_id={ctx.job_id} attempt={ctx.attempt} gid={gid_s} payload={pv}")
+
+            # heartbeat cadence
+            if heartbeat_interval_s is not None:
+                hb_s = float(heartbeat_interval_s)
+            else:
+                timeout_ms = ops.job_timeout_ms(queue=queue, job_id=res.job_id)
+                hb_s = ops.derive_heartbeat_interval_s(timeout_ms)
+
+            hb = start_heartbeater(
+                ops,
+                queue=queue,
+                job_id=res.job_id,
+                lease_token=res.lease_token,
+                interval_s=hb_s,
+            )
+
+            try:
+                handler(ctx)
+
+                hb.stop_evt.set()
+
+                if not hb.flags.get("lost", False):
+                    try:
+                        ops.ack_success(queue=queue, job_id=res.job_id, lease_token=res.lease_token)
+                        if verbose:
+                            _safe_log(logger, f"[consume] ack success job_id={ctx.job_id}")
+                    except Exception as e:
+                        if verbose:
+                            _safe_log(logger, f"[consume] ack success error job_id={ctx.job_id}: {e}")
+
+            except KeyboardInterrupt:
+                # This only happens in drain=False mode (SIGINT not overridden),
+                # or on double Ctrl+C in drain=True mode.
+                if verbose:
+                    _safe_log(logger, f"[consume] KeyboardInterrupt; exiting now. queue={queue}")
+                hb.stop_evt.set()
+                return
+
+            except Exception as e:
+                hb.stop_evt.set()
+
+                if not hb.flags.get("lost", False):
+                    try:
+                        err = f"{type(e).__name__}: {e}"
+                        result = ops.ack_fail(
+                            queue=queue,
+                            job_id=res.job_id,
+                            lease_token=res.lease_token,
+                            error=err,
+                        )
+                        if verbose:
+                            if result[0] == "RETRY":
+                                _safe_log(logger, f"[consume] ack fail job_id={ctx.job_id} => RETRY due_ms={result[1]}")
+                            else:
+                                _safe_log(logger, f"[consume] ack fail job_id={ctx.job_id} => FAILED")
+                            _safe_log(logger, f"[consume] error job_id={ctx.job_id} => {err}")
+                    except Exception as e2:
+                        if verbose:
+                            _safe_log(logger, f"[consume] ack fail error job_id={ctx.job_id}: {e2}")
+
+            finally:
+                try:
+                    hb.thread.join(timeout=0.1)
+                except Exception:
+                    pass
+
+            # drain=True: if stop requested (SIGTERM or Ctrl+C), exit after finishing this job
+            if ctrl.stop and drain:
+                if verbose:
+                    _safe_log(logger, f"[consume] stop requested; exiting after draining job_id={ctx.job_id}")
+                return
+
+    except KeyboardInterrupt:
+        # Ctrl+C while idle in drain=False mode, or double Ctrl+C in drain=True mode
+        if verbose:
+            _safe_log(logger, f"[consume] KeyboardInterrupt (outer); exiting now. queue={queue}")
+        return
+
+
+
omniq/ids.py
ADDED
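Editor's note: ids.py is likewise not rendered in this diff (4 added lines, 63 bytes per the RECORD). new_ulid() only needs to produce unique, roughly time-ordered job ids; a hypothetical stdlib-only stand-in with those properties, not the shipped code:

    import os
    import time

    def new_ulid() -> str:
        # hypothetical stand-in: 48-bit ms timestamp + 80 random bits, hex-encoded,
        # so ids sort roughly by creation time, as a real ULID would
        return f"{int(time.time() * 1000):012x}{os.urandom(10).hex()}"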
omniq/monitor.py
ADDED
@@ -0,0 +1,138 @@
+from dataclasses import dataclass
+from typing import List
+
+@dataclass(frozen=True)
+class QueueCounts:
+    paused: bool
+    waiting: int
+    active: int
+    delayed: int
+    completed: int
+    failed: int
+
+
+@dataclass(frozen=True)
+class GroupStatus:
+    gid: str
+    inflight: int
+    limit: int
+
+
+@dataclass(frozen=True)
+class ActiveSample:
+    job_id: str
+    gid: str
+    lock_until_ms: int
+    attempt: int
+
+
+@dataclass(frozen=True)
+class DelayedSample:
+    job_id: str
+    gid: str
+    due_ms: int
+    attempt: int
+
+
+class QueueMonitor:
+    def __init__(self, uq):
+        self._uq = uq
+        self._r = getattr(uq, "r", None) or getattr(getattr(uq, "ops", None), "r", None) or getattr(getattr(uq, "_ops", None), "r", None)
+        if self._r is None:
+            raise ValueError("QueueMonitor needs redis access (inject from server, do not expose to UI callers).")
+
+    def _base(self, queue: str) -> str:
+        return self._uq.queue_base(queue)
+
+    def counts(self, queue: str) -> QueueCounts:
+        base = self._base(queue)
+        r = self._r
+
+        paused = r.exists(f"{base}:paused") == 1
+
+        waiting = int(r.llen(f"{base}:wait") or 0)
+        active = int(r.zcard(f"{base}:active") or 0)
+        delayed = int(r.zcard(f"{base}:delayed") or 0)
+        completed = int(r.llen(f"{base}:completed") or 0)
+        failed = int(r.llen(f"{base}:failed") or 0)
+
+        return QueueCounts(
+            paused=paused,
+            waiting=waiting,
+            active=active,
+            delayed=delayed,
+            completed=completed,
+            failed=failed,
+        )
+
+    def groups_ready(self, queue: str, limit: int = 200) -> List[str]:
+        base = self._base(queue)
+        r = self._r
+        limit = max(1, min(int(limit), 2000))
+        try:
+            gids = r.zrange(f"{base}:groups:ready", 0, limit - 1)
+            return [g for g in gids if g]
+        except Exception:
+            return []
+
+    def group_status(self, queue: str, gids: List[str], default_limit: int = 1) -> List[GroupStatus]:
+        base = self._base(queue)
+        r = self._r
+
+        out: List[GroupStatus] = []
+        for gid in gids:
+            inflight = int(r.get(f"{base}:g:{gid}:inflight") or "0")
+
+            raw = r.get(f"{base}:g:{gid}:limit")
+            try:
+                gl = int(raw) if raw else 0
+            except Exception:
+                gl = 0
+            limit = gl if gl > 0 else int(default_limit)
+
+            out.append(GroupStatus(gid=str(gid), inflight=inflight, limit=limit))
+        return out
+
+    def sample_active(self, queue: str, limit: int = 50) -> List[ActiveSample]:
+        base = self._base(queue)
+        r = self._r
+        limit = max(1, min(int(limit), 500))
+
+        job_ids = r.zrange(f"{base}:active", 0, limit - 1)
+        out: List[ActiveSample] = []
+
+        for jid in job_ids:
+            k_job = f"{base}:job:{jid}"
+            gid, attempt = r.hmget(k_job, "gid", "attempt")
+            score = r.zscore(f"{base}:active", jid) or 0
+            out.append(
+                ActiveSample(
+                    job_id=str(jid),
+                    gid=str(gid or ""),
+                    lock_until_ms=int(score),
+                    attempt=int(attempt or 0),
+                )
+            )
+        return out
+
+    def sample_delayed(self, queue: str, limit: int = 50) -> List[DelayedSample]:
+        base = self._base(queue)
+        r = self._r
+        limit = max(1, min(int(limit), 500))
+
+        job_ids = r.zrange(f"{base}:delayed", 0, limit - 1)
+        out: List[DelayedSample] = []
+
+        for jid in job_ids:
+            k_job = f"{base}:job:{jid}"
+            gid, attempt = r.hmget(k_job, "gid", "attempt")
+            due = r.zscore(f"{base}:delayed", jid) or 0
+            out.append(
+                DelayedSample(
+                    job_id=str(jid),
+                    gid=str(gid or ""),
+                    due_ms=int(due),
+                    attempt=int(attempt or 0),
+                )
+            )
+        return out
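Editor's note: QueueMonitor reads the same key layout the Lua scripts maintain (the :wait list, :active and :delayed sorted sets, per-group counters). A server-side usage sketch, assuming an existing OmniqClient (it discovers the Redis handle via client.ops.r):

    from omniq.client import OmniqClient
    from omniq.monitor import QueueMonitor

    client = OmniqClient(redis_url="redis://localhost:6379/0")  # hypothetical URL
    mon = QueueMonitor(client)

    c = mon.counts("emails")
    print(f"waiting={c.waiting} active={c.active} delayed={c.delayed} paused={c.paused}")

    for s in mon.sample_active("emails", limit=10):
        print(s.job_id, s.gid, s.lock_until_ms, s.attempt)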
omniq/scripts.py
ADDED
@@ -0,0 +1,40 @@
+import os
+from dataclasses import dataclass
+from typing import Protocol
+
+class ScriptLoader(Protocol):
+    def script_load(self, script: str) -> str: ...
+
+@dataclass(frozen=True)
+class OmniqScripts:
+    enqueue_sha: str
+    reserve_sha: str
+    ack_success_sha: str
+    ack_fail_sha: str
+    promote_delayed_sha: str
+    reap_expired_sha: str
+    heartbeat_sha: str
+    pause_sha: str
+    resume_sha: str
+
+def default_scripts_dir(current_file: str) -> str:
+    here = os.path.dirname(os.path.abspath(current_file))
+    return os.path.abspath(os.path.join(here, "..", "..", "scripts", "scripts"))
+
+def load_scripts(r: ScriptLoader, scripts_dir: str) -> OmniqScripts:
+    def load_one(name: str) -> str:
+        path = os.path.join(scripts_dir, name)
+        with open(path, "r", encoding="utf-8") as f:
+            return r.script_load(f.read())
+
+    return OmniqScripts(
+        enqueue_sha=load_one("enqueue.lua"),
+        reserve_sha=load_one("reserve.lua"),
+        ack_success_sha=load_one("ack_success.lua"),
+        ack_fail_sha=load_one("ack_fail.lua"),
+        promote_delayed_sha=load_one("promote_delayed.lua"),
+        reap_expired_sha=load_one("reap_expired.lua"),
+        heartbeat_sha=load_one("heartbeat.lua"),
+        pause_sha=load_one("pause.lua"),
+        resume_sha=load_one("resume.lua"),
+    )
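Editor's note: two observations. First, default_scripts_dir resolves to a scripts/scripts directory two levels above the installed omniq package, and the wheel's RECORD below lists no .lua files at all, so installs from this wheel would need an explicit scripts_dir. Second, load_scripts caches only SHA1 digests; if Redis restarts or someone runs SCRIPT FLUSH, the evalsha calls in _ops will raise NOSCRIPT. A defensive wrapper along these lines could reload on demand (a sketch, not part of the package):

    import redis

    def evalsha_reloading(r: redis.Redis, sha: str, source: str, numkeys: int, *args):
        # retry once after re-registering the script if Redis lost its script cache
        try:
            return r.evalsha(sha, numkeys, *args)
        except redis.exceptions.NoScriptError:
            r.script_load(source)  # SHA1 of the same source is unchanged
            return r.evalsha(sha, numkeys, *args)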
omniq/transport.py
ADDED
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any, Optional, Protocol
+
+import redis
+
+class RedisLike(Protocol):
+    def evalsha(self, sha: str, numkeys: int, *args: str) -> Any: ...
+    def script_load(self, script: str) -> str: ...
+    def exists(self, key: str) -> int: ...
+    def hget(self, key: str, field: str) -> Optional[str]: ...
+
+
+@dataclass(frozen=True)
+class RedisConnOpts:
+    redis_url: Optional[str] = None
+    host: Optional[str] = None
+    port: int = 6379
+    db: int = 0
+    username: Optional[str] = None
+    password: Optional[str] = None
+    ssl: bool = False
+    socket_timeout: Optional[float] = None
+    socket_connect_timeout: Optional[float] = None
+
+
+def build_redis_client(opts: RedisConnOpts) -> redis.Redis:
+    if opts.redis_url:
+        return redis.Redis.from_url(opts.redis_url, decode_responses=True)
+
+    if not opts.host:
+        raise ValueError("RedisConnOpts requires host (or redis_url)")
+
+    return redis.Redis(
+        host=opts.host,
+        port=int(opts.port),
+        db=int(opts.db),
+        username=opts.username,
+        password=opts.password,
+        ssl=bool(opts.ssl),
+        socket_timeout=opts.socket_timeout,
+        socket_connect_timeout=opts.socket_connect_timeout,
+        decode_responses=True,
+    )
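Editor's note: build_redis_client always sets decode_responses=True, which the rest of the package depends on (string comparisons like res[0] == "OK"). Both construction paths, with illustrative connection details:

    from omniq.transport import RedisConnOpts, build_redis_client

    # from a URL (hypothetical address)
    r1 = build_redis_client(RedisConnOpts(redis_url="rediss://user:pass@redis.example.com:6380/0"))

    # from discrete fields; host is required on this path
    r2 = build_redis_client(RedisConnOpts(host="localhost", port=6379, db=0, socket_timeout=5.0))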
omniq/types.py
ADDED
@@ -0,0 +1,35 @@
+from dataclasses import dataclass
+from typing import Any, Dict, Optional, Tuple, Union, Literal
+
+PayloadT = Union[Dict[str, Any], list]
+
+@dataclass(frozen=True)
+class JobCtx:
+    queue: str
+    job_id: str
+    payload_raw: str
+    payload: PayloadT
+    attempt: int
+    lock_until_ms: int
+    lease_token: str
+    gid: str = ""
+
+ReserveEmpty = None
+
+@dataclass(frozen=True)
+class ReservePaused:
+    status: Literal["PAUSED"] = "PAUSED"
+
+@dataclass(frozen=True)
+class ReserveJob:
+    status: Literal["JOB"]
+    job_id: str
+    payload: str
+    lock_until_ms: int
+    attempt: int
+    gid: str
+    lease_token: str
+
+ReserveResult = Union[ReserveEmpty, ReservePaused, ReserveJob]
+
+AckFailResult = Tuple[Literal["RETRY", "FAILED"], Optional[int]]
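Editor's note: since ReserveEmpty is literally None, ReserveResult collapses to Optional[Union[ReservePaused, ReserveJob]], and AckFailResult is a plain two-tuple. A small illustration of unpacking it:

    from omniq.types import AckFailResult

    def describe(result: AckFailResult) -> str:
        status, due_ms = result
        if status == "RETRY":
            return f"will retry at due_ms={due_ms}"
        return "moved to failed"  # status == "FAILED", due_ms is None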
omniq-1.0.0.dist-info/RECORD
ADDED
@@ -0,0 +1,14 @@
+omniq/__init__.py,sha256=PoDH4HY02Q_8tshE6OgRkPSWnGpJDvAovNWlV-NeCw0,98
+omniq/_ops.py,sha256=oSy3o1-0PRj_B1V8Ga7n7y83NpUYoxF872SDxsLpNZY,7656
+omniq/client.py,sha256=qqG3nsfgTjGtap8hKsLJd_9vd0Iui_8aMJdJQhQCdmk,4643
+omniq/clock.py,sha256=YTpVrkx1hyop4GU9DuGXfu23n5KPsgGb_1DINiiV0FU,69
+omniq/consumer.py,sha256=z-yFYRIDX6HTD5hf4Xls7EYp5_zRw9Jc_B6RQO8o4Ws,11140
+omniq/ids.py,sha256=bGkDDZfYaZvFEnUD02TylWA05UpDYztQwndIscxFjm8,63
+omniq/monitor.py,sha256=N0qbGUJfqwUubHLvgMbemswhcQLamwsc4_5OYgqFyr0,4008
+omniq/scripts.py,sha256=E5fhtc5m6qhEwCvIz__nwSWrH0L3xGkJXRO29ajY6kI,1316
+omniq/transport.py,sha256=4Nj-RoyZG0L0aEbCleNCF1bWQHW7J4yVgPGmebWxGPE,1309
+omniq/types.py,sha256=KXj-Z-uPV7lO3fmmyK6QYL8pJiPoYYei8UcPf0v1YUU,743
+omniq-1.0.0.dist-info/METADATA,sha256=G-imH3HvAVfNKvBhNR9jwq4wFDZEWrZgfoH_8zD4q8M,177
+omniq-1.0.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+omniq-1.0.0.dist-info/top_level.txt,sha256=SMvOWui1e7OpLJn5BC_QsiZQIqsjhNfURCd7Ru9CuRE,6
+omniq-1.0.0.dist-info/RECORD,,
omniq-1.0.0.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
+omniq