svc-infra 0.1.596__py3-none-any.whl → 0.1.597__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of svc-infra might be problematic.
- svc_infra/cli/__init__.py +4 -0
- svc_infra/cli/__main__.py +4 -0
- svc_infra/cli/cmds/__init__.py +2 -0
- svc_infra/cli/cmds/jobs/__init__.py +1 -0
- svc_infra/cli/cmds/jobs/jobs_cmds.py +43 -0
- svc_infra/db/outbox.py +9 -1
- svc_infra/jobs/builtins/outbox_processor.py +38 -0
- svc_infra/jobs/builtins/webhook_delivery.py +59 -0
- svc_infra/jobs/easy.py +32 -0
- svc_infra/jobs/loader.py +45 -0
- svc_infra/jobs/queue.py +73 -0
- svc_infra/jobs/redis_queue.py +191 -0
- svc_infra/jobs/scheduler.py +41 -0
- svc_infra/jobs/worker.py +24 -0
- {svc_infra-0.1.596.dist-info → svc_infra-0.1.597.dist-info}/METADATA +1 -1
- {svc_infra-0.1.596.dist-info → svc_infra-0.1.597.dist-info}/RECORD +18 -7
- {svc_infra-0.1.596.dist-info → svc_infra-0.1.597.dist-info}/WHEEL +0 -0
- {svc_infra-0.1.596.dist-info → svc_infra-0.1.597.dist-info}/entry_points.txt +0 -0
svc_infra/cli/__init__.py
CHANGED

@@ -4,6 +4,7 @@ import typer
 
 from svc_infra.cli.cmds import (
     _HELP,
+    jobs_app,
     register_alembic,
     register_mongo,
     register_mongo_scaffold,
@@ -26,6 +27,9 @@ register_mongo_scaffold(app)
 # -- observability commands ---
 register_obs(app)
 
+# -- jobs commands ---
+app.add_typer(jobs_app, name="jobs")
+
 
 def main():
     app()

svc_infra/cli/cmds/__init__.py
CHANGED

@@ -4,6 +4,7 @@ from svc_infra.cli.cmds.db.nosql.mongo.mongo_scaffold_cmds import (
 )
 from svc_infra.cli.cmds.db.sql.alembic_cmds import register as register_alembic
 from svc_infra.cli.cmds.db.sql.sql_scaffold_cmds import register as register_sql_scaffold
+from svc_infra.cli.cmds.jobs.jobs_cmds import app as jobs_app
 from svc_infra.cli.cmds.obs.obs_cmds import register as register_obs
 
 from .help import _HELP
@@ -14,5 +15,6 @@ __all__ = [
     "register_mongo",
     "register_mongo_scaffold",
     "register_obs",
+    "jobs_app",
    "_HELP",
 ]

svc_infra/cli/cmds/jobs/__init__.py
ADDED

@@ -0,0 +1 @@
+from __future__ import annotations

svc_infra/cli/cmds/jobs/jobs_cmds.py
ADDED

@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+import asyncio
+from typing import Optional
+
+import typer
+
+from svc_infra.jobs.easy import easy_jobs
+from svc_infra.jobs.loader import schedule_from_env
+from svc_infra.jobs.worker import process_one
+
+app = typer.Typer(help="Background jobs and scheduler commands")
+
+
+@app.command("run")
+def run(
+    poll_interval: float = typer.Option(0.5, help="Sleep seconds between loops when idle"),
+    max_loops: Optional[int] = typer.Option(None, help="Max loops before exit (for tests)"),
+):
+    """Run scheduler ticks and process jobs in a simple loop."""
+
+    queue, scheduler = easy_jobs()
+    # load schedule from env JSON if provided
+    schedule_from_env(scheduler)
+
+    async def _loop():
+        loops = 0
+        while True:
+            await scheduler.tick()
+            processed = await process_one(queue, _noop_handler)
+            if not processed:
+                # idle
+                await asyncio.sleep(poll_interval)
+            if max_loops is not None:
+                loops += 1
+                if loops >= max_loops:
+                    break
+
+    async def _noop_handler(job):
+        # Default handler does nothing; users should write their own runners
+        return None
+
+    asyncio.run(_loop())

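The new jobs run command can be exercised end to end with Typer's test runner. A minimal sketch, assuming the default in-memory driver; --max-loops 1 keeps the loop finite:

    from typer.testing import CliRunner

    from svc_infra.cli import app

    runner = CliRunner()
    # one scheduler tick plus one idle worker pass, then exit
    result = runner.invoke(app, ["jobs", "run", "--max-loops", "1"])
    assert result.exit_code == 0
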
svc_infra/db/outbox.py
CHANGED

@@ -20,7 +20,12 @@ class OutboxStore(Protocol):
         pass
 
     def fetch_next(self, *, topics: Optional[Iterable[str]] = None) -> Optional[OutboxMessage]:
-        """Return the next unprocessed message (FIFO per-topic), or None
+        """Return the next undispatched, unprocessed message (FIFO per-topic), or None.
+
+        Notes:
+        - Messages with attempts > 0 are considered "dispatched" to the job queue and won't be re-enqueued.
+        - Delivery retries are handled by the job queue worker, not by re-reading the outbox.
+        """
         pass
 
     def mark_processed(self, msg_id: int) -> None:
@@ -48,6 +53,9 @@ class InMemoryOutboxStore:
         for msg in self._messages:
             if msg.processed_at is not None:
                 continue
+            # skip already dispatched messages (attempts>0)
+            if msg.attempts > 0:
+                continue
             if allowed is not None and msg.topic not in allowed:
                 continue
             return msg

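The practical effect of the new check is that a message is eligible only while both processed_at is None and attempts == 0. A compact restatement of the predicate (a sketch; the Msg dataclass below is a stand-in for OutboxMessage, not the real class):

    from dataclasses import dataclass
    from datetime import datetime
    from typing import Optional

    @dataclass
    class Msg:  # stand-in for OutboxMessage, illustrative only
        id: int
        topic: str
        attempts: int = 0
        processed_at: Optional[datetime] = None

    def eligible(msg: Msg) -> bool:
        # the per-message predicate fetch_next() now applies
        return msg.processed_at is None and msg.attempts == 0

    assert eligible(Msg(1, "user.created"))
    assert not eligible(Msg(2, "user.created", attempts=1))  # dispatched, so skipped
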
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Iterable, Optional
|
|
4
|
+
|
|
5
|
+
from svc_infra.db.outbox import OutboxStore
|
|
6
|
+
from svc_infra.jobs.queue import JobQueue
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def make_outbox_tick(
|
|
10
|
+
outbox: OutboxStore,
|
|
11
|
+
queue: JobQueue,
|
|
12
|
+
*,
|
|
13
|
+
topics: Optional[Iterable[str]] = None,
|
|
14
|
+
job_name_prefix: str = "outbox",
|
|
15
|
+
):
|
|
16
|
+
"""Return an async task function to move one outbox message into the job queue.
|
|
17
|
+
|
|
18
|
+
- It fetches at most one unprocessed message per tick to avoid starving others.
|
|
19
|
+
- The enqueued job name is f"{job_name_prefix}.{topic}" to allow routing.
|
|
20
|
+
- The job payload contains `outbox_id`, `topic`, and original `payload`.
|
|
21
|
+
"""
|
|
22
|
+
|
|
23
|
+
dispatched: set[int] = set()
|
|
24
|
+
|
|
25
|
+
async def _tick():
|
|
26
|
+
# Outbox is sync; this wrapper is async for scheduler compatibility
|
|
27
|
+
msg = outbox.fetch_next(topics=topics)
|
|
28
|
+
if not msg:
|
|
29
|
+
return
|
|
30
|
+
if msg.id in dispatched:
|
|
31
|
+
return
|
|
32
|
+
job_name = f"{job_name_prefix}.{msg.topic}"
|
|
33
|
+
queue.enqueue(job_name, {"outbox_id": msg.id, "topic": msg.topic, "payload": msg.payload})
|
|
34
|
+
# mark as dispatched (bump attempts) so it won't be re-enqueued by fetch_next
|
|
35
|
+
outbox.mark_failed(msg.id)
|
|
36
|
+
dispatched.add(msg.id)
|
|
37
|
+
|
|
38
|
+
return _tick
|
|
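A wiring sketch for the processor (assumes an OutboxStore implementation such as InMemoryOutboxStore can be constructed without arguments; the task name and interval are illustrative):

    from svc_infra.db.outbox import InMemoryOutboxStore
    from svc_infra.jobs.builtins.outbox_processor import make_outbox_tick
    from svc_infra.jobs.queue import InMemoryJobQueue
    from svc_infra.jobs.scheduler import InMemoryScheduler

    outbox = InMemoryOutboxStore()
    queue = InMemoryJobQueue()
    scheduler = InMemoryScheduler()
    # drain at most one message per tick; jobs arrive named "outbox.<topic>"
    scheduler.add_task("outbox-drain", 1, make_outbox_tick(outbox, queue))
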
svc_infra/jobs/builtins/webhook_delivery.py
ADDED

@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+import hashlib
+import hmac
+import json
+from typing import Dict
+
+import httpx
+
+from svc_infra.db.inbox import InboxStore
+from svc_infra.db.outbox import OutboxStore
+from svc_infra.jobs.queue import Job
+
+
+def _compute_signature(secret: str, payload: Dict) -> str:
+    body = json.dumps(payload, separators=(",", ":")).encode()
+    return hmac.new(secret.encode(), body, hashlib.sha256).hexdigest()
+
+
+def make_webhook_handler(
+    *,
+    outbox: OutboxStore,
+    inbox: InboxStore,
+    get_webhook_url_for_topic,
+    get_secret_for_topic,
+    header_name: str = "X-Signature",
+):
+    """Return an async job handler to deliver webhooks.
+
+    Expected job payload shape:
+        {"outbox_id": int, "topic": str, "payload": {...}}
+    """
+
+    async def _handler(job: Job) -> None:
+        data = job.payload or {}
+        outbox_id = data.get("outbox_id")
+        topic = data.get("topic")
+        payload = data.get("payload") or {}
+        if not outbox_id or not topic:
+            # Nothing we can do; ack to avoid poison loop
+            return
+        # dedupe by outbox_id via inbox
+        key = f"webhook:{outbox_id}"
+        if not inbox.mark_if_new(key, ttl_seconds=24 * 3600):
+            # already delivered
+            outbox.mark_processed(int(outbox_id))
+            return
+        url = get_webhook_url_for_topic(topic)
+        secret = get_secret_for_topic(topic)
+        sig = _compute_signature(secret, payload)
+        async with httpx.AsyncClient(timeout=10) as client:
+            resp = await client.post(url, json=payload, headers={header_name: sig})
+            if 200 <= resp.status_code < 300:
+                outbox.mark_processed(int(outbox_id))
+                return
+            # allow retry on non-2xx: raise to trigger fail/backoff
+            raise RuntimeError(f"webhook delivery failed: {resp.status_code}")
+
+    return _handler

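A receiver can verify the signature by recomputing the same HMAC. Note that _compute_signature signs a compact re-serialization of the payload (separators=(",", ":")), which may not be byte-identical to the request body httpx actually sends, so the safer approach is to parse the JSON and re-serialize it compactly rather than hash the raw body. A minimal sketch:

    import hashlib
    import hmac
    import json

    def verify_signature(secret: str, payload: dict, received_sig: str) -> bool:
        # re-serialize exactly as _compute_signature() does
        body = json.dumps(payload, separators=(",", ":")).encode()
        expected = hmac.new(secret.encode(), body, hashlib.sha256).hexdigest()
        # constant-time comparison avoids timing side channels
        return hmac.compare_digest(expected, received_sig)
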
svc_infra/jobs/easy.py
ADDED

@@ -0,0 +1,32 @@
+from __future__ import annotations
+
+import os
+
+from redis import Redis
+
+from .queue import InMemoryJobQueue, JobQueue
+from .redis_queue import RedisJobQueue
+from .scheduler import InMemoryScheduler
+
+
+class JobsConfig:
+    def __init__(self, driver: str | None = None):
+        # Future: support redis/sql drivers via extras
+        self.driver = driver or os.getenv("JOBS_DRIVER", "memory").lower()
+
+
+def easy_jobs(*, driver: str | None = None) -> tuple[JobQueue, InMemoryScheduler]:
+    """One-call wiring for jobs: returns (queue, scheduler).
+
+    Defaults to in-memory implementations for local/dev. ENV override via JOBS_DRIVER.
+    """
+    cfg = JobsConfig(driver=driver)
+    # Choose backend
+    if cfg.driver == "redis":
+        url = os.getenv("REDIS_URL", "redis://localhost:6379/0")
+        client = Redis.from_url(url)
+        queue = RedisJobQueue(client)
+    else:
+        queue = InMemoryJobQueue()
+    scheduler = InMemoryScheduler()
+    return queue, scheduler

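Usage is a single call; REDIS_URL is only consulted when the redis driver is selected:

    from svc_infra.jobs.easy import easy_jobs

    queue, scheduler = easy_jobs()                  # in-memory defaults
    # queue, scheduler = easy_jobs(driver="redis")  # or set JOBS_DRIVER=redis
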
svc_infra/jobs/loader.py
ADDED

@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+import asyncio
+import importlib
+import json
+import os
+from typing import Awaitable, Callable
+
+from .scheduler import InMemoryScheduler
+
+
+def _resolve_target(path: str) -> Callable[[], Awaitable[None]]:
+    mod_name, func_name = path.split(":", 1)
+    mod = importlib.import_module(mod_name)
+    fn = getattr(mod, func_name)
+    if asyncio.iscoroutinefunction(fn):
+        return fn  # type: ignore[return-value]
+
+    # wrap sync into async
+    async def _wrapped():
+        fn()
+
+    return _wrapped
+
+
+def schedule_from_env(scheduler: InMemoryScheduler, env_var: str = "JOBS_SCHEDULE_JSON") -> None:
+    data = os.getenv(env_var)
+    if not data:
+        return
+    try:
+        tasks = json.loads(data)
+    except json.JSONDecodeError:
+        return
+    if not isinstance(tasks, list):
+        return
+    for t in tasks:
+        try:
+            name = t["name"]
+            interval = int(t.get("interval_seconds", 60))
+            target = t["target"]
+            fn = _resolve_target(target)
+            scheduler.add_task(name, interval, fn)
+        except Exception:
+            # ignore bad entries
+            continue

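An example schedule payload (a sketch; os:getcwd stands in for a real "module:function" target, and sync targets are wrapped into async automatically):

    import json
    import os

    os.environ["JOBS_SCHEDULE_JSON"] = json.dumps(
        [{"name": "heartbeat", "interval_seconds": 30, "target": "os:getcwd"}]
    )

    from svc_infra.jobs.loader import schedule_from_env
    from svc_infra.jobs.scheduler import InMemoryScheduler

    scheduler = InMemoryScheduler()
    schedule_from_env(scheduler)  # registers "heartbeat" to run every 30 seconds
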
svc_infra/jobs/queue.py
ADDED

@@ -0,0 +1,73 @@
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from datetime import datetime, timedelta, timezone
+from typing import Any, Dict, Optional, Protocol
+
+
+@dataclass
+class Job:
+    id: str
+    name: str
+    payload: Dict[str, Any]
+    available_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+    attempts: int = 0
+    max_attempts: int = 5
+    backoff_seconds: int = 60  # base backoff for retry
+    last_error: Optional[str] = None
+
+
+class JobQueue(Protocol):
+    def enqueue(self, name: str, payload: Dict[str, Any], *, delay_seconds: int = 0) -> Job:
+        pass
+
+    def reserve_next(self) -> Optional[Job]:
+        pass
+
+    def ack(self, job_id: str) -> None:
+        pass
+
+    def fail(self, job_id: str, *, error: str | None = None) -> None:
+        pass
+
+
+class InMemoryJobQueue:
+    """Simple in-memory queue for tests and local runs.
+
+    Single-threaded reserve/ack/fail semantics. Not suitable for production.
+    """
+
+    def __init__(self):
+        self._seq = 0
+        self._jobs: list[Job] = []
+
+    def _next_id(self) -> str:
+        self._seq += 1
+        return str(self._seq)
+
+    def enqueue(self, name: str, payload: Dict[str, Any], *, delay_seconds: int = 0) -> Job:
+        when = datetime.now(timezone.utc) + timedelta(seconds=delay_seconds)
+        job = Job(id=self._next_id(), name=name, payload=dict(payload), available_at=when)
+        self._jobs.append(job)
+        return job
+
+    def reserve_next(self) -> Optional[Job]:
+        now = datetime.now(timezone.utc)
+        for job in self._jobs:
+            if job.available_at <= now and job.attempts >= 0 and job.attempts < job.max_attempts:
+                job.attempts += 1
+                return job
+        return None
+
+    def ack(self, job_id: str) -> None:
+        self._jobs = [j for j in self._jobs if j.id != job_id]
+
+    def fail(self, job_id: str, *, error: str | None = None) -> None:
+        now = datetime.now(timezone.utc)
+        for job in self._jobs:
+            if job.id == job_id:
+                job.last_error = error
+                # Exponential backoff: base * attempts
+                delay = job.backoff_seconds * max(1, job.attempts)
+                job.available_at = now + timedelta(seconds=delay)
+                return

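The retry semantics in brief (runnable as-is; the job name and payload are illustrative):

    from svc_infra.jobs.queue import InMemoryJobQueue

    q = InMemoryJobQueue()
    q.enqueue("email.send", {"to": "user@example.com"})

    job = q.reserve_next()                # attempts: 0 -> 1
    q.fail(job.id, error="smtp timeout")  # pushed 60s out (backoff_seconds * attempts)
    assert q.reserve_next() is None       # invisible until available_at passes
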
svc_infra/jobs/redis_queue.py
ADDED

@@ -0,0 +1,191 @@
+from __future__ import annotations
+
+import json
+from dataclasses import asdict
+from datetime import datetime, timezone
+from typing import Dict, Optional
+
+from redis import Redis
+
+from .queue import Job, JobQueue
+
+
+class RedisJobQueue(JobQueue):
+    """Redis-backed job queue with visibility timeout and delayed retries.
+
+    Keys (with optional prefix):
+    - {p}:ready          (LIST)   ready job ids
+    - {p}:processing     (LIST)   in-flight job ids
+    - {p}:processing_vt  (ZSET)   id -> visible_at (epoch seconds)
+    - {p}:delayed        (ZSET)   id -> available_at (epoch seconds)
+    - {p}:seq            (STRING) INCR for job ids
+    - {p}:job:{id}       (HASH)   job fields (json payload)
+    - {p}:dlq            (LIST)   dead-letter job ids
+    """
+
+    def __init__(self, client: Redis, *, prefix: str = "jobs", visibility_timeout: int = 60):
+        self._r = client
+        self._p = prefix
+        self._vt = visibility_timeout
+
+    # Key helpers
+    def _k(self, name: str) -> str:
+        return f"{self._p}:{name}"
+
+    def _job_key(self, job_id: str) -> str:
+        return f"{self._p}:job:{job_id}"
+
+    # Core ops
+    def enqueue(self, name: str, payload: Dict, *, delay_seconds: int = 0) -> Job:
+        now = datetime.now(timezone.utc)
+        job_id = str(self._r.incr(self._k("seq")))
+        job = Job(id=job_id, name=name, payload=dict(payload))
+        # Persist job
+        data = asdict(job)
+        data["payload"] = json.dumps(data["payload"])  # store payload as JSON string
+        # available_at stored as ISO format
+        data["available_at"] = job.available_at.isoformat()
+        self._r.hset(
+            self._job_key(job_id), mapping={k: str(v) for k, v in data.items() if v is not None}
+        )
+        if delay_seconds and delay_seconds > 0:
+            at = int(now.timestamp()) + int(delay_seconds)
+            self._r.zadd(self._k("delayed"), {job_id: at})
+        else:
+            # push to ready
+            self._r.lpush(self._k("ready"), job_id)
+        return job
+
+    def _move_due_delayed_to_ready(self) -> None:
+        now_ts = int(datetime.now(timezone.utc).timestamp())
+        ids = self._r.zrangebyscore(self._k("delayed"), "-inf", now_ts)
+        if not ids:
+            return
+        pipe = self._r.pipeline()
+        for jid in ids:
+            jid_s = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
+            pipe.lpush(self._k("ready"), jid_s)
+            pipe.zrem(self._k("delayed"), jid_s)
+        pipe.execute()
+
+    def _requeue_timed_out_processing(self) -> None:
+        now_ts = int(datetime.now(timezone.utc).timestamp())
+        ids = self._r.zrangebyscore(self._k("processing_vt"), "-inf", now_ts)
+        if not ids:
+            return
+        pipe = self._r.pipeline()
+        for jid in ids:
+            jid_s = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
+            pipe.lrem(self._k("processing"), 1, jid_s)
+            pipe.lpush(self._k("ready"), jid_s)
+            pipe.zrem(self._k("processing_vt"), jid_s)
+            # clear stale visibility timestamp so next reservation can set a fresh one
+            pipe.hdel(self._job_key(jid_s), "visible_at")
+        pipe.execute()
+
+    def reserve_next(self) -> Optional[Job]:
+        # opportunistically move due delayed jobs
+        self._move_due_delayed_to_ready()
+        # move timed-out processing jobs back to ready before reserving
+        self._requeue_timed_out_processing()
+        jid = self._r.rpoplpush(self._k("ready"), self._k("processing"))
+        if not jid:
+            return None
+        job_id = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
+        key = self._job_key(job_id)
+        data = self._r.hgetall(key)
+        if not data:
+            # corrupted entry; ack and skip
+            self._r.lrem(self._k("processing"), 1, job_id)
+            return None
+
+        # Decode fields
+        def _get(field: str, default: Optional[str] = None) -> Optional[str]:
+            val = (
+                data.get(field.encode())
+                if isinstance(next(iter(data.keys())), bytes)
+                else data.get(field)
+            )
+            if val is None:
+                return default
+            return val.decode() if isinstance(val, (bytes, bytearray)) else str(val)
+
+        attempts = int(_get("attempts", "0")) + 1
+        max_attempts = int(_get("max_attempts", "5"))
+        backoff_seconds = int(_get("backoff_seconds", "60"))
+        name = _get("name", "") or ""
+        payload_json = _get("payload", "{}") or "{}"
+        try:
+            payload = json.loads(payload_json)
+        except Exception:  # pragma: no cover
+            payload = {}
+        available_at_str = _get("available_at")
+        available_at = (
+            datetime.fromisoformat(available_at_str)
+            if available_at_str
+            else datetime.now(timezone.utc)
+        )
+        # If exceeded max_attempts → DLQ and skip
+        if attempts > max_attempts:
+            self._r.lrem(self._k("processing"), 1, job_id)
+            self._r.lpush(self._k("dlq"), job_id)
+            return None
+        # Update attempts and visibility timeout
+        visible_at = int(datetime.now(timezone.utc).timestamp()) + int(self._vt)
+        pipe = self._r.pipeline()
+        pipe.hset(key, mapping={"attempts": attempts, "visible_at": visible_at})
+        pipe.zadd(self._k("processing_vt"), {job_id: visible_at})
+        pipe.execute()
+        return Job(
+            id=job_id,
+            name=name,
+            payload=payload,
+            available_at=available_at,
+            attempts=attempts,
+            max_attempts=max_attempts,
+            backoff_seconds=backoff_seconds,
+        )
+
+    def ack(self, job_id: str) -> None:
+        self._r.lrem(self._k("processing"), 1, job_id)
+        self._r.zrem(self._k("processing_vt"), job_id)
+        self._r.delete(self._job_key(job_id))
+
+    def fail(self, job_id: str, *, error: str | None = None) -> None:
+        key = self._job_key(job_id)
+        data = self._r.hgetall(key)
+        if not data:
+            # nothing to do
+            self._r.lrem(self._k("processing"), 1, job_id)
+            return
+
+        def _get(field: str, default: Optional[str] = None) -> Optional[str]:
+            val = (
+                data.get(field.encode())
+                if isinstance(next(iter(data.keys())), bytes)
+                else data.get(field)
+            )
+            if val is None:
+                return default
+            return val.decode() if isinstance(val, (bytes, bytearray)) else str(val)
+
+        attempts = int(_get("attempts", "0"))
+        max_attempts = int(_get("max_attempts", "5"))
+        backoff_seconds = int(_get("backoff_seconds", "60"))
+        now_ts = int(datetime.now(timezone.utc).timestamp())
+        # DLQ if at or beyond max_attempts
+        if attempts >= max_attempts:
+            self._r.lrem(self._k("processing"), 1, job_id)
+            self._r.zrem(self._k("processing_vt"), job_id)
+            self._r.lpush(self._k("dlq"), job_id)
+            return
+        delay = backoff_seconds * max(1, attempts)
+        available_at_ts = now_ts + delay
+        mapping = {
+            "last_error": error or "",
+            "available_at": datetime.fromtimestamp(available_at_ts, tz=timezone.utc).isoformat(),
+        }
+        self._r.hset(key, mapping=mapping)
+        self._r.lrem(self._k("processing"), 1, job_id)
+        self._r.zrem(self._k("processing_vt"), job_id)
+        self._r.zadd(self._k("delayed"), {job_id: available_at_ts})

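A usage sketch (assumes a reachable Redis at localhost:6379; names and payload are illustrative):

    from redis import Redis

    from svc_infra.jobs.redis_queue import RedisJobQueue

    rq = RedisJobQueue(Redis.from_url("redis://localhost:6379/0"), visibility_timeout=30)
    rq.enqueue("report.build", {"day": "2024-01-01"}, delay_seconds=10)  # parked in jobs:delayed
    job = rq.reserve_next()  # None for ~10s; then the id moves to jobs:ready and is reserved
    if job:
        rq.ack(job.id)       # drops the job hash and both processing entries
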
svc_infra/jobs/scheduler.py
ADDED

@@ -0,0 +1,41 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from datetime import datetime, timedelta, timezone
+from typing import Awaitable, Callable, Dict
+
+CronFunc = Callable[[], Awaitable[None]]
+
+
+@dataclass
+class ScheduledTask:
+    name: str
+    interval_seconds: int
+    func: CronFunc
+    next_run_at: datetime
+
+
+class InMemoryScheduler:
+    """Interval-based scheduler for simple periodic tasks (tests/local).
+
+    Not a full cron parser. Tracks next_run_at per task.
+    """
+
+    def __init__(self):
+        self._tasks: Dict[str, ScheduledTask] = {}
+
+    def add_task(self, name: str, interval_seconds: int, func: CronFunc) -> None:
+        now = datetime.now(timezone.utc)
+        self._tasks[name] = ScheduledTask(
+            name=name,
+            interval_seconds=interval_seconds,
+            func=func,
+            next_run_at=now + timedelta(seconds=interval_seconds),
+        )
+
+    async def tick(self) -> None:
+        now = datetime.now(timezone.utc)
+        for task in self._tasks.values():
+            if task.next_run_at <= now:
+                await task.func()
+                task.next_run_at = now + timedelta(seconds=task.interval_seconds)

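The scheduler is passive: tick() must be called in a loop, and a task first fires one full interval after add_task(). A driving sketch:

    import asyncio

    from svc_infra.jobs.scheduler import InMemoryScheduler

    async def beat() -> None:
        print("tick")

    async def main() -> None:
        scheduler = InMemoryScheduler()
        scheduler.add_task("beat", 5, beat)
        for _ in range(20):        # roughly 10 seconds of wall time
            await scheduler.tick()
            await asyncio.sleep(0.5)

    asyncio.run(main())
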
svc_infra/jobs/worker.py
ADDED

@@ -0,0 +1,24 @@
+from __future__ import annotations
+
+from typing import Awaitable, Callable
+
+from .queue import Job, JobQueue
+
+ProcessFunc = Callable[[Job], Awaitable[None]]
+
+
+async def process_one(queue: JobQueue, handler: ProcessFunc) -> bool:
+    """Reserve a job, process with handler, ack on success or fail with backoff.
+
+    Returns True if a job was processed (success or fail), False if no job was available.
+    """
+    job = queue.reserve_next()
+    if not job:
+        return False
+    try:
+        await handler(job)
+    except Exception as exc:  # pragma: no cover - exercise in tests by raising
+        queue.fail(job.id, error=str(exc))
+        return True
+    queue.ack(job.id)
+    return True

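A minimal drain loop built on process_one (a sketch; the handler and job name are illustrative, and raising inside the handler routes through queue.fail() with backoff):

    import asyncio

    from svc_infra.jobs.queue import InMemoryJobQueue, Job
    from svc_infra.jobs.worker import process_one

    async def handler(job: Job) -> None:
        if job.name == "email.send":
            ...  # deliver here; raise to schedule a retry

    async def main() -> None:
        q = InMemoryJobQueue()
        q.enqueue("email.send", {"to": "user@example.com"})
        while await process_one(q, handler):
            pass  # keep going until no job is available

    asyncio.run(main())
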
{svc_infra-0.1.596.dist-info → svc_infra-0.1.597.dist-info}/RECORD
CHANGED

@@ -119,8 +119,9 @@ svc_infra/cache/resources.py,sha256=BhvPAZvCQ-fitUdniGEOOE4g1ZvljdCA_R5pR8WfJz4,
 svc_infra/cache/tags.py,sha256=9URw4BRlnb4QFAYpDI36fMms6642xq4TeV9jqsEjzE8,2625
 svc_infra/cache/ttl.py,sha256=_lWvNx1CTE4RcFEOUYkADd7_k4I13SLmtK0AMRUq2OM,1945
 svc_infra/cache/utils.py,sha256=-LWr5IiJCNm3pwaoeCVlxNknnO2ChNKFcAGlFU98kjg,4856
-svc_infra/cli/__init__.py,sha256=
-svc_infra/cli/
+svc_infra/cli/__init__.py,sha256=wsmFGr8wiKeoIW7pImcHt6piEV5KZQR2IDfgh3yHpyY,699
+svc_infra/cli/__main__.py,sha256=5BjNuyet8AY-POwoF5rGt722rHQ7tJ0Vf0UFUfzzi-I,58
+svc_infra/cli/cmds/__init__.py,sha256=HyUBE2pvhlTF5Uk03x_fqj4cbdX1Ri2CyHLUFBNK2UE,691
 svc_infra/cli/cmds/db/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 svc_infra/cli/cmds/db/nosql/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 svc_infra/cli/cmds/db/nosql/mongo/README.md,sha256=0u3XLeoBd0XQzXwwfEiFISMIij11TJ9iOGzrysBvsFk,1788
@@ -131,6 +132,8 @@ svc_infra/cli/cmds/db/sql/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 svc_infra/cli/cmds/db/sql/alembic_cmds.py,sha256=KjumtKSOZR1UxbpZUuqllpknerDLNcY-0kqqqxiOnL4,7664
 svc_infra/cli/cmds/db/sql/sql_scaffold_cmds.py,sha256=eNTCqHXOxgl9H3WTbGVn9BHXYwCpjIEJsDqhEFdrYMM,4613
 svc_infra/cli/cmds/help.py,sha256=wGfZFMYaR2ZPwW2JwKDU7M3m4AtdCd8GRQ412AmEBUM,758
+svc_infra/cli/cmds/jobs/__init__.py,sha256=U4S_2y3zgLZVfMenHRaJFBW8yqh2mUBuI291LGQVOJ8,35
+svc_infra/cli/cmds/jobs/jobs_cmds.py,sha256=l-w5GuR82GWR_F1CA7WPYAM895XBD8TQj_hZ6retBv0,1252
 svc_infra/cli/cmds/obs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 svc_infra/cli/cmds/obs/obs_cmds.py,sha256=fltUZu5fcnZdl0_JPJBIxIaA1Xqpw1BXE-SWBP-PRuY,6485
 svc_infra/cli/foundation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -160,7 +163,7 @@ svc_infra/db/nosql/service.py,sha256=CtltFp1Bwm4wCQnFLDtH5-P5NmUEzkWSAf3htoiTBCQ
 svc_infra/db/nosql/service_with_hooks.py,sha256=rNH6renb-ppc8Y07jX5eSQnkkhJct2IZCq7mM9aBb48,747
 svc_infra/db/nosql/types.py,sha256=lcyuoZvBHRlGD24WL2HCEG5YmCpwo7qB4VYAckcY-WE,814
 svc_infra/db/nosql/utils.py,sha256=3u7X8WEPO1Cwy1SmZHmFMMbDfu1HhapJUAFbSMe3J9g,3524
-svc_infra/db/outbox.py,sha256=
+svc_infra/db/outbox.py,sha256=1ZIXaYMgQ2wLZX40iR8MKrRL5QNgoMP_jjSr-gSLAjY,3363
 svc_infra/db/sql/README.md,sha256=OI1T7SiY4_f0eTWQGtIeUsgkFqzvloh1vctOm6nvIvU,8581
 svc_infra/db/sql/__init__.py,sha256=PkDutfhzofY0jbE83ZuxbrvXhogvP1tmk5MniyfwQws,159
 svc_infra/db/sql/apikey.py,sha256=27-4GAieD8NxoVKHw_WF2cj8A4UXbcnvtUUTztbo_yw,5019
@@ -191,6 +194,14 @@ svc_infra/db/sql/uniq_hooks.py,sha256=6gCnO0_Y-rhB0p-VuY0mZ9m1u3haiLWI3Ns_iUTqF_
 svc_infra/db/sql/utils.py,sha256=nzuDcDhnVNehx5Y9BZLgxw8fvpfYbxTfXQsgnznVf4w,32862
 svc_infra/db/sql/versioning.py,sha256=okZu2ad5RAFXNLXJgGpcQvZ5bc6gPjRWzwiBT0rEJJw,400
 svc_infra/db/utils.py,sha256=aTD49VJSEu319kIWJ1uijUoP51co4lNJ3S0_tvuyGio,802
+svc_infra/jobs/builtins/outbox_processor.py,sha256=VZoehNyjdaV_MmV74WMcbZR6z9E3VFMtZC-pxEwK0x0,1247
+svc_infra/jobs/builtins/webhook_delivery.py,sha256=6D_nmwpPOyrkzx4MM2vrpA0JKGfWbWo4BBavYEXhpDQ,1894
+svc_infra/jobs/easy.py,sha256=eix-OxWeE3vdkY3GGNoYM0GAyOxc928SpiSzMkr9k0A,977
+svc_infra/jobs/loader.py,sha256=LFO6gOacj6rT698vkDg0YfcHDRTue4zus3Nl9QrS5R0,1164
+svc_infra/jobs/queue.py,sha256=PS5f4CJm5_K7icojTxZOwC6uKw3O2M-jE111u85ySbA,2288
+svc_infra/jobs/redis_queue.py,sha256=wgmWKslF1dkYscJe49UgUX7gwEuGyOUWEb0-pn82I3g,7543
+svc_infra/jobs/scheduler.py,sha256=dTUEEyEuTVHNmJT8wPdMu4YjnTN7R_YW67gtCKpqC7M,1180
+svc_infra/jobs/worker.py,sha256=T2A575_mnieJHPOYU_FseubLA_HQf9pB4CkRgzRJBHU,694
 svc_infra/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 svc_infra/mcp/svc_infra_mcp.py,sha256=NmBY7AM3_pnHAumE-eM5Njr8kpb7Gh1-fjcZAEammiI,1927
 svc_infra/obs/README.md,sha256=wOABJUOhuj0ftGt24ZfuChlFNJTYvYq4KM_rcRIdWRU,7884
@@ -252,7 +263,7 @@ svc_infra/security/permissions.py,sha256=fQm7-OcJJkWsScDcjS2gwmqaW93zQqltaHRl6bv
 svc_infra/security/session.py,sha256=JkClqoZ-Moo9yqHzCREXMVSpzyjbn2Zh6zCjtWO93Ik,2848
 svc_infra/security/signed_cookies.py,sha256=2t61BgjsBaTzU46bt7IUJo7lwDRE9_eS4vmAQXJ8mlY,2219
 svc_infra/utils.py,sha256=VX1yjTx61-YvAymyRhGy18DhybiVdPddiYD_FlKTbJU,952
-svc_infra-0.1.
-svc_infra-0.1.
-svc_infra-0.1.
-svc_infra-0.1.
+svc_infra-0.1.597.dist-info/METADATA,sha256=GN5xri15URUe0_f-BESdwDp7BzNL5DBj5penjmrOcuw,3527
+svc_infra-0.1.597.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+svc_infra-0.1.597.dist-info/entry_points.txt,sha256=6x_nZOsjvn6hRZsMgZLgTasaCSKCgAjsGhACe_CiP0U,48
+svc_infra-0.1.597.dist-info/RECORD,,

{svc_infra-0.1.596.dist-info → svc_infra-0.1.597.dist-info}/WHEEL
File without changes

{svc_infra-0.1.596.dist-info → svc_infra-0.1.597.dist-info}/entry_points.txt
File without changes