svc-infra 0.1.596__py3-none-any.whl → 0.1.598__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of svc-infra has been flagged as potentially problematic; see the details section below.
- svc_infra/cli/__init__.py +4 -0
- svc_infra/cli/__main__.py +4 -0
- svc_infra/cli/cmds/__init__.py +2 -0
- svc_infra/cli/cmds/jobs/__init__.py +1 -0
- svc_infra/cli/cmds/jobs/jobs_cmds.py +43 -0
- svc_infra/db/inbox.py +12 -0
- svc_infra/db/outbox.py +9 -1
- svc_infra/jobs/builtins/outbox_processor.py +38 -0
- svc_infra/jobs/builtins/webhook_delivery.py +78 -0
- svc_infra/jobs/easy.py +32 -0
- svc_infra/jobs/loader.py +45 -0
- svc_infra/jobs/queue.py +73 -0
- svc_infra/jobs/redis_queue.py +191 -0
- svc_infra/jobs/scheduler.py +41 -0
- svc_infra/jobs/worker.py +24 -0
- svc_infra/webhooks/__init__.py +1 -0
- svc_infra/webhooks/fastapi.py +37 -0
- svc_infra/webhooks/router.py +55 -0
- svc_infra/webhooks/service.py +59 -0
- svc_infra/webhooks/signing.py +30 -0
- {svc_infra-0.1.596.dist-info → svc_infra-0.1.598.dist-info}/METADATA +1 -1
- {svc_infra-0.1.596.dist-info → svc_infra-0.1.598.dist-info}/RECORD +24 -8
- {svc_infra-0.1.596.dist-info → svc_infra-0.1.598.dist-info}/WHEEL +0 -0
- {svc_infra-0.1.596.dist-info → svc_infra-0.1.598.dist-info}/entry_points.txt +0 -0
svc_infra/cli/__init__.py
CHANGED
|
@@ -4,6 +4,7 @@ import typer
|
|
|
4
4
|
|
|
5
5
|
from svc_infra.cli.cmds import (
|
|
6
6
|
_HELP,
|
|
7
|
+
jobs_app,
|
|
7
8
|
register_alembic,
|
|
8
9
|
register_mongo,
|
|
9
10
|
register_mongo_scaffold,
|
|
@@ -26,6 +27,9 @@ register_mongo_scaffold(app)
|
|
|
26
27
|
# -- observability commands ---
|
|
27
28
|
register_obs(app)
|
|
28
29
|
|
|
30
|
+
# -- jobs commands ---
|
|
31
|
+
app.add_typer(jobs_app, name="jobs")
|
|
32
|
+
|
|
29
33
|
|
|
30
34
|
def main():
|
|
31
35
|
app()
|
svc_infra/cli/cmds/__init__.py
CHANGED
|
@@ -4,6 +4,7 @@ from svc_infra.cli.cmds.db.nosql.mongo.mongo_scaffold_cmds import (
|
|
|
4
4
|
)
|
|
5
5
|
from svc_infra.cli.cmds.db.sql.alembic_cmds import register as register_alembic
|
|
6
6
|
from svc_infra.cli.cmds.db.sql.sql_scaffold_cmds import register as register_sql_scaffold
|
|
7
|
+
from svc_infra.cli.cmds.jobs.jobs_cmds import app as jobs_app
|
|
7
8
|
from svc_infra.cli.cmds.obs.obs_cmds import register as register_obs
|
|
8
9
|
|
|
9
10
|
from .help import _HELP
|
|
@@ -14,5 +15,6 @@ __all__ = [
|
|
|
14
15
|
"register_mongo",
|
|
15
16
|
"register_mongo_scaffold",
|
|
16
17
|
"register_obs",
|
|
18
|
+
"jobs_app",
|
|
17
19
|
"_HELP",
|
|
18
20
|
]
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
from typing import Optional
|
|
5
|
+
|
|
6
|
+
import typer
|
|
7
|
+
|
|
8
|
+
from svc_infra.jobs.easy import easy_jobs
|
|
9
|
+
from svc_infra.jobs.loader import schedule_from_env
|
|
10
|
+
from svc_infra.jobs.worker import process_one
|
|
11
|
+
|
|
12
|
+
app = typer.Typer(help="Background jobs and scheduler commands")
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@app.command("run")
def run(
    poll_interval: float = typer.Option(0.5, help="Sleep seconds between loops when idle"),
    max_loops: Optional[int] = typer.Option(None, help="Max loops before exit (for tests)"),
):
    """Run scheduler ticks and process jobs in a simple loop."""

    queue, scheduler = easy_jobs()
    # Pull periodic-task definitions from JOBS_SCHEDULE_JSON, when set.
    schedule_from_env(scheduler)

    async def _noop_handler(job):
        # Default handler does nothing; users should write their own runners.
        return None

    async def _run_loop():
        completed = 0
        while True:
            await scheduler.tick()
            did_work = await process_one(queue, _noop_handler)
            if not did_work:
                # Queue was empty this pass; back off briefly before retrying.
                await asyncio.sleep(poll_interval)
            if max_loops is None:
                continue
            completed += 1
            if completed >= max_loops:
                break

    asyncio.run(_run_loop())
|
svc_infra/db/inbox.py
CHANGED
|
@@ -13,6 +13,10 @@ class InboxStore(Protocol):
|
|
|
13
13
|
"""Optional: remove expired keys, return number purged."""
|
|
14
14
|
...
|
|
15
15
|
|
|
16
|
+
def is_marked(self, key: str) -> bool:
|
|
17
|
+
"""Return True if key is already marked (not expired), without modifying it."""
|
|
18
|
+
...
|
|
19
|
+
|
|
16
20
|
|
|
17
21
|
class InMemoryInboxStore:
|
|
18
22
|
def __init__(self) -> None:
|
|
@@ -33,6 +37,11 @@ class InMemoryInboxStore:
|
|
|
33
37
|
self._keys.pop(k, None)
|
|
34
38
|
return len(to_del)
|
|
35
39
|
|
|
40
|
+
def is_marked(self, key: str) -> bool:
|
|
41
|
+
now = time.time()
|
|
42
|
+
exp = self._keys.get(key)
|
|
43
|
+
return bool(exp and exp > now)
|
|
44
|
+
|
|
36
45
|
|
|
37
46
|
class SqlInboxStore:
|
|
38
47
|
"""Skeleton for a SQL-backed inbox store (dedupe table).
|
|
@@ -53,3 +62,6 @@ class SqlInboxStore:
|
|
|
53
62
|
|
|
54
63
|
def purge_expired(self) -> int: # pragma: no cover - skeleton
|
|
55
64
|
raise NotImplementedError
|
|
65
|
+
|
|
66
|
+
def is_marked(self, key: str) -> bool: # pragma: no cover - skeleton
|
|
67
|
+
raise NotImplementedError
|
svc_infra/db/outbox.py
CHANGED
|
@@ -20,7 +20,12 @@ class OutboxStore(Protocol):
|
|
|
20
20
|
pass
|
|
21
21
|
|
|
22
22
|
def fetch_next(self, *, topics: Optional[Iterable[str]] = None) -> Optional[OutboxMessage]:
|
|
23
|
-
"""Return the next unprocessed message (FIFO per-topic), or None
|
|
23
|
+
"""Return the next undispatched, unprocessed message (FIFO per-topic), or None.
|
|
24
|
+
|
|
25
|
+
Notes:
|
|
26
|
+
- Messages with attempts > 0 are considered "dispatched" to the job queue and won't be re-enqueued.
|
|
27
|
+
- Delivery retries are handled by the job queue worker, not by re-reading the outbox.
|
|
28
|
+
"""
|
|
24
29
|
pass
|
|
25
30
|
|
|
26
31
|
def mark_processed(self, msg_id: int) -> None:
|
|
@@ -48,6 +53,9 @@ class InMemoryOutboxStore:
|
|
|
48
53
|
for msg in self._messages:
|
|
49
54
|
if msg.processed_at is not None:
|
|
50
55
|
continue
|
|
56
|
+
# skip already dispatched messages (attempts>0)
|
|
57
|
+
if msg.attempts > 0:
|
|
58
|
+
continue
|
|
51
59
|
if allowed is not None and msg.topic not in allowed:
|
|
52
60
|
continue
|
|
53
61
|
return msg
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Iterable, Optional
|
|
4
|
+
|
|
5
|
+
from svc_infra.db.outbox import OutboxStore
|
|
6
|
+
from svc_infra.jobs.queue import JobQueue
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def make_outbox_tick(
    outbox: OutboxStore,
    queue: JobQueue,
    *,
    topics: Optional[Iterable[str]] = None,
    job_name_prefix: str = "outbox",
):
    """Return an async task function to move one outbox message into the job queue.

    - At most one unprocessed message is fetched per tick so no topic starves.
    - The enqueued job is named f"{job_name_prefix}.{topic}" to allow routing.
    - The job payload carries `outbox_id`, `topic`, and the original `payload`.
    """

    already_dispatched: set[int] = set()

    async def _tick():
        # The outbox API is synchronous; this wrapper is async only so the
        # scheduler can await it like any other task.
        message = outbox.fetch_next(topics=topics)
        if not message:
            return
        if message.id in already_dispatched:
            return
        queue.enqueue(
            f"{job_name_prefix}.{message.topic}",
            {"outbox_id": message.id, "topic": message.topic, "payload": message.payload},
        )
        # Bump attempts via mark_failed so fetch_next treats the message as
        # dispatched and never re-enqueues it; delivery retries are handled
        # by the job queue, not by re-reading the outbox.
        outbox.mark_failed(message.id)
        already_dispatched.add(message.id)

    return _tick
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import httpx
|
|
4
|
+
|
|
5
|
+
from svc_infra.db.inbox import InboxStore
|
|
6
|
+
from svc_infra.db.outbox import OutboxStore
|
|
7
|
+
from svc_infra.jobs.queue import Job
|
|
8
|
+
from svc_infra.webhooks.signing import sign
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def make_webhook_handler(
    *,
    outbox: OutboxStore,
    inbox: InboxStore,
    get_webhook_url_for_topic,
    get_secret_for_topic,
    header_name: str = "X-Signature",
):
    """Return an async job handler to deliver webhooks.

    Expected job payload shape:
        {"outbox_id": int, "topic": str, "payload": {...}}

    The payload may additionally carry "event" and "subscription" dicts (as
    produced by WebhookService.publish); when both are present the event is
    delivered to the subscription's own url/secret instead of the
    topic-level defaults.

    On a 2xx response the delivery is recorded in the inbox (dedupe marker)
    and the outbox message is marked processed. Any non-2xx response raises,
    which lets the job queue apply its fail/backoff retry policy.
    """

    async def _handler(job: Job) -> None:
        data = job.payload or {}
        outbox_id = data.get("outbox_id")
        topic = data.get("topic")
        payload = data.get("payload") or {}
        # NOTE(review): a falsy outbox_id (e.g. 0) is treated as missing —
        # assumes outbox ids start at 1; confirm against the store.
        if not outbox_id or not topic:
            # Nothing we can do; ack to avoid poison loop
            return
        # dedupe marker key (marked after successful delivery)
        key = f"webhook:{outbox_id}"
        if inbox.is_marked(key):
            # already delivered
            outbox.mark_processed(int(outbox_id))
            return
        event = payload.get("event") if isinstance(payload, dict) else None
        subscription = payload.get("subscription") if isinstance(payload, dict) else None
        if event is not None and subscription is not None:
            # Per-subscription delivery: subscription values win, topic-level
            # lookups are only the fallback.
            delivery_payload = event
            url = subscription.get("url") or get_webhook_url_for_topic(topic)
            secret = subscription.get("secret") or get_secret_for_topic(topic)
            subscription_id = subscription.get("id")
        else:
            # Legacy/simple shape: deliver the raw payload to the topic default.
            delivery_payload = payload
            url = get_webhook_url_for_topic(topic)
            secret = get_secret_for_topic(topic)
            subscription_id = None
        # HMAC-SHA256 signature over the canonical JSON body.
        sig = sign(secret, delivery_payload)
        headers = {
            header_name: sig,
            "X-Event-Id": str(outbox_id),
            "X-Topic": str(topic),
            "X-Attempt": str(job.attempts or 1),
            "X-Signature-Alg": "hmac-sha256",
            "X-Signature-Version": "v1",
        }
        if subscription_id:
            headers["X-Webhook-Subscription"] = str(subscription_id)
        # include event payload version if present
        version = None
        if isinstance(delivery_payload, dict):
            version = delivery_payload.get("version")
        if version is not None:
            headers["X-Payload-Version"] = str(version)
        async with httpx.AsyncClient(timeout=10) as client:
            resp = await client.post(url, json=delivery_payload, headers=headers)
        if 200 <= resp.status_code < 300:
            # record delivery and mark processed
            inbox.mark_if_new(key, ttl_seconds=24 * 3600)
            outbox.mark_processed(int(outbox_id))
            return
        # allow retry on non-2xx: raise to trigger fail/backoff
        raise RuntimeError(f"webhook delivery failed: {resp.status_code}")

    return _handler
|
svc_infra/jobs/easy.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
|
|
5
|
+
from redis import Redis
|
|
6
|
+
|
|
7
|
+
from .queue import InMemoryJobQueue, JobQueue
|
|
8
|
+
from .redis_queue import RedisJobQueue
|
|
9
|
+
from .scheduler import InMemoryScheduler
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class JobsConfig:
    """Configuration for the jobs subsystem.

    The backend driver is taken from the explicit argument, then from the
    JOBS_DRIVER environment variable, then defaults to "memory". The value
    is always normalized to lowercase so comparisons like
    `cfg.driver == "redis"` work regardless of how the caller spelled it.
    """

    def __init__(self, driver: str | None = None):
        # Future: support redis/sql drivers via extras.
        # Bug fix: previously .lower() bound only to the env-var fallback
        # (`driver or os.getenv(...).lower()`), so an explicit
        # driver="REDIS" was never normalized and silently selected the
        # memory backend downstream.
        self.driver = (driver or os.getenv("JOBS_DRIVER", "memory")).lower()
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def easy_jobs(*, driver: str | None = None) -> tuple[JobQueue, InMemoryScheduler]:
    """One-call wiring for jobs: returns (queue, scheduler).

    In-memory implementations are the default for local/dev work; the
    backend can be overridden via the JOBS_DRIVER env var (or `driver`).
    """
    cfg = JobsConfig(driver=driver)
    if cfg.driver == "redis":
        # Redis backend: connection string comes from REDIS_URL.
        redis_url = os.getenv("REDIS_URL", "redis://localhost:6379/0")
        queue: JobQueue = RedisJobQueue(Redis.from_url(redis_url))
    else:
        # Any other value (including the default "memory") stays in-process.
        queue = InMemoryJobQueue()
    return queue, InMemoryScheduler()
|
svc_infra/jobs/loader.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import importlib
|
|
5
|
+
import json
|
|
6
|
+
import os
|
|
7
|
+
from typing import Awaitable, Callable
|
|
8
|
+
|
|
9
|
+
from .scheduler import InMemoryScheduler
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _resolve_target(path: str) -> Callable[[], Awaitable[None]]:
|
|
13
|
+
mod_name, func_name = path.split(":", 1)
|
|
14
|
+
mod = importlib.import_module(mod_name)
|
|
15
|
+
fn = getattr(mod, func_name)
|
|
16
|
+
if asyncio.iscoroutinefunction(fn):
|
|
17
|
+
return fn # type: ignore[return-value]
|
|
18
|
+
|
|
19
|
+
# wrap sync into async
|
|
20
|
+
async def _wrapped():
|
|
21
|
+
fn()
|
|
22
|
+
|
|
23
|
+
return _wrapped
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def schedule_from_env(scheduler: InMemoryScheduler, env_var: str = "JOBS_SCHEDULE_JSON") -> None:
    """Register scheduler tasks described by a JSON list in `env_var`.

    Each entry looks like:
        {"name": ..., "interval_seconds": ..., "target": "module:function"}

    Malformed JSON, a non-list document, or individual bad entries are
    silently skipped — scheduling from the environment is best-effort.
    """
    raw = os.getenv(env_var)
    if not raw:
        return
    try:
        entries = json.loads(raw)
    except json.JSONDecodeError:
        return
    if not isinstance(entries, list):
        return
    for entry in entries:
        try:
            scheduler.add_task(
                entry["name"],
                int(entry.get("interval_seconds", 60)),
                _resolve_target(entry["target"]),
            )
        except Exception:
            # Ignore bad entries on purpose.
            continue
|
svc_infra/jobs/queue.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from datetime import datetime, timedelta, timezone
|
|
5
|
+
from typing import Any, Dict, Optional, Protocol
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
@dataclass
|
|
9
|
+
class Job:
|
|
10
|
+
id: str
|
|
11
|
+
name: str
|
|
12
|
+
payload: Dict[str, Any]
|
|
13
|
+
available_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
|
|
14
|
+
attempts: int = 0
|
|
15
|
+
max_attempts: int = 5
|
|
16
|
+
backoff_seconds: int = 60 # base backoff for retry
|
|
17
|
+
last_error: Optional[str] = None
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class JobQueue(Protocol):
|
|
21
|
+
def enqueue(self, name: str, payload: Dict[str, Any], *, delay_seconds: int = 0) -> Job:
|
|
22
|
+
pass
|
|
23
|
+
|
|
24
|
+
def reserve_next(self) -> Optional[Job]:
|
|
25
|
+
pass
|
|
26
|
+
|
|
27
|
+
def ack(self, job_id: str) -> None:
|
|
28
|
+
pass
|
|
29
|
+
|
|
30
|
+
def fail(self, job_id: str, *, error: str | None = None) -> None:
|
|
31
|
+
pass
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class InMemoryJobQueue:
|
|
35
|
+
"""Simple in-memory queue for tests and local runs.
|
|
36
|
+
|
|
37
|
+
Single-threaded reserve/ack/fail semantics. Not suitable for production.
|
|
38
|
+
"""
|
|
39
|
+
|
|
40
|
+
def __init__(self):
|
|
41
|
+
self._seq = 0
|
|
42
|
+
self._jobs: list[Job] = []
|
|
43
|
+
|
|
44
|
+
def _next_id(self) -> str:
|
|
45
|
+
self._seq += 1
|
|
46
|
+
return str(self._seq)
|
|
47
|
+
|
|
48
|
+
def enqueue(self, name: str, payload: Dict[str, Any], *, delay_seconds: int = 0) -> Job:
|
|
49
|
+
when = datetime.now(timezone.utc) + timedelta(seconds=delay_seconds)
|
|
50
|
+
job = Job(id=self._next_id(), name=name, payload=dict(payload), available_at=when)
|
|
51
|
+
self._jobs.append(job)
|
|
52
|
+
return job
|
|
53
|
+
|
|
54
|
+
def reserve_next(self) -> Optional[Job]:
|
|
55
|
+
now = datetime.now(timezone.utc)
|
|
56
|
+
for job in self._jobs:
|
|
57
|
+
if job.available_at <= now and job.attempts >= 0 and job.attempts < job.max_attempts:
|
|
58
|
+
job.attempts += 1
|
|
59
|
+
return job
|
|
60
|
+
return None
|
|
61
|
+
|
|
62
|
+
def ack(self, job_id: str) -> None:
|
|
63
|
+
self._jobs = [j for j in self._jobs if j.id != job_id]
|
|
64
|
+
|
|
65
|
+
def fail(self, job_id: str, *, error: str | None = None) -> None:
|
|
66
|
+
now = datetime.now(timezone.utc)
|
|
67
|
+
for job in self._jobs:
|
|
68
|
+
if job.id == job_id:
|
|
69
|
+
job.last_error = error
|
|
70
|
+
# Exponential backoff: base * attempts
|
|
71
|
+
delay = job.backoff_seconds * max(1, job.attempts)
|
|
72
|
+
job.available_at = now + timedelta(seconds=delay)
|
|
73
|
+
return
|
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from dataclasses import asdict
|
|
5
|
+
from datetime import datetime, timezone
|
|
6
|
+
from typing import Dict, Optional
|
|
7
|
+
|
|
8
|
+
from redis import Redis
|
|
9
|
+
|
|
10
|
+
from .queue import Job, JobQueue
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class RedisJobQueue(JobQueue):
    """Redis-backed job queue with visibility timeout and delayed retries.

    Keys (with optional prefix):
      - {p}:ready         (LIST) ready job ids
      - {p}:processing    (LIST) in-flight job ids
      - {p}:processing_vt (ZSET) id -> visible_at (epoch seconds)
      - {p}:delayed       (ZSET) id -> available_at (epoch seconds)
      - {p}:seq           (STRING) INCR for job ids
      - {p}:job:{id}      (HASH) job fields (json payload)
      - {p}:dlq           (LIST) dead-letter job ids
    """

    def __init__(self, client: Redis, *, prefix: str = "jobs", visibility_timeout: int = 60):
        self._r = client
        self._p = prefix
        self._vt = visibility_timeout

    # Key helpers
    def _k(self, name: str) -> str:
        return f"{self._p}:{name}"

    def _job_key(self, job_id: str) -> str:
        return f"{self._p}:job:{job_id}"

    @staticmethod
    def _hget(data: Dict, field: str, default: Optional[str] = None) -> Optional[str]:
        """Read `field` from a non-empty hgetall() result as str.

        Tolerates both bytes and str keys/values, since the client may or
        may not be configured with decode_responses. (This used to be
        duplicated as a nested closure in reserve_next and fail.)
        """
        val = (
            data.get(field.encode())
            if isinstance(next(iter(data.keys())), bytes)
            else data.get(field)
        )
        if val is None:
            return default
        return val.decode() if isinstance(val, (bytes, bytearray)) else str(val)

    # Core ops
    def enqueue(self, name: str, payload: Dict, *, delay_seconds: int = 0) -> Job:
        """Persist a job hash and push its id to ready (or delayed)."""
        now = datetime.now(timezone.utc)
        job_id = str(self._r.incr(self._k("seq")))
        job = Job(id=job_id, name=name, payload=dict(payload))
        # Persist job
        data = asdict(job)
        data["payload"] = json.dumps(data["payload"])  # store payload as JSON string
        # available_at stored as ISO format
        data["available_at"] = job.available_at.isoformat()
        self._r.hset(
            self._job_key(job_id), mapping={k: str(v) for k, v in data.items() if v is not None}
        )
        if delay_seconds and delay_seconds > 0:
            at = int(now.timestamp()) + int(delay_seconds)
            self._r.zadd(self._k("delayed"), {job_id: at})
        else:
            # push to ready
            self._r.lpush(self._k("ready"), job_id)
        return job

    def _move_due_delayed_to_ready(self) -> None:
        """Promote delayed jobs whose due time has passed into the ready list."""
        now_ts = int(datetime.now(timezone.utc).timestamp())
        ids = self._r.zrangebyscore(self._k("delayed"), "-inf", now_ts)
        if not ids:
            return
        pipe = self._r.pipeline()
        for jid in ids:
            jid_s = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
            pipe.lpush(self._k("ready"), jid_s)
            pipe.zrem(self._k("delayed"), jid_s)
        pipe.execute()

    def _requeue_timed_out_processing(self) -> None:
        """Return in-flight jobs whose visibility timeout expired to ready."""
        now_ts = int(datetime.now(timezone.utc).timestamp())
        ids = self._r.zrangebyscore(self._k("processing_vt"), "-inf", now_ts)
        if not ids:
            return
        pipe = self._r.pipeline()
        for jid in ids:
            jid_s = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
            pipe.lrem(self._k("processing"), 1, jid_s)
            pipe.lpush(self._k("ready"), jid_s)
            pipe.zrem(self._k("processing_vt"), jid_s)
            # clear stale visibility timestamp so next reservation can set a fresh one
            pipe.hdel(self._job_key(jid_s), "visible_at")
        pipe.execute()

    def reserve_next(self) -> Optional[Job]:
        """Reserve one ready job: move it to processing and set its timeout."""
        # opportunistically move due delayed jobs
        self._move_due_delayed_to_ready()
        # move timed-out processing jobs back to ready before reserving
        self._requeue_timed_out_processing()
        jid = self._r.rpoplpush(self._k("ready"), self._k("processing"))
        if not jid:
            return None
        job_id = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
        key = self._job_key(job_id)
        data = self._r.hgetall(key)
        if not data:
            # corrupted entry; ack and skip
            self._r.lrem(self._k("processing"), 1, job_id)
            return None

        attempts = int(self._hget(data, "attempts", "0")) + 1
        max_attempts = int(self._hget(data, "max_attempts", "5"))
        backoff_seconds = int(self._hget(data, "backoff_seconds", "60"))
        name = self._hget(data, "name", "") or ""
        payload_json = self._hget(data, "payload", "{}") or "{}"
        try:
            payload = json.loads(payload_json)
        except Exception:  # pragma: no cover
            payload = {}
        available_at_str = self._hget(data, "available_at")
        available_at = (
            datetime.fromisoformat(available_at_str)
            if available_at_str
            else datetime.now(timezone.utc)
        )
        # If exceeded max_attempts -> DLQ and skip
        if attempts > max_attempts:
            self._r.lrem(self._k("processing"), 1, job_id)
            self._r.lpush(self._k("dlq"), job_id)
            return None
        # Update attempts and visibility timeout
        visible_at = int(datetime.now(timezone.utc).timestamp()) + int(self._vt)
        pipe = self._r.pipeline()
        pipe.hset(key, mapping={"attempts": attempts, "visible_at": visible_at})
        pipe.zadd(self._k("processing_vt"), {job_id: visible_at})
        pipe.execute()
        return Job(
            id=job_id,
            name=name,
            payload=payload,
            available_at=available_at,
            attempts=attempts,
            max_attempts=max_attempts,
            backoff_seconds=backoff_seconds,
        )

    def ack(self, job_id: str) -> None:
        """Remove a successfully processed job and its stored hash."""
        self._r.lrem(self._k("processing"), 1, job_id)
        self._r.zrem(self._k("processing_vt"), job_id)
        self._r.delete(self._job_key(job_id))

    def fail(self, job_id: str, *, error: str | None = None) -> None:
        """Record a failure: dead-letter if exhausted, else delay for backoff."""
        key = self._job_key(job_id)
        data = self._r.hgetall(key)
        if not data:
            # nothing to do
            self._r.lrem(self._k("processing"), 1, job_id)
            return

        attempts = int(self._hget(data, "attempts", "0"))
        max_attempts = int(self._hget(data, "max_attempts", "5"))
        backoff_seconds = int(self._hget(data, "backoff_seconds", "60"))
        now_ts = int(datetime.now(timezone.utc).timestamp())
        # DLQ if at or beyond max_attempts
        if attempts >= max_attempts:
            self._r.lrem(self._k("processing"), 1, job_id)
            self._r.zrem(self._k("processing_vt"), job_id)
            self._r.lpush(self._k("dlq"), job_id)
            return
        # Linear backoff: base * attempts
        delay = backoff_seconds * max(1, attempts)
        available_at_ts = now_ts + delay
        mapping = {
            "last_error": error or "",
            "available_at": datetime.fromtimestamp(available_at_ts, tz=timezone.utc).isoformat(),
        }
        self._r.hset(key, mapping=mapping)
        self._r.lrem(self._k("processing"), 1, job_id)
        self._r.zrem(self._k("processing_vt"), job_id)
        self._r.zadd(self._k("delayed"), {job_id: available_at_ts})
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
from datetime import datetime, timedelta, timezone
|
|
5
|
+
from typing import Awaitable, Callable, Dict
|
|
6
|
+
|
|
7
|
+
CronFunc = Callable[[], Awaitable[None]]
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@dataclass
|
|
11
|
+
class ScheduledTask:
|
|
12
|
+
name: str
|
|
13
|
+
interval_seconds: int
|
|
14
|
+
func: CronFunc
|
|
15
|
+
next_run_at: datetime
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class InMemoryScheduler:
|
|
19
|
+
"""Interval-based scheduler for simple periodic tasks (tests/local).
|
|
20
|
+
|
|
21
|
+
Not a full cron parser. Tracks next_run_at per task.
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
def __init__(self):
|
|
25
|
+
self._tasks: Dict[str, ScheduledTask] = {}
|
|
26
|
+
|
|
27
|
+
def add_task(self, name: str, interval_seconds: int, func: CronFunc) -> None:
|
|
28
|
+
now = datetime.now(timezone.utc)
|
|
29
|
+
self._tasks[name] = ScheduledTask(
|
|
30
|
+
name=name,
|
|
31
|
+
interval_seconds=interval_seconds,
|
|
32
|
+
func=func,
|
|
33
|
+
next_run_at=now + timedelta(seconds=interval_seconds),
|
|
34
|
+
)
|
|
35
|
+
|
|
36
|
+
async def tick(self) -> None:
|
|
37
|
+
now = datetime.now(timezone.utc)
|
|
38
|
+
for task in self._tasks.values():
|
|
39
|
+
if task.next_run_at <= now:
|
|
40
|
+
await task.func()
|
|
41
|
+
task.next_run_at = now + timedelta(seconds=task.interval_seconds)
|
svc_infra/jobs/worker.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Awaitable, Callable
|
|
4
|
+
|
|
5
|
+
from .queue import Job, JobQueue
|
|
6
|
+
|
|
7
|
+
ProcessFunc = Callable[[Job], Awaitable[None]]
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
async def process_one(queue: JobQueue, handler: ProcessFunc) -> bool:
    """Reserve a job, process with handler, ack on success or fail with backoff.

    Returns True if a job was processed (success or fail), False if no job
    was available.
    """
    reserved = queue.reserve_next()
    if not reserved:
        return False
    try:
        await handler(reserved)
    except Exception as exc:
        # Handler errors schedule a retry (queue backoff) instead of
        # crashing the worker loop.
        queue.fail(reserved.id, error=str(exc))
    else:
        queue.ack(reserved.id)
    return True
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Callable, Sequence
|
|
4
|
+
|
|
5
|
+
from fastapi import HTTPException, Request, status
|
|
6
|
+
|
|
7
|
+
from .signing import verify, verify_any
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def require_signature(
    secrets_provider: Callable[[], str | Sequence[str]],
    *,
    header_name: str = "X-Signature",
):
    """Build a FastAPI dependency that rejects unsigned or mis-signed requests.

    `secrets_provider` returns either a single secret or a sequence of
    candidate secrets (useful during secret rotation). On success the
    parsed JSON body is returned, so the endpoint does not have to re-read
    the request stream.

    Raises (inside the dependency):
        401 when the signature header is missing or does not verify.
        400 when the request body is not valid JSON.
    """

    async def _dep(request: Request):
        sig = request.headers.get(header_name)
        if not sig:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED, detail="missing signature"
            )
        try:
            body = await request.json()
        except Exception:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="invalid JSON body")
        secrets = secrets_provider()
        ok = False
        if isinstance(secrets, str):
            # Single active secret.
            ok = verify(secrets, body, sig)
        else:
            # Any of the rotated candidate secrets may match.
            ok = verify_any(secrets, body, sig)
        if not ok:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED, detail="invalid signature"
            )
        return body

    return _dep
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any, Dict
|
|
4
|
+
|
|
5
|
+
from fastapi import APIRouter, Depends, HTTPException
|
|
6
|
+
|
|
7
|
+
from svc_infra.db.outbox import InMemoryOutboxStore, OutboxStore
|
|
8
|
+
|
|
9
|
+
from .service import InMemoryWebhookSubscriptions, WebhookService
|
|
10
|
+
|
|
11
|
+
router = APIRouter(prefix="/_webhooks", tags=["webhooks"])

# Module-level default stores. The dependency functions below must return
# the *same* instances on every request: previously each call constructed a
# brand-new empty store, so a subscription added via POST /subscriptions was
# invisible to /test-fire. Production apps should still override these via
# FastAPI's dependency_overrides.
_default_outbox = InMemoryOutboxStore()
_default_subs = InMemoryWebhookSubscriptions()


def get_outbox() -> OutboxStore:
    """Dependency: the outbox store (in-memory default; override via DI)."""
    return _default_outbox


def get_subs() -> InMemoryWebhookSubscriptions:
    """Dependency: the subscription registry (in-memory default; override via DI)."""
    return _default_subs


def get_service(
    outbox: OutboxStore = Depends(get_outbox),
    subs: InMemoryWebhookSubscriptions = Depends(get_subs),
) -> WebhookService:
    """Dependency: a WebhookService wired to the resolved stores."""
    return WebhookService(outbox=outbox, subs=subs)


@router.post("/subscriptions")
def add_subscription(
    body: Dict[str, Any],
    subs: InMemoryWebhookSubscriptions = Depends(get_subs),
):
    """Register a webhook subscription; requires topic, url and secret."""
    topic = body.get("topic")
    url = body.get("url")
    secret = body.get("secret")
    if not topic or not url or not secret:
        raise HTTPException(status_code=400, detail="Missing topic/url/secret")
    subs.add(topic, url, secret)
    return {"ok": True}


@router.post("/test-fire")
def test_fire(
    body: Dict[str, Any],
    svc: WebhookService = Depends(get_service),
):
    """Publish a test event for `topic`; returns the resulting outbox id."""
    topic = body.get("topic")
    payload = body.get("payload") or {}
    if not topic:
        raise HTTPException(status_code=400, detail="Missing topic")
    outbox_id = svc.publish(topic, payload)
    return {"ok": True, "outbox_id": outbox_id}
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from datetime import datetime, timezone
|
|
5
|
+
from typing import Dict, List
|
|
6
|
+
|
|
7
|
+
from uuid import uuid4
|
|
8
|
+
|
|
9
|
+
from svc_infra.db.outbox import OutboxStore
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass
class WebhookSubscription:
    """A registered receiver for one topic."""

    topic: str
    url: str
    secret: str
    id: str = field(default_factory=lambda: uuid4().hex)


class InMemoryWebhookSubscriptions:
    """Topic -> subscriptions registry held in process memory."""

    def __init__(self):
        self._subs: Dict[str, List[WebhookSubscription]] = {}

    def add(self, topic: str, url: str, secret: str) -> None:
        """Register a new subscription under `topic`."""
        bucket = self._subs.setdefault(topic, [])
        bucket.append(WebhookSubscription(topic, url, secret))

    def get_for_topic(self, topic: str) -> List[WebhookSubscription]:
        """Return a copy of the subscriptions for `topic` (empty if none)."""
        return list(self._subs.get(topic, []))
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class WebhookService:
    """Fans out published events to the outbox, one message per subscriber."""

    def __init__(self, outbox: OutboxStore, subs: InMemoryWebhookSubscriptions):
        self._outbox = outbox
        self._subs = subs

    def publish(self, topic: str, payload: Dict, *, version: int = 1) -> int:
        """Enqueue one outbox message per subscriber of *topic*.

        Every message carries the event (with a single shared ``created_at``
        timestamp) plus the subscriber's identity, so the delivery worker can
        sign and route it without a registry lookup. Returns the id of the
        last enqueued message, or 0 when *topic* has no subscribers.
        """
        event = {
            "topic": topic,
            "payload": payload,
            "version": version,
            "created_at": datetime.now(timezone.utc).isoformat(),
        }
        last_id = 0
        for sub in self._subs.get_for_topic(topic):
            message = self._outbox.enqueue(
                topic,
                {
                    # Copy so one subscriber's delivery cannot mutate another's.
                    "event": dict(event),
                    "subscription": {
                        "id": sub.id,
                        "topic": sub.topic,
                        "url": sub.url,
                        "secret": sub.secret,
                    },
                },
            )
            last_id = message.id
        return last_id
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import hashlib
|
|
4
|
+
import hmac
|
|
5
|
+
import json
|
|
6
|
+
from typing import Dict, Iterable
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def canonical_body(payload: Dict) -> bytes:
    """Serialize *payload* to deterministic, compact JSON bytes.

    Keys are sorted and separators are minimal so the same logical payload
    always yields the same byte sequence — a prerequisite for stable HMACs.
    """
    text = json.dumps(payload, sort_keys=True, separators=(",", ":"))
    return text.encode()
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def sign(secret: str, payload: Dict) -> str:
    """Return the hex HMAC-SHA256 signature of *payload* under *secret*.

    The payload is canonicalized (sorted keys, compact separators) before
    signing, so signatures are stable regardless of dict key order.
    """
    body = json.dumps(payload, sort_keys=True, separators=(",", ":")).encode()
    return hmac.new(secret.encode(), body, hashlib.sha256).hexdigest()
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def verify(secret: str, payload: Dict, signature: str) -> bool:
    """Check *signature* against the expected HMAC-SHA256 for *payload*.

    Uses a constant-time comparison; any comparison error (e.g. a
    non-string signature) counts as a verification failure.
    """
    body = json.dumps(payload, sort_keys=True, separators=(",", ":")).encode()
    expected = hmac.new(secret.encode(), body, hashlib.sha256).hexdigest()
    try:
        return hmac.compare_digest(expected, signature)
    except Exception:
        return False
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def verify_any(secrets: Iterable[str], payload: Dict, signature: str) -> bool:
    """Return True if *signature* verifies under any of *secrets*.

    Short-circuits on the first match; useful during secret rotation when
    old and new secrets are both accepted.
    """
    return any(verify(s, payload, signature) for s in secrets)
|
|
@@ -119,8 +119,9 @@ svc_infra/cache/resources.py,sha256=BhvPAZvCQ-fitUdniGEOOE4g1ZvljdCA_R5pR8WfJz4,
|
|
|
119
119
|
svc_infra/cache/tags.py,sha256=9URw4BRlnb4QFAYpDI36fMms6642xq4TeV9jqsEjzE8,2625
|
|
120
120
|
svc_infra/cache/ttl.py,sha256=_lWvNx1CTE4RcFEOUYkADd7_k4I13SLmtK0AMRUq2OM,1945
|
|
121
121
|
svc_infra/cache/utils.py,sha256=-LWr5IiJCNm3pwaoeCVlxNknnO2ChNKFcAGlFU98kjg,4856
|
|
122
|
-
svc_infra/cli/__init__.py,sha256=
|
|
123
|
-
svc_infra/cli/
|
|
122
|
+
svc_infra/cli/__init__.py,sha256=wsmFGr8wiKeoIW7pImcHt6piEV5KZQR2IDfgh3yHpyY,699
|
|
123
|
+
svc_infra/cli/__main__.py,sha256=5BjNuyet8AY-POwoF5rGt722rHQ7tJ0Vf0UFUfzzi-I,58
|
|
124
|
+
svc_infra/cli/cmds/__init__.py,sha256=HyUBE2pvhlTF5Uk03x_fqj4cbdX1Ri2CyHLUFBNK2UE,691
|
|
124
125
|
svc_infra/cli/cmds/db/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
125
126
|
svc_infra/cli/cmds/db/nosql/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
126
127
|
svc_infra/cli/cmds/db/nosql/mongo/README.md,sha256=0u3XLeoBd0XQzXwwfEiFISMIij11TJ9iOGzrysBvsFk,1788
|
|
@@ -131,6 +132,8 @@ svc_infra/cli/cmds/db/sql/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
|
|
|
131
132
|
svc_infra/cli/cmds/db/sql/alembic_cmds.py,sha256=KjumtKSOZR1UxbpZUuqllpknerDLNcY-0kqqqxiOnL4,7664
|
|
132
133
|
svc_infra/cli/cmds/db/sql/sql_scaffold_cmds.py,sha256=eNTCqHXOxgl9H3WTbGVn9BHXYwCpjIEJsDqhEFdrYMM,4613
|
|
133
134
|
svc_infra/cli/cmds/help.py,sha256=wGfZFMYaR2ZPwW2JwKDU7M3m4AtdCd8GRQ412AmEBUM,758
|
|
135
|
+
svc_infra/cli/cmds/jobs/__init__.py,sha256=U4S_2y3zgLZVfMenHRaJFBW8yqh2mUBuI291LGQVOJ8,35
|
|
136
|
+
svc_infra/cli/cmds/jobs/jobs_cmds.py,sha256=l-w5GuR82GWR_F1CA7WPYAM895XBD8TQj_hZ6retBv0,1252
|
|
134
137
|
svc_infra/cli/cmds/obs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
135
138
|
svc_infra/cli/cmds/obs/obs_cmds.py,sha256=fltUZu5fcnZdl0_JPJBIxIaA1Xqpw1BXE-SWBP-PRuY,6485
|
|
136
139
|
svc_infra/cli/foundation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
@@ -138,7 +141,7 @@ svc_infra/cli/foundation/runner.py,sha256=RbfjKwb3aHk1Y0MYU8xMpKRpIqRVMVr8GuL2ED
|
|
|
138
141
|
svc_infra/cli/foundation/typer_bootstrap.py,sha256=KapgH1R-qON9FuYH1KYlVx_5sJvjmAGl25pB61XCpm4,985
|
|
139
142
|
svc_infra/db/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
140
143
|
svc_infra/db/crud_schema.py,sha256=-fv-Om1lHVt6lcNbie6A2kRcPex4SDByUPfks6SpmUc,2521
|
|
141
|
-
svc_infra/db/inbox.py,sha256=
|
|
144
|
+
svc_infra/db/inbox.py,sha256=drxLRLHaMRrCDgo_8wj12do80wDh5ssHV6LGkaM98no,1996
|
|
142
145
|
svc_infra/db/nosql/__init__.py,sha256=5ETPHk-KYUtc-efuGzDFQmWkT0xFtYy8YWOHobMZhvM,154
|
|
143
146
|
svc_infra/db/nosql/base.py,sha256=p47VVpwWvGNkyWe5RDSmGaUFyZovcyNqirMqoHFQ4QU,230
|
|
144
147
|
svc_infra/db/nosql/constants.py,sha256=Z9bJImxwb8D7vovASFegv8XMwaWcM28tsKJV2SjywXE,416
|
|
@@ -160,7 +163,7 @@ svc_infra/db/nosql/service.py,sha256=CtltFp1Bwm4wCQnFLDtH5-P5NmUEzkWSAf3htoiTBCQ
|
|
|
160
163
|
svc_infra/db/nosql/service_with_hooks.py,sha256=rNH6renb-ppc8Y07jX5eSQnkkhJct2IZCq7mM9aBb48,747
|
|
161
164
|
svc_infra/db/nosql/types.py,sha256=lcyuoZvBHRlGD24WL2HCEG5YmCpwo7qB4VYAckcY-WE,814
|
|
162
165
|
svc_infra/db/nosql/utils.py,sha256=3u7X8WEPO1Cwy1SmZHmFMMbDfu1HhapJUAFbSMe3J9g,3524
|
|
163
|
-
svc_infra/db/outbox.py,sha256=
|
|
166
|
+
svc_infra/db/outbox.py,sha256=1ZIXaYMgQ2wLZX40iR8MKrRL5QNgoMP_jjSr-gSLAjY,3363
|
|
164
167
|
svc_infra/db/sql/README.md,sha256=OI1T7SiY4_f0eTWQGtIeUsgkFqzvloh1vctOm6nvIvU,8581
|
|
165
168
|
svc_infra/db/sql/__init__.py,sha256=PkDutfhzofY0jbE83ZuxbrvXhogvP1tmk5MniyfwQws,159
|
|
166
169
|
svc_infra/db/sql/apikey.py,sha256=27-4GAieD8NxoVKHw_WF2cj8A4UXbcnvtUUTztbo_yw,5019
|
|
@@ -191,6 +194,14 @@ svc_infra/db/sql/uniq_hooks.py,sha256=6gCnO0_Y-rhB0p-VuY0mZ9m1u3haiLWI3Ns_iUTqF_
|
|
|
191
194
|
svc_infra/db/sql/utils.py,sha256=nzuDcDhnVNehx5Y9BZLgxw8fvpfYbxTfXQsgnznVf4w,32862
|
|
192
195
|
svc_infra/db/sql/versioning.py,sha256=okZu2ad5RAFXNLXJgGpcQvZ5bc6gPjRWzwiBT0rEJJw,400
|
|
193
196
|
svc_infra/db/utils.py,sha256=aTD49VJSEu319kIWJ1uijUoP51co4lNJ3S0_tvuyGio,802
|
|
197
|
+
svc_infra/jobs/builtins/outbox_processor.py,sha256=VZoehNyjdaV_MmV74WMcbZR6z9E3VFMtZC-pxEwK0x0,1247
|
|
198
|
+
svc_infra/jobs/builtins/webhook_delivery.py,sha256=z_cl6YKwnduGjGaB8ZoUpKhFcEAhUZqqBma8v2FO1so,2982
|
|
199
|
+
svc_infra/jobs/easy.py,sha256=eix-OxWeE3vdkY3GGNoYM0GAyOxc928SpiSzMkr9k0A,977
|
|
200
|
+
svc_infra/jobs/loader.py,sha256=LFO6gOacj6rT698vkDg0YfcHDRTue4zus3Nl9QrS5R0,1164
|
|
201
|
+
svc_infra/jobs/queue.py,sha256=PS5f4CJm5_K7icojTxZOwC6uKw3O2M-jE111u85ySbA,2288
|
|
202
|
+
svc_infra/jobs/redis_queue.py,sha256=wgmWKslF1dkYscJe49UgUX7gwEuGyOUWEb0-pn82I3g,7543
|
|
203
|
+
svc_infra/jobs/scheduler.py,sha256=dTUEEyEuTVHNmJT8wPdMu4YjnTN7R_YW67gtCKpqC7M,1180
|
|
204
|
+
svc_infra/jobs/worker.py,sha256=T2A575_mnieJHPOYU_FseubLA_HQf9pB4CkRgzRJBHU,694
|
|
194
205
|
svc_infra/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
195
206
|
svc_infra/mcp/svc_infra_mcp.py,sha256=NmBY7AM3_pnHAumE-eM5Njr8kpb7Gh1-fjcZAEammiI,1927
|
|
196
207
|
svc_infra/obs/README.md,sha256=wOABJUOhuj0ftGt24ZfuChlFNJTYvYq4KM_rcRIdWRU,7884
|
|
@@ -252,7 +263,12 @@ svc_infra/security/permissions.py,sha256=fQm7-OcJJkWsScDcjS2gwmqaW93zQqltaHRl6bv
|
|
|
252
263
|
svc_infra/security/session.py,sha256=JkClqoZ-Moo9yqHzCREXMVSpzyjbn2Zh6zCjtWO93Ik,2848
|
|
253
264
|
svc_infra/security/signed_cookies.py,sha256=2t61BgjsBaTzU46bt7IUJo7lwDRE9_eS4vmAQXJ8mlY,2219
|
|
254
265
|
svc_infra/utils.py,sha256=VX1yjTx61-YvAymyRhGy18DhybiVdPddiYD_FlKTbJU,952
|
|
255
|
-
svc_infra
|
|
256
|
-
svc_infra
|
|
257
|
-
svc_infra
|
|
258
|
-
svc_infra
|
|
266
|
+
svc_infra/webhooks/__init__.py,sha256=U4S_2y3zgLZVfMenHRaJFBW8yqh2mUBuI291LGQVOJ8,35
|
|
267
|
+
svc_infra/webhooks/fastapi.py,sha256=BCNvGNxukf6dC2a4i-6en-PrjBGV19YvCWOot5lXWsA,1101
|
|
268
|
+
svc_infra/webhooks/router.py,sha256=6JvAVPMEth_xxHX-IsIOcyMgHX7g1H0OVxVXKLuMp9w,1596
|
|
269
|
+
svc_infra/webhooks/service.py,sha256=hWgiJRXKBwKunJOx91C7EcLUkotDtD3Xp0RT6vj2IC0,1797
|
|
270
|
+
svc_infra/webhooks/signing.py,sha256=NCwdZzmravUe7HVIK_uXK0qqf12FG-_MVsgPvOw6lsM,784
|
|
271
|
+
svc_infra-0.1.598.dist-info/METADATA,sha256=OwFbEqh9yMLpWr5rcN3Bp0M_ywJypISbUxabiMuQZY8,3527
|
|
272
|
+
svc_infra-0.1.598.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
|
|
273
|
+
svc_infra-0.1.598.dist-info/entry_points.txt,sha256=6x_nZOsjvn6hRZsMgZLgTasaCSKCgAjsGhACe_CiP0U,48
|
|
274
|
+
svc_infra-0.1.598.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|