svc-infra 0.1.595-py3-none-any.whl → 0.1.597-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of svc-infra might be problematic.

svc_infra/db/outbox.py ADDED
@@ -0,0 +1,104 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from datetime import datetime, timezone
+ from typing import Any, Dict, Iterable, List, Optional, Protocol
+
+
+ @dataclass
+ class OutboxMessage:
+     id: int
+     topic: str
+     payload: Dict[str, Any]
+     created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+     attempts: int = 0
+     processed_at: Optional[datetime] = None
+
+
+ class OutboxStore(Protocol):
+     def enqueue(self, topic: str, payload: Dict[str, Any]) -> OutboxMessage:
+         pass
+
+     def fetch_next(self, *, topics: Optional[Iterable[str]] = None) -> Optional[OutboxMessage]:
+         """Return the next undispatched, unprocessed message (FIFO per-topic), or None.
+
+         Notes:
+         - Messages with attempts > 0 are considered "dispatched" to the job queue and won't be re-enqueued.
+         - Delivery retries are handled by the job queue worker, not by re-reading the outbox.
+         """
+         pass
+
+     def mark_processed(self, msg_id: int) -> None:
+         pass
+
+     def mark_failed(self, msg_id: int) -> None:
+         pass
+
+
+ class InMemoryOutboxStore:
+     """Simple in-memory outbox for tests and local runs."""
+
+     def __init__(self):
+         self._seq = 0
+         self._messages: List[OutboxMessage] = []
+
+     def enqueue(self, topic: str, payload: Dict[str, Any]) -> OutboxMessage:
+         self._seq += 1
+         msg = OutboxMessage(id=self._seq, topic=topic, payload=dict(payload))
+         self._messages.append(msg)
+         return msg
+
+     def fetch_next(self, *, topics: Optional[Iterable[str]] = None) -> Optional[OutboxMessage]:
+         allowed = set(topics) if topics else None
+         for msg in self._messages:
+             if msg.processed_at is not None:
+                 continue
+             # skip already dispatched messages (attempts>0)
+             if msg.attempts > 0:
+                 continue
+             if allowed is not None and msg.topic not in allowed:
+                 continue
+             return msg
+         return None
+
+     def mark_processed(self, msg_id: int) -> None:
+         for msg in self._messages:
+             if msg.id == msg_id:
+                 msg.processed_at = datetime.now(timezone.utc)
+                 return
+
+     def mark_failed(self, msg_id: int) -> None:
+         for msg in self._messages:
+             if msg.id == msg_id:
+                 msg.attempts += 1
+                 return
+
+
+ class SqlOutboxStore:
+     """Skeleton for a SQL-backed outbox store.
+
+     Implementations should:
+     - INSERT on enqueue
+     - SELECT FOR UPDATE SKIP LOCKED (or equivalent) to fetch next
+     - UPDATE processed_at (and attempts on failure)
+     """
+
+     def __init__(self, session_factory):
+         self._session_factory = session_factory
+
+     # Placeholders to outline the API; not implemented here.
+     def enqueue(
+         self, topic: str, payload: Dict[str, Any]
+     ) -> OutboxMessage:  # pragma: no cover - skeleton
+         raise NotImplementedError
+
+     def fetch_next(
+         self, *, topics: Optional[Iterable[str]] = None
+     ) -> Optional[OutboxMessage]:  # pragma: no cover - skeleton
+         raise NotImplementedError
+
+     def mark_processed(self, msg_id: int) -> None:  # pragma: no cover - skeleton
+         raise NotImplementedError
+
+     def mark_failed(self, msg_id: int) -> None:  # pragma: no cover - skeleton
+         raise NotImplementedError
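
A minimal usage sketch of the in-memory store added above (illustrative, not part of the release); it relies only on the API shown in this file:

    from svc_infra.db.outbox import InMemoryOutboxStore

    outbox = InMemoryOutboxStore()
    msg = outbox.enqueue("user.created", {"user_id": 42})

    # Visible until it is processed or dispatched.
    assert outbox.fetch_next(topics=["user.created"]) is msg

    outbox.mark_failed(msg.id)       # bumps attempts, i.e. "dispatched to the job queue"
    assert outbox.fetch_next() is None

    outbox.mark_processed(msg.id)    # sets processed_at
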
@@ -0,0 +1,14 @@
+ from __future__ import annotations
+
+ from sqlalchemy import Integer
+ from sqlalchemy.orm import Mapped, mapped_column
+
+
+ class Versioned:
+     """Mixin for optimistic locking with integer version.
+
+     - Initialize version=1 on insert (via default=1)
+     - Bump version in app code before commit to detect mismatches.
+     """
+
+     version: Mapped[int] = mapped_column(Integer, nullable=False, default=1)
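
A hedged sketch of applying the mixin to a SQLAlchemy 2.x declarative model; `Base` and `Widget` are hypothetical names invented for this example, and `Versioned` is the mixin defined above (its module path is not shown in the diff, so no import is given for it):

    from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


    class Base(DeclarativeBase):      # hypothetical base for the example
        pass


    class Widget(Versioned, Base):    # hypothetical model; picks up the `version` column
        __tablename__ = "widgets"

        id: Mapped[int] = mapped_column(primary_key=True)
        name: Mapped[str]

    # Per the docstring, app code bumps `version` before commit (e.g. UPDATE ... WHERE
    # version = :expected) and treats a zero row count as an optimistic-lock conflict.
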
@@ -0,0 +1,38 @@
+ from __future__ import annotations
+
+ from typing import Iterable, Optional
+
+ from svc_infra.db.outbox import OutboxStore
+ from svc_infra.jobs.queue import JobQueue
+
+
+ def make_outbox_tick(
+     outbox: OutboxStore,
+     queue: JobQueue,
+     *,
+     topics: Optional[Iterable[str]] = None,
+     job_name_prefix: str = "outbox",
+ ):
+     """Return an async task function to move one outbox message into the job queue.
+
+     - It fetches at most one unprocessed message per tick to avoid starving others.
+     - The enqueued job name is f"{job_name_prefix}.{topic}" to allow routing.
+     - The job payload contains `outbox_id`, `topic`, and original `payload`.
+     """
+
+     dispatched: set[int] = set()
+
+     async def _tick():
+         # Outbox is sync; this wrapper is async for scheduler compatibility
+         msg = outbox.fetch_next(topics=topics)
+         if not msg:
+             return
+         if msg.id in dispatched:
+             return
+         job_name = f"{job_name_prefix}.{msg.topic}"
+         queue.enqueue(job_name, {"outbox_id": msg.id, "topic": msg.topic, "payload": msg.payload})
+         # mark as dispatched (bump attempts) so it won't be re-enqueued by fetch_next
+         outbox.mark_failed(msg.id)
+         dispatched.add(msg.id)
+
+     return _tick
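
A hedged wiring sketch using the in-memory stores from this release; `make_outbox_tick` is the factory defined above (its module path is not shown in the diff, so no import is given for it):

    import asyncio

    from svc_infra.db.outbox import InMemoryOutboxStore
    from svc_infra.jobs.queue import InMemoryJobQueue


    async def main() -> None:
        outbox = InMemoryOutboxStore()
        queue = InMemoryJobQueue()
        tick = make_outbox_tick(outbox, queue, job_name_prefix="outbox")

        outbox.enqueue("user.created", {"user_id": 42})
        await tick()                   # dispatches one message per tick

        job = queue.reserve_next()
        print(job.name, job.payload)   # outbox.user.created {'outbox_id': 1, ...}


    asyncio.run(main())
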
@@ -0,0 +1,59 @@
+ from __future__ import annotations
+
+ import hashlib
+ import hmac
+ import json
+ from typing import Dict
+
+ import httpx
+
+ from svc_infra.db.inbox import InboxStore
+ from svc_infra.db.outbox import OutboxStore
+ from svc_infra.jobs.queue import Job
+
+
+ def _compute_signature(secret: str, payload: Dict) -> str:
+     body = json.dumps(payload, separators=(",", ":")).encode()
+     return hmac.new(secret.encode(), body, hashlib.sha256).hexdigest()
+
+
+ def make_webhook_handler(
+     *,
+     outbox: OutboxStore,
+     inbox: InboxStore,
+     get_webhook_url_for_topic,
+     get_secret_for_topic,
+     header_name: str = "X-Signature",
+ ):
+     """Return an async job handler to deliver webhooks.
+
+     Expected job payload shape:
+         {"outbox_id": int, "topic": str, "payload": {...}}
+     """
+
+     async def _handler(job: Job) -> None:
+         data = job.payload or {}
+         outbox_id = data.get("outbox_id")
+         topic = data.get("topic")
+         payload = data.get("payload") or {}
+         if not outbox_id or not topic:
+             # Nothing we can do; ack to avoid poison loop
+             return
+         # dedupe by outbox_id via inbox
+         key = f"webhook:{outbox_id}"
+         if not inbox.mark_if_new(key, ttl_seconds=24 * 3600):
+             # already delivered
+             outbox.mark_processed(int(outbox_id))
+             return
+         url = get_webhook_url_for_topic(topic)
+         secret = get_secret_for_topic(topic)
+         sig = _compute_signature(secret, payload)
+         async with httpx.AsyncClient(timeout=10) as client:
+             resp = await client.post(url, json=payload, headers={header_name: sig})
+         if 200 <= resp.status_code < 300:
+             outbox.mark_processed(int(outbox_id))
+             return
+         # allow retry on non-2xx: raise to trigger fail/backoff
+         raise RuntimeError(f"webhook delivery failed: {resp.status_code}")
+
+     return _handler
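
A hedged construction sketch. The inbox class, URL, and secret below are stand-ins invented for the example (the handler only calls `mark_if_new` on the inbox, as seen above); `make_webhook_handler` is the factory defined above, whose module path is not shown in the diff:

    from svc_infra.db.outbox import InMemoryOutboxStore


    class DictInbox:
        """Stand-in inbox for the example; only mark_if_new is required by the handler."""

        def __init__(self) -> None:
            self._seen: set[str] = set()

        def mark_if_new(self, key: str, *, ttl_seconds: int) -> bool:
            if key in self._seen:
                return False
            self._seen.add(key)
            return True


    handler = make_webhook_handler(
        outbox=InMemoryOutboxStore(),
        inbox=DictInbox(),
        get_webhook_url_for_topic=lambda topic: "https://example.com/hooks",  # placeholder URL
        get_secret_for_topic=lambda topic: "shared-secret",                   # placeholder secret
    )
    # `handler` is an async callable taking a Job; it is meant to be registered as the
    # worker handler for jobs named "outbox.<topic>" produced by the outbox tick.
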
svc_infra/jobs/easy.py ADDED
@@ -0,0 +1,32 @@
+ from __future__ import annotations
+
+ import os
+
+ from redis import Redis
+
+ from .queue import InMemoryJobQueue, JobQueue
+ from .redis_queue import RedisJobQueue
+ from .scheduler import InMemoryScheduler
+
+
+ class JobsConfig:
+     def __init__(self, driver: str | None = None):
+         # Future: support redis/sql drivers via extras
+         self.driver = driver or os.getenv("JOBS_DRIVER", "memory").lower()
+
+
+ def easy_jobs(*, driver: str | None = None) -> tuple[JobQueue, InMemoryScheduler]:
+     """One-call wiring for jobs: returns (queue, scheduler).
+
+     Defaults to in-memory implementations for local/dev. ENV override via JOBS_DRIVER.
+     """
+     cfg = JobsConfig(driver=driver)
+     # Choose backend
+     if cfg.driver == "redis":
+         url = os.getenv("REDIS_URL", "redis://localhost:6379/0")
+         client = Redis.from_url(url)
+         queue = RedisJobQueue(client)
+     else:
+         queue = InMemoryJobQueue()
+     scheduler = InMemoryScheduler()
+     return queue, scheduler
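
A minimal call sketch; with JOBS_DRIVER unset, easy_jobs() returns the in-memory queue and scheduler shown elsewhere in this release:

    from svc_infra.jobs.easy import easy_jobs

    queue, scheduler = easy_jobs()    # in-memory by default
    # JOBS_DRIVER=redis (plus REDIS_URL) switches the queue to RedisJobQueue instead.

    queue.enqueue("emails.send", {"to": "user@example.com"}, delay_seconds=30)
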
@@ -0,0 +1,45 @@
+ from __future__ import annotations
+
+ import asyncio
+ import importlib
+ import json
+ import os
+ from typing import Awaitable, Callable
+
+ from .scheduler import InMemoryScheduler
+
+
+ def _resolve_target(path: str) -> Callable[[], Awaitable[None]]:
+     mod_name, func_name = path.split(":", 1)
+     mod = importlib.import_module(mod_name)
+     fn = getattr(mod, func_name)
+     if asyncio.iscoroutinefunction(fn):
+         return fn  # type: ignore[return-value]
+
+     # wrap sync into async
+     async def _wrapped():
+         fn()
+
+     return _wrapped
+
+
+ def schedule_from_env(scheduler: InMemoryScheduler, env_var: str = "JOBS_SCHEDULE_JSON") -> None:
+     data = os.getenv(env_var)
+     if not data:
+         return
+     try:
+         tasks = json.loads(data)
+     except json.JSONDecodeError:
+         return
+     if not isinstance(tasks, list):
+         return
+     for t in tasks:
+         try:
+             name = t["name"]
+             interval = int(t.get("interval_seconds", 60))
+             target = t["target"]
+             fn = _resolve_target(target)
+             scheduler.add_task(name, interval, fn)
+         except Exception:
+             # ignore bad entries
+             continue
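
Given the parsing above, JOBS_SCHEDULE_JSON is expected to hold a JSON list of {name, interval_seconds, target} objects with target in "module:function" form. A hedged sketch ("myapp.tasks" is a hypothetical module, and schedule_from_env is the function defined above, whose module path is not shown in the diff):

    import os

    from svc_infra.jobs.scheduler import InMemoryScheduler

    os.environ["JOBS_SCHEDULE_JSON"] = (
        '[{"name": "cleanup", "interval_seconds": 300, "target": "myapp.tasks:cleanup"},'
        ' {"name": "heartbeat", "target": "myapp.tasks:heartbeat"}]'
    )  # interval_seconds defaults to 60 when omitted

    scheduler = InMemoryScheduler()
    schedule_from_env(scheduler)   # registers each resolvable target; bad entries are skipped
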
@@ -0,0 +1,73 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from datetime import datetime, timedelta, timezone
+ from typing import Any, Dict, Optional, Protocol
+
+
+ @dataclass
+ class Job:
+     id: str
+     name: str
+     payload: Dict[str, Any]
+     available_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+     attempts: int = 0
+     max_attempts: int = 5
+     backoff_seconds: int = 60  # base backoff for retry
+     last_error: Optional[str] = None
+
+
+ class JobQueue(Protocol):
+     def enqueue(self, name: str, payload: Dict[str, Any], *, delay_seconds: int = 0) -> Job:
+         pass
+
+     def reserve_next(self) -> Optional[Job]:
+         pass
+
+     def ack(self, job_id: str) -> None:
+         pass
+
+     def fail(self, job_id: str, *, error: str | None = None) -> None:
+         pass
+
+
+ class InMemoryJobQueue:
+     """Simple in-memory queue for tests and local runs.
+
+     Single-threaded reserve/ack/fail semantics. Not suitable for production.
+     """
+
+     def __init__(self):
+         self._seq = 0
+         self._jobs: list[Job] = []
+
+     def _next_id(self) -> str:
+         self._seq += 1
+         return str(self._seq)
+
+     def enqueue(self, name: str, payload: Dict[str, Any], *, delay_seconds: int = 0) -> Job:
+         when = datetime.now(timezone.utc) + timedelta(seconds=delay_seconds)
+         job = Job(id=self._next_id(), name=name, payload=dict(payload), available_at=when)
+         self._jobs.append(job)
+         return job
+
+     def reserve_next(self) -> Optional[Job]:
+         now = datetime.now(timezone.utc)
+         for job in self._jobs:
+             if job.available_at <= now and job.attempts >= 0 and job.attempts < job.max_attempts:
+                 job.attempts += 1
+                 return job
+         return None
+
+     def ack(self, job_id: str) -> None:
+         self._jobs = [j for j in self._jobs if j.id != job_id]
+
+     def fail(self, job_id: str, *, error: str | None = None) -> None:
+         now = datetime.now(timezone.utc)
+         for job in self._jobs:
+             if job.id == job_id:
+                 job.last_error = error
+                 # Exponential backoff: base * attempts
+                 delay = job.backoff_seconds * max(1, job.attempts)
+                 job.available_at = now + timedelta(seconds=delay)
+                 return
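
A quick sketch of the reserve/fail cycle implemented above (import path per easy.py's relative imports):

    from svc_infra.jobs.queue import InMemoryJobQueue

    q = InMemoryJobQueue()
    q.enqueue("emails.send", {"to": "user@example.com"})

    job = q.reserve_next()                  # attempts bumped to 1 on reservation
    q.fail(job.id, error="smtp timeout")    # pushes available_at ~60s into the future

    assert q.reserve_next() is None         # hidden until the backoff elapses
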
@@ -0,0 +1,191 @@
+ from __future__ import annotations
+
+ import json
+ from dataclasses import asdict
+ from datetime import datetime, timezone
+ from typing import Dict, Optional
+
+ from redis import Redis
+
+ from .queue import Job, JobQueue
+
+
+ class RedisJobQueue(JobQueue):
+     """Redis-backed job queue with visibility timeout and delayed retries.
+
+     Keys (with optional prefix):
+     - {p}:ready          (LIST)   ready job ids
+     - {p}:processing     (LIST)   in-flight job ids
+     - {p}:processing_vt  (ZSET)   id -> visible_at (epoch seconds)
+     - {p}:delayed        (ZSET)   id -> available_at (epoch seconds)
+     - {p}:seq            (STRING) INCR for job ids
+     - {p}:job:{id}       (HASH)   job fields (json payload)
+     - {p}:dlq            (LIST)   dead-letter job ids
+     """
+
+     def __init__(self, client: Redis, *, prefix: str = "jobs", visibility_timeout: int = 60):
+         self._r = client
+         self._p = prefix
+         self._vt = visibility_timeout
+
+     # Key helpers
+     def _k(self, name: str) -> str:
+         return f"{self._p}:{name}"
+
+     def _job_key(self, job_id: str) -> str:
+         return f"{self._p}:job:{job_id}"
+
+     # Core ops
+     def enqueue(self, name: str, payload: Dict, *, delay_seconds: int = 0) -> Job:
+         now = datetime.now(timezone.utc)
+         job_id = str(self._r.incr(self._k("seq")))
+         job = Job(id=job_id, name=name, payload=dict(payload))
+         # Persist job
+         data = asdict(job)
+         data["payload"] = json.dumps(data["payload"])  # store payload as JSON string
+         # available_at stored as ISO format
+         data["available_at"] = job.available_at.isoformat()
+         self._r.hset(
+             self._job_key(job_id), mapping={k: str(v) for k, v in data.items() if v is not None}
+         )
+         if delay_seconds and delay_seconds > 0:
+             at = int(now.timestamp()) + int(delay_seconds)
+             self._r.zadd(self._k("delayed"), {job_id: at})
+         else:
+             # push to ready
+             self._r.lpush(self._k("ready"), job_id)
+         return job
+
+     def _move_due_delayed_to_ready(self) -> None:
+         now_ts = int(datetime.now(timezone.utc).timestamp())
+         ids = self._r.zrangebyscore(self._k("delayed"), "-inf", now_ts)
+         if not ids:
+             return
+         pipe = self._r.pipeline()
+         for jid in ids:
+             jid_s = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
+             pipe.lpush(self._k("ready"), jid_s)
+             pipe.zrem(self._k("delayed"), jid_s)
+         pipe.execute()
+
+     def _requeue_timed_out_processing(self) -> None:
+         now_ts = int(datetime.now(timezone.utc).timestamp())
+         ids = self._r.zrangebyscore(self._k("processing_vt"), "-inf", now_ts)
+         if not ids:
+             return
+         pipe = self._r.pipeline()
+         for jid in ids:
+             jid_s = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
+             pipe.lrem(self._k("processing"), 1, jid_s)
+             pipe.lpush(self._k("ready"), jid_s)
+             pipe.zrem(self._k("processing_vt"), jid_s)
+             # clear stale visibility timestamp so next reservation can set a fresh one
+             pipe.hdel(self._job_key(jid_s), "visible_at")
+         pipe.execute()
+
+     def reserve_next(self) -> Optional[Job]:
+         # opportunistically move due delayed jobs
+         self._move_due_delayed_to_ready()
+         # move timed-out processing jobs back to ready before reserving
+         self._requeue_timed_out_processing()
+         jid = self._r.rpoplpush(self._k("ready"), self._k("processing"))
+         if not jid:
+             return None
+         job_id = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
+         key = self._job_key(job_id)
+         data = self._r.hgetall(key)
+         if not data:
+             # corrupted entry; ack and skip
+             self._r.lrem(self._k("processing"), 1, job_id)
+             return None
+
+         # Decode fields
+         def _get(field: str, default: Optional[str] = None) -> Optional[str]:
+             val = (
+                 data.get(field.encode())
+                 if isinstance(next(iter(data.keys())), bytes)
+                 else data.get(field)
+             )
+             if val is None:
+                 return default
+             return val.decode() if isinstance(val, (bytes, bytearray)) else str(val)
+
+         attempts = int(_get("attempts", "0")) + 1
+         max_attempts = int(_get("max_attempts", "5"))
+         backoff_seconds = int(_get("backoff_seconds", "60"))
+         name = _get("name", "") or ""
+         payload_json = _get("payload", "{}") or "{}"
+         try:
+             payload = json.loads(payload_json)
+         except Exception:  # pragma: no cover
+             payload = {}
+         available_at_str = _get("available_at")
+         available_at = (
+             datetime.fromisoformat(available_at_str)
+             if available_at_str
+             else datetime.now(timezone.utc)
+         )
+         # If exceeded max_attempts → DLQ and skip
+         if attempts > max_attempts:
+             self._r.lrem(self._k("processing"), 1, job_id)
+             self._r.lpush(self._k("dlq"), job_id)
+             return None
+         # Update attempts and visibility timeout
+         visible_at = int(datetime.now(timezone.utc).timestamp()) + int(self._vt)
+         pipe = self._r.pipeline()
+         pipe.hset(key, mapping={"attempts": attempts, "visible_at": visible_at})
+         pipe.zadd(self._k("processing_vt"), {job_id: visible_at})
+         pipe.execute()
+         return Job(
+             id=job_id,
+             name=name,
+             payload=payload,
+             available_at=available_at,
+             attempts=attempts,
+             max_attempts=max_attempts,
+             backoff_seconds=backoff_seconds,
+         )
+
+     def ack(self, job_id: str) -> None:
+         self._r.lrem(self._k("processing"), 1, job_id)
+         self._r.zrem(self._k("processing_vt"), job_id)
+         self._r.delete(self._job_key(job_id))
+
+     def fail(self, job_id: str, *, error: str | None = None) -> None:
+         key = self._job_key(job_id)
+         data = self._r.hgetall(key)
+         if not data:
+             # nothing to do
+             self._r.lrem(self._k("processing"), 1, job_id)
+             return
+
+         def _get(field: str, default: Optional[str] = None) -> Optional[str]:
+             val = (
+                 data.get(field.encode())
+                 if isinstance(next(iter(data.keys())), bytes)
+                 else data.get(field)
+             )
+             if val is None:
+                 return default
+             return val.decode() if isinstance(val, (bytes, bytearray)) else str(val)
+
+         attempts = int(_get("attempts", "0"))
+         max_attempts = int(_get("max_attempts", "5"))
+         backoff_seconds = int(_get("backoff_seconds", "60"))
+         now_ts = int(datetime.now(timezone.utc).timestamp())
+         # DLQ if at or beyond max_attempts
+         if attempts >= max_attempts:
+             self._r.lrem(self._k("processing"), 1, job_id)
+             self._r.zrem(self._k("processing_vt"), job_id)
+             self._r.lpush(self._k("dlq"), job_id)
+             return
+         delay = backoff_seconds * max(1, attempts)
+         available_at_ts = now_ts + delay
+         mapping = {
+             "last_error": error or "",
+             "available_at": datetime.fromtimestamp(available_at_ts, tz=timezone.utc).isoformat(),
+         }
+         self._r.hset(key, mapping=mapping)
+         self._r.lrem(self._k("processing"), 1, job_id)
+         self._r.zrem(self._k("processing_vt"), job_id)
+         self._r.zadd(self._k("delayed"), {job_id: available_at_ts})
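
A hedged construction sketch (requires a reachable Redis; the URL is a placeholder, and the import path follows easy.py's relative import of .redis_queue):

    from redis import Redis

    from svc_infra.jobs.redis_queue import RedisJobQueue

    client = Redis.from_url("redis://localhost:6379/0")
    queue = RedisJobQueue(client, prefix="jobs", visibility_timeout=60)

    queue.enqueue("emails.send", {"to": "user@example.com"}, delay_seconds=10)
    job = queue.reserve_next()   # None until the delay elapses; afterwards the job moves
                                 # to processing with attempts += 1 and a visibility timeout
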
@@ -0,0 +1,41 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from datetime import datetime, timedelta, timezone
+ from typing import Awaitable, Callable, Dict
+
+ CronFunc = Callable[[], Awaitable[None]]
+
+
+ @dataclass
+ class ScheduledTask:
+     name: str
+     interval_seconds: int
+     func: CronFunc
+     next_run_at: datetime
+
+
+ class InMemoryScheduler:
+     """Interval-based scheduler for simple periodic tasks (tests/local).
+
+     Not a full cron parser. Tracks next_run_at per task.
+     """
+
+     def __init__(self):
+         self._tasks: Dict[str, ScheduledTask] = {}
+
+     def add_task(self, name: str, interval_seconds: int, func: CronFunc) -> None:
+         now = datetime.now(timezone.utc)
+         self._tasks[name] = ScheduledTask(
+             name=name,
+             interval_seconds=interval_seconds,
+             func=func,
+             next_run_at=now + timedelta(seconds=interval_seconds),
+         )
+
+     async def tick(self) -> None:
+         now = datetime.now(timezone.utc)
+         for task in self._tasks.values():
+             if task.next_run_at <= now:
+                 await task.func()
+                 task.next_run_at = now + timedelta(seconds=task.interval_seconds)
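
A short sketch of driving the scheduler above; the caller owns the tick loop:

    import asyncio

    from svc_infra.jobs.scheduler import InMemoryScheduler


    async def heartbeat() -> None:
        print("alive")


    async def run_forever() -> None:
        scheduler = InMemoryScheduler()
        scheduler.add_task("heartbeat", 5, heartbeat)   # first run ~5s after registration
        while True:
            await scheduler.tick()
            await asyncio.sleep(1)

    # asyncio.run(run_forever())   # loops indefinitely; left commented in this sketch
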
@@ -0,0 +1,24 @@
+ from __future__ import annotations
+
+ from typing import Awaitable, Callable
+
+ from .queue import Job, JobQueue
+
+ ProcessFunc = Callable[[Job], Awaitable[None]]
+
+
+ async def process_one(queue: JobQueue, handler: ProcessFunc) -> bool:
+     """Reserve a job, process with handler, ack on success or fail with backoff.
+
+     Returns True if a job was processed (success or fail), False if no job was available.
+     """
+     job = queue.reserve_next()
+     if not job:
+         return False
+     try:
+         await handler(job)
+     except Exception as exc:  # pragma: no cover - exercise in tests by raising
+         queue.fail(job.id, error=str(exc))
+         return True
+     queue.ack(job.id)
+     return True
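
A hedged worker-loop sketch; process_one is the coroutine defined above (its module path is not shown in this diff, so no import is given for it), and the handler is a trivial stand-in:

    import asyncio

    from svc_infra.jobs.queue import InMemoryJobQueue, Job


    async def handle(job: Job) -> None:
        print("processing", job.name, job.payload)   # raise here to trigger fail/backoff


    async def main() -> None:
        queue = InMemoryJobQueue()
        queue.enqueue("emails.send", {"to": "user@example.com"})
        # process_one returns False once nothing is available, so this drains the queue
        while await process_one(queue, handle):
            pass


    asyncio.run(main())
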
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: svc-infra
- Version: 0.1.595
+ Version: 0.1.597
  Summary: Infrastructure for building and deploying prod-ready services
  License: MIT
  Keywords: fastapi,sqlalchemy,alembic,auth,infra,async,pydantic