svc-infra 0.1.506__py3-none-any.whl → 0.1.654__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (202)
  1. svc_infra/apf_payments/README.md +732 -0
  2. svc_infra/apf_payments/alembic.py +11 -0
  3. svc_infra/apf_payments/models.py +339 -0
  4. svc_infra/apf_payments/provider/__init__.py +4 -0
  5. svc_infra/apf_payments/provider/aiydan.py +797 -0
  6. svc_infra/apf_payments/provider/base.py +270 -0
  7. svc_infra/apf_payments/provider/registry.py +31 -0
  8. svc_infra/apf_payments/provider/stripe.py +873 -0
  9. svc_infra/apf_payments/schemas.py +333 -0
  10. svc_infra/apf_payments/service.py +892 -0
  11. svc_infra/apf_payments/settings.py +67 -0
  12. svc_infra/api/fastapi/__init__.py +6 -0
  13. svc_infra/api/fastapi/admin/__init__.py +3 -0
  14. svc_infra/api/fastapi/admin/add.py +231 -0
  15. svc_infra/api/fastapi/apf_payments/__init__.py +0 -0
  16. svc_infra/api/fastapi/apf_payments/router.py +1082 -0
  17. svc_infra/api/fastapi/apf_payments/setup.py +73 -0
  18. svc_infra/api/fastapi/auth/add.py +15 -6
  19. svc_infra/api/fastapi/auth/gaurd.py +67 -5
  20. svc_infra/api/fastapi/auth/mfa/router.py +18 -9
  21. svc_infra/api/fastapi/auth/routers/account.py +3 -2
  22. svc_infra/api/fastapi/auth/routers/apikey_router.py +11 -5
  23. svc_infra/api/fastapi/auth/routers/oauth_router.py +82 -37
  24. svc_infra/api/fastapi/auth/routers/session_router.py +63 -0
  25. svc_infra/api/fastapi/auth/security.py +3 -1
  26. svc_infra/api/fastapi/auth/settings.py +2 -0
  27. svc_infra/api/fastapi/auth/state.py +1 -1
  28. svc_infra/api/fastapi/billing/router.py +64 -0
  29. svc_infra/api/fastapi/billing/setup.py +19 -0
  30. svc_infra/api/fastapi/cache/add.py +9 -5
  31. svc_infra/api/fastapi/db/nosql/mongo/add.py +33 -27
  32. svc_infra/api/fastapi/db/sql/add.py +40 -18
  33. svc_infra/api/fastapi/db/sql/crud_router.py +176 -14
  34. svc_infra/api/fastapi/db/sql/session.py +16 -0
  35. svc_infra/api/fastapi/db/sql/users.py +14 -2
  36. svc_infra/api/fastapi/dependencies/ratelimit.py +116 -0
  37. svc_infra/api/fastapi/docs/add.py +160 -0
  38. svc_infra/api/fastapi/docs/landing.py +1 -1
  39. svc_infra/api/fastapi/docs/scoped.py +254 -0
  40. svc_infra/api/fastapi/dual/dualize.py +38 -33
  41. svc_infra/api/fastapi/dual/router.py +48 -1
  42. svc_infra/api/fastapi/dx.py +3 -3
  43. svc_infra/api/fastapi/http/__init__.py +0 -0
  44. svc_infra/api/fastapi/http/concurrency.py +14 -0
  45. svc_infra/api/fastapi/http/conditional.py +33 -0
  46. svc_infra/api/fastapi/http/deprecation.py +21 -0
  47. svc_infra/api/fastapi/middleware/errors/handlers.py +45 -7
  48. svc_infra/api/fastapi/middleware/graceful_shutdown.py +87 -0
  49. svc_infra/api/fastapi/middleware/idempotency.py +116 -0
  50. svc_infra/api/fastapi/middleware/idempotency_store.py +187 -0
  51. svc_infra/api/fastapi/middleware/optimistic_lock.py +37 -0
  52. svc_infra/api/fastapi/middleware/ratelimit.py +119 -0
  53. svc_infra/api/fastapi/middleware/ratelimit_store.py +84 -0
  54. svc_infra/api/fastapi/middleware/request_id.py +23 -0
  55. svc_infra/api/fastapi/middleware/request_size_limit.py +36 -0
  56. svc_infra/api/fastapi/middleware/timeout.py +148 -0
  57. svc_infra/api/fastapi/openapi/mutators.py +768 -55
  58. svc_infra/api/fastapi/ops/add.py +73 -0
  59. svc_infra/api/fastapi/pagination.py +363 -0
  60. svc_infra/api/fastapi/paths/auth.py +14 -14
  61. svc_infra/api/fastapi/paths/prefix.py +0 -1
  62. svc_infra/api/fastapi/paths/user.py +1 -1
  63. svc_infra/api/fastapi/routers/ping.py +1 -0
  64. svc_infra/api/fastapi/setup.py +48 -15
  65. svc_infra/api/fastapi/tenancy/add.py +19 -0
  66. svc_infra/api/fastapi/tenancy/context.py +112 -0
  67. svc_infra/api/fastapi/versioned.py +101 -0
  68. svc_infra/app/README.md +5 -5
  69. svc_infra/billing/__init__.py +23 -0
  70. svc_infra/billing/async_service.py +147 -0
  71. svc_infra/billing/jobs.py +230 -0
  72. svc_infra/billing/models.py +131 -0
  73. svc_infra/billing/quotas.py +101 -0
  74. svc_infra/billing/schemas.py +33 -0
  75. svc_infra/billing/service.py +115 -0
  76. svc_infra/bundled_docs/README.md +5 -0
  77. svc_infra/bundled_docs/__init__.py +1 -0
  78. svc_infra/bundled_docs/getting-started.md +6 -0
  79. svc_infra/cache/__init__.py +4 -0
  80. svc_infra/cache/add.py +158 -0
  81. svc_infra/cache/backend.py +5 -2
  82. svc_infra/cache/decorators.py +19 -1
  83. svc_infra/cache/keys.py +24 -4
  84. svc_infra/cli/__init__.py +32 -8
  85. svc_infra/cli/__main__.py +4 -0
  86. svc_infra/cli/cmds/__init__.py +10 -0
  87. svc_infra/cli/cmds/db/nosql/mongo/mongo_cmds.py +4 -3
  88. svc_infra/cli/cmds/db/nosql/mongo/mongo_scaffold_cmds.py +4 -4
  89. svc_infra/cli/cmds/db/sql/alembic_cmds.py +120 -14
  90. svc_infra/cli/cmds/db/sql/sql_export_cmds.py +80 -0
  91. svc_infra/cli/cmds/db/sql/sql_scaffold_cmds.py +5 -4
  92. svc_infra/cli/cmds/docs/docs_cmds.py +140 -0
  93. svc_infra/cli/cmds/dx/__init__.py +12 -0
  94. svc_infra/cli/cmds/dx/dx_cmds.py +99 -0
  95. svc_infra/cli/cmds/help.py +4 -0
  96. svc_infra/cli/cmds/jobs/__init__.py +1 -0
  97. svc_infra/cli/cmds/jobs/jobs_cmds.py +43 -0
  98. svc_infra/cli/cmds/obs/obs_cmds.py +4 -3
  99. svc_infra/cli/cmds/sdk/__init__.py +0 -0
  100. svc_infra/cli/cmds/sdk/sdk_cmds.py +102 -0
  101. svc_infra/data/add.py +61 -0
  102. svc_infra/data/backup.py +53 -0
  103. svc_infra/data/erasure.py +45 -0
  104. svc_infra/data/fixtures.py +40 -0
  105. svc_infra/data/retention.py +55 -0
  106. svc_infra/db/inbox.py +67 -0
  107. svc_infra/db/nosql/mongo/README.md +13 -13
  108. svc_infra/db/outbox.py +104 -0
  109. svc_infra/db/sql/apikey.py +1 -1
  110. svc_infra/db/sql/authref.py +61 -0
  111. svc_infra/db/sql/core.py +2 -2
  112. svc_infra/db/sql/repository.py +52 -12
  113. svc_infra/db/sql/resource.py +5 -0
  114. svc_infra/db/sql/scaffold.py +16 -4
  115. svc_infra/db/sql/templates/models_schemas/auth/schemas.py.tmpl +1 -1
  116. svc_infra/db/sql/templates/setup/env_async.py.tmpl +199 -76
  117. svc_infra/db/sql/templates/setup/env_sync.py.tmpl +231 -79
  118. svc_infra/db/sql/tenant.py +79 -0
  119. svc_infra/db/sql/utils.py +18 -4
  120. svc_infra/db/sql/versioning.py +14 -0
  121. svc_infra/docs/acceptance-matrix.md +71 -0
  122. svc_infra/docs/acceptance.md +44 -0
  123. svc_infra/docs/admin.md +425 -0
  124. svc_infra/docs/adr/0002-background-jobs-and-scheduling.md +40 -0
  125. svc_infra/docs/adr/0003-webhooks-framework.md +24 -0
  126. svc_infra/docs/adr/0004-tenancy-model.md +42 -0
  127. svc_infra/docs/adr/0005-data-lifecycle.md +86 -0
  128. svc_infra/docs/adr/0006-ops-slos-and-metrics.md +47 -0
  129. svc_infra/docs/adr/0007-docs-and-sdks.md +83 -0
  130. svc_infra/docs/adr/0008-billing-primitives.md +143 -0
  131. svc_infra/docs/adr/0009-acceptance-harness.md +40 -0
  132. svc_infra/docs/adr/0010-timeouts-and-resource-limits.md +54 -0
  133. svc_infra/docs/adr/0011-admin-scope-and-impersonation.md +73 -0
  134. svc_infra/docs/api.md +59 -0
  135. svc_infra/docs/auth.md +11 -0
  136. svc_infra/docs/billing.md +190 -0
  137. svc_infra/docs/cache.md +76 -0
  138. svc_infra/docs/cli.md +74 -0
  139. svc_infra/docs/contributing.md +34 -0
  140. svc_infra/docs/data-lifecycle.md +52 -0
  141. svc_infra/docs/database.md +14 -0
  142. svc_infra/docs/docs-and-sdks.md +62 -0
  143. svc_infra/docs/environment.md +114 -0
  144. svc_infra/docs/getting-started.md +63 -0
  145. svc_infra/docs/idempotency.md +111 -0
  146. svc_infra/docs/jobs.md +67 -0
  147. svc_infra/docs/observability.md +16 -0
  148. svc_infra/docs/ops.md +37 -0
  149. svc_infra/docs/rate-limiting.md +125 -0
  150. svc_infra/docs/repo-review.md +48 -0
  151. svc_infra/docs/security.md +176 -0
  152. svc_infra/docs/tenancy.md +35 -0
  153. svc_infra/docs/timeouts-and-resource-limits.md +147 -0
  154. svc_infra/docs/versioned-integrations.md +146 -0
  155. svc_infra/docs/webhooks.md +112 -0
  156. svc_infra/dx/add.py +63 -0
  157. svc_infra/dx/changelog.py +74 -0
  158. svc_infra/dx/checks.py +67 -0
  159. svc_infra/http/__init__.py +13 -0
  160. svc_infra/http/client.py +72 -0
  161. svc_infra/jobs/builtins/outbox_processor.py +38 -0
  162. svc_infra/jobs/builtins/webhook_delivery.py +90 -0
  163. svc_infra/jobs/easy.py +32 -0
  164. svc_infra/jobs/loader.py +45 -0
  165. svc_infra/jobs/queue.py +81 -0
  166. svc_infra/jobs/redis_queue.py +191 -0
  167. svc_infra/jobs/runner.py +75 -0
  168. svc_infra/jobs/scheduler.py +41 -0
  169. svc_infra/jobs/worker.py +40 -0
  170. svc_infra/mcp/svc_infra_mcp.py +85 -28
  171. svc_infra/obs/README.md +2 -0
  172. svc_infra/obs/add.py +54 -7
  173. svc_infra/obs/grafana/dashboards/http-overview.json +45 -0
  174. svc_infra/obs/metrics/__init__.py +53 -0
  175. svc_infra/obs/metrics.py +52 -0
  176. svc_infra/security/add.py +201 -0
  177. svc_infra/security/audit.py +130 -0
  178. svc_infra/security/audit_service.py +73 -0
  179. svc_infra/security/headers.py +52 -0
  180. svc_infra/security/hibp.py +95 -0
  181. svc_infra/security/jwt_rotation.py +53 -0
  182. svc_infra/security/lockout.py +96 -0
  183. svc_infra/security/models.py +255 -0
  184. svc_infra/security/org_invites.py +128 -0
  185. svc_infra/security/passwords.py +77 -0
  186. svc_infra/security/permissions.py +149 -0
  187. svc_infra/security/session.py +98 -0
  188. svc_infra/security/signed_cookies.py +80 -0
  189. svc_infra/webhooks/__init__.py +16 -0
  190. svc_infra/webhooks/add.py +322 -0
  191. svc_infra/webhooks/fastapi.py +37 -0
  192. svc_infra/webhooks/router.py +55 -0
  193. svc_infra/webhooks/service.py +67 -0
  194. svc_infra/webhooks/signing.py +30 -0
  195. svc_infra-0.1.654.dist-info/METADATA +154 -0
  196. svc_infra-0.1.654.dist-info/RECORD +352 -0
  197. svc_infra/api/fastapi/deps.py +0 -3
  198. svc_infra-0.1.506.dist-info/METADATA +0 -78
  199. svc_infra-0.1.506.dist-info/RECORD +0 -213
  200. /svc_infra/{api/fastapi/schemas → apf_payments}/__init__.py +0 -0
  201. {svc_infra-0.1.506.dist-info → svc_infra-0.1.654.dist-info}/WHEEL +0 -0
  202. {svc_infra-0.1.506.dist-info → svc_infra-0.1.654.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,72 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ from typing import Any, Dict, Optional
5
+
6
+ import httpx
7
+
8
+ from svc_infra.app.env import pick
9
+
10
+
11
+ def _parse_float_env(name: str, default: float) -> float:
12
+ raw = os.getenv(name)
13
+ if raw is None or raw == "":
14
+ return default
15
+ try:
16
+ return float(raw)
17
+ except ValueError:
18
+ return default
19
+
20
+
21
def get_default_timeout_seconds() -> float:
    """Resolve the default outbound HTTP client timeout in seconds.

    Env var: HTTP_CLIENT_TIMEOUT_SECONDS (float). Falls back to 10.0 for
    every environment (both prod and nonprod via pick()).
    """
    fallback = pick(prod=10.0, nonprod=10.0)
    return _parse_float_env("HTTP_CLIENT_TIMEOUT_SECONDS", fallback)
29
+
30
+
31
def make_timeout(seconds: float | None = None) -> httpx.Timeout:
    """Build an httpx.Timeout from *seconds* (default: env-derived value).

    A single value is applied to connect/read/write/pool for simplicity.
    """
    if seconds is None:
        seconds = get_default_timeout_seconds()
    return httpx.Timeout(timeout=seconds)
35
+
36
+
37
def new_httpx_client(
    *,
    timeout_seconds: Optional[float] = None,
    headers: Optional[Dict[str, str]] = None,
    base_url: Optional[str] = None,
    **kwargs: Any,
) -> httpx.Client:
    """Create a sync httpx.Client with the library default timeout applied.

    *timeout_seconds* overrides the default; remaining kwargs are forwarded
    to httpx.Client and take precedence over the timeout/headers set here.
    """
    opts: Dict[str, Any] = {
        "timeout": make_timeout(timeout_seconds),
        "headers": headers,
    }
    opts.update(kwargs)
    # httpx rejects base_url=None, so only forward it when provided.
    if base_url is not None:
        opts["base_url"] = base_url
    return httpx.Client(**opts)
54
+
55
+
56
def new_async_httpx_client(
    *,
    timeout_seconds: Optional[float] = None,
    headers: Optional[Dict[str, str]] = None,
    base_url: Optional[str] = None,
    **kwargs: Any,
) -> httpx.AsyncClient:
    """Create an httpx.AsyncClient with the library default timeout applied.

    *timeout_seconds* overrides the default; remaining kwargs are forwarded
    to httpx.AsyncClient and take precedence over the timeout/headers set
    here.
    """
    opts: Dict[str, Any] = {
        "timeout": make_timeout(timeout_seconds),
        "headers": headers,
    }
    opts.update(kwargs)
    # httpx rejects base_url=None, so only forward it when provided.
    if base_url is not None:
        opts["base_url"] = base_url
    return httpx.AsyncClient(**opts)
@@ -0,0 +1,38 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Iterable, Optional
4
+
5
+ from svc_infra.db.outbox import OutboxStore
6
+ from svc_infra.jobs.queue import JobQueue
7
+
8
+
9
def make_outbox_tick(
    outbox: OutboxStore,
    queue: JobQueue,
    *,
    topics: Optional[Iterable[str]] = None,
    job_name_prefix: str = "outbox",
):
    """Return an async task function to move one outbox message into the job queue.

    - It fetches at most one unprocessed message per tick to avoid starving others.
    - The enqueued job name is f"{job_name_prefix}.{topic}" to allow routing.
    - The job payload contains `outbox_id`, `topic`, and original `payload`.
    """

    # Per-process guard against enqueueing the same message twice.
    # NOTE(review): this set grows without bound for the lifetime of the tick
    # function and resets on restart — confirm mark_failed() reliably keeps
    # fetch_next() from returning an already-dispatched message.
    dispatched: set[int] = set()

    async def _tick():
        # Outbox is sync; this wrapper is async for scheduler compatibility
        msg = outbox.fetch_next(topics=topics)
        if not msg:
            # nothing pending this tick
            return
        if msg.id in dispatched:
            # already enqueued by this process; skip
            return
        job_name = f"{job_name_prefix}.{msg.topic}"
        queue.enqueue(job_name, {"outbox_id": msg.id, "topic": msg.topic, "payload": msg.payload})
        # mark as dispatched (bump attempts) so it won't be re-enqueued by fetch_next
        outbox.mark_failed(msg.id)
        dispatched.add(msg.id)

    return _tick
@@ -0,0 +1,90 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+
5
+ from svc_infra.db.inbox import InboxStore
6
+ from svc_infra.db.outbox import OutboxStore
7
+ from svc_infra.http import get_default_timeout_seconds, new_async_httpx_client
8
+ from svc_infra.jobs.queue import Job
9
+ from svc_infra.webhooks.signing import sign
10
+
11
+
12
def make_webhook_handler(
    *,
    outbox: OutboxStore,
    inbox: InboxStore,
    get_webhook_url_for_topic,
    get_secret_for_topic,
    header_name: str = "X-Signature",
):
    """Return an async job handler to deliver webhooks.

    Expected job payload shape:
    {"outbox_id": int, "topic": str, "payload": {...}}

    ``get_webhook_url_for_topic`` / ``get_secret_for_topic`` are callables
    invoked with the topic; they supply fallbacks when the payload carries
    no subscription. Successful deliveries are recorded in the inbox (as a
    dedupe marker) and the outbox row is marked processed; non-2xx responses
    raise so the job queue retries with backoff.
    """

    async def _handler(job: Job) -> None:
        data = job.payload or {}
        outbox_id = data.get("outbox_id")
        topic = data.get("topic")
        payload = data.get("payload") or {}
        if not outbox_id or not topic:
            # Nothing we can do; ack to avoid poison loop
            return
        # dedupe marker key (marked after successful delivery)
        key = f"webhook:{outbox_id}"
        if inbox.is_marked(key):
            # already delivered
            outbox.mark_processed(int(outbox_id))
            return
        # Subscription-aware payloads wrap the event as
        # {"event": ..., "subscription": {...}}; otherwise deliver as-is.
        event = payload.get("event") if isinstance(payload, dict) else None
        subscription = payload.get("subscription") if isinstance(payload, dict) else None
        if event is not None and subscription is not None:
            delivery_payload = event
            url = subscription.get("url") or get_webhook_url_for_topic(topic)
            secret = subscription.get("secret") or get_secret_for_topic(topic)
            subscription_id = subscription.get("id")
        else:
            delivery_payload = payload
            url = get_webhook_url_for_topic(topic)
            secret = get_secret_for_topic(topic)
            subscription_id = None
        # HMAC signature over the delivery payload (see webhooks.signing.sign).
        sig = sign(secret, delivery_payload)
        headers = {
            header_name: sig,
            "X-Event-Id": str(outbox_id),
            "X-Topic": str(topic),
            "X-Attempt": str(job.attempts or 1),
            "X-Signature-Alg": "hmac-sha256",
            "X-Signature-Version": "v1",
        }
        if subscription_id:
            headers["X-Webhook-Subscription"] = str(subscription_id)
        # include event payload version if present
        version = None
        if isinstance(delivery_payload, dict):
            version = delivery_payload.get("version")
        if version is not None:
            headers["X-Payload-Version"] = str(version)
        # Derive timeout: dedicated WEBHOOK_DELIVERY_TIMEOUT_SECONDS or default HTTP client timeout
        timeout_seconds = None
        env_timeout = os.getenv("WEBHOOK_DELIVERY_TIMEOUT_SECONDS")
        if env_timeout:
            try:
                timeout_seconds = float(env_timeout)
            except ValueError:
                # unparsable override → fall back to the shared default
                timeout_seconds = get_default_timeout_seconds()
        else:
            timeout_seconds = get_default_timeout_seconds()

        async with new_async_httpx_client(timeout_seconds=timeout_seconds) as client:
            resp = await client.post(url, json=delivery_payload, headers=headers)
            if 200 <= resp.status_code < 300:
                # record delivery and mark processed
                inbox.mark_if_new(key, ttl_seconds=24 * 3600)
                outbox.mark_processed(int(outbox_id))
                return
            # allow retry on non-2xx: raise to trigger fail/backoff
            raise RuntimeError(f"webhook delivery failed: {resp.status_code}")

    return _handler
svc_infra/jobs/easy.py ADDED
@@ -0,0 +1,32 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+
5
+ from redis import Redis
6
+
7
+ from .queue import InMemoryJobQueue, JobQueue
8
+ from .redis_queue import RedisJobQueue
9
+ from .scheduler import InMemoryScheduler
10
+
11
+
12
class JobsConfig:
    """Resolved jobs-backend configuration.

    The driver comes from the *driver* argument when given, otherwise from
    the JOBS_DRIVER env var (default "memory"), and is normalized to
    lowercase in both cases so downstream comparisons like
    ``driver == "redis"`` behave the same regardless of caller casing.
    """

    def __init__(self, driver: str | None = None):
        # Future: support redis/sql drivers via extras
        # Bug fix: previously `.lower()` bound only to the env-var fallback
        # (`driver or os.getenv(...).lower()`), so an explicit driver such as
        # "Redis" kept its casing and silently failed the "redis" comparison
        # in easy_jobs(), falling back to the in-memory queue.
        self.driver = (driver or os.getenv("JOBS_DRIVER", "memory")).lower()
16
+
17
+
18
def easy_jobs(*, driver: str | None = None) -> tuple[JobQueue, InMemoryScheduler]:
    """One-call wiring for jobs: returns (queue, scheduler).

    In-memory implementations by default for local/dev; select the Redis
    backend with driver="redis" or JOBS_DRIVER=redis (REDIS_URL controls
    the connection, defaulting to localhost).
    """
    cfg = JobsConfig(driver=driver)
    # Choose backend
    queue: JobQueue
    if cfg.driver == "redis":
        redis_url = os.getenv("REDIS_URL", "redis://localhost:6379/0")
        queue = RedisJobQueue(Redis.from_url(redis_url))
    else:
        queue = InMemoryJobQueue()
    return queue, InMemoryScheduler()
@@ -0,0 +1,45 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import importlib
5
+ import json
6
+ import os
7
+ from typing import Awaitable, Callable
8
+
9
+ from .scheduler import InMemoryScheduler
10
+
11
+
12
+ def _resolve_target(path: str) -> Callable[[], Awaitable[None]]:
13
+ mod_name, func_name = path.split(":", 1)
14
+ mod = importlib.import_module(mod_name)
15
+ fn = getattr(mod, func_name)
16
+ if asyncio.iscoroutinefunction(fn):
17
+ return fn # type: ignore[return-value]
18
+
19
+ # wrap sync into async
20
+ async def _wrapped():
21
+ fn()
22
+
23
+ return _wrapped
24
+
25
+
26
def schedule_from_env(scheduler: InMemoryScheduler, env_var: str = "JOBS_SCHEDULE_JSON") -> None:
    """Register periodic tasks described by a JSON list held in *env_var*.

    Each entry looks like {"name": str, "target": "module:callable",
    "interval_seconds": int (default 60)}. Malformed JSON, a non-list
    payload, and individually bad entries are all skipped silently.
    """
    raw = os.getenv(env_var)
    if not raw:
        return
    try:
        entries = json.loads(raw)
    except json.JSONDecodeError:
        return
    if not isinstance(entries, list):
        return
    for entry in entries:
        try:
            scheduler.add_task(
                entry["name"],
                int(entry.get("interval_seconds", 60)),
                _resolve_target(entry["target"]),
            )
        except Exception:
            # best-effort: ignore entries that are malformed or fail to resolve
            continue
@@ -0,0 +1,81 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass, field
4
+ from datetime import datetime, timedelta, timezone
5
+ from typing import Any, Dict, Optional, Protocol
6
+
7
+
8
@dataclass
class Job:
    """A unit of work flowing through a JobQueue."""

    id: str
    name: str
    payload: Dict[str, Any]
    # When the job becomes eligible for reservation (UTC).
    available_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    attempts: int = 0
    max_attempts: int = 5
    backoff_seconds: int = 60  # base backoff for retry
    last_error: Optional[str] = None


class JobQueue(Protocol):
    """Structural queue contract: enqueue, reserve, ack, fail."""

    def enqueue(self, name: str, payload: Dict[str, Any], *, delay_seconds: int = 0) -> Job:
        ...

    def reserve_next(self) -> Optional[Job]:
        ...

    def ack(self, job_id: str) -> None:
        ...

    def fail(self, job_id: str, *, error: str | None = None) -> None:
        ...


class InMemoryJobQueue:
    """Simple in-memory queue for tests and local runs.

    Single-threaded reserve/ack/fail semantics. Not suitable for production.
    """

    def __init__(self):
        self._counter = 0
        self._pending: list[Job] = []

    def _next_id(self) -> str:
        # Monotonically increasing string ids: "1", "2", ...
        self._counter += 1
        return str(self._counter)

    def enqueue(self, name: str, payload: Dict[str, Any], *, delay_seconds: int = 0) -> Job:
        """Append a new job, optionally delayed by *delay_seconds*."""
        available = datetime.now(timezone.utc) + timedelta(seconds=delay_seconds)
        job = Job(id=self._next_id(), name=name, payload=dict(payload), available_at=available)
        self._pending.append(job)
        return job

    def reserve_next(self) -> Optional[Job]:
        """Return the first due job (attempts bumped), or None if idle."""
        now = datetime.now(timezone.utc)
        candidate = next(
            (
                j
                for j in self._pending
                if j.available_at <= now and 0 <= j.attempts < j.max_attempts
            ),
            None,
        )
        if candidate is not None:
            candidate.attempts += 1
        return candidate

    def ack(self, job_id: str) -> None:
        """Drop the job entirely on success."""
        self._pending = [j for j in self._pending if j.id != job_id]

    def fail(self, job_id: str, *, error: str | None = None) -> None:
        """Record *error* and push the job's availability into the future."""
        now = datetime.now(timezone.utc)
        target = next((j for j in self._pending if j.id == job_id), None)
        if target is None:
            return
        target.last_error = error
        # Linear backoff: base * attempts so far.
        delay = target.backoff_seconds * max(1, target.attempts)
        if delay > 0:
            # A 250ms fudge keeps an immediate subsequent poll in ultra-fast
            # environments from re-reserving the job; tests stay deterministic.
            target.available_at = now + timedelta(seconds=delay, milliseconds=250)
        else:
            # Backoff explicitly zero (e.g. unit tests) → retry right away.
            target.available_at = now
@@ -0,0 +1,191 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ from dataclasses import asdict
5
+ from datetime import datetime, timezone
6
+ from typing import Dict, Optional
7
+
8
+ from redis import Redis
9
+
10
+ from .queue import Job, JobQueue
11
+
12
+
13
class RedisJobQueue(JobQueue):
    """Redis-backed job queue with visibility timeout and delayed retries.

    Keys (with optional prefix):
    - {p}:ready (LIST) ready job ids
    - {p}:processing (LIST) in-flight job ids
    - {p}:processing_vt (ZSET) id -> visible_at (epoch seconds)
    - {p}:delayed (ZSET) id -> available_at (epoch seconds)
    - {p}:seq (STRING) INCR for job ids
    - {p}:job:{id} (HASH) job fields (json payload)
    - {p}:dlq (LIST) dead-letter job ids
    """

    def __init__(self, client: Redis, *, prefix: str = "jobs", visibility_timeout: int = 60):
        self._r = client
        self._p = prefix
        # Seconds a reserved job may stay in-flight before it is treated as
        # timed out and requeued by _requeue_timed_out_processing().
        self._vt = visibility_timeout

    # Key helpers
    def _k(self, name: str) -> str:
        # Namespaced key, e.g. "jobs:ready".
        return f"{self._p}:{name}"

    def _job_key(self, job_id: str) -> str:
        # Per-job hash key, e.g. "jobs:job:42".
        return f"{self._p}:job:{job_id}"

    # Core ops
    def enqueue(self, name: str, payload: Dict, *, delay_seconds: int = 0) -> Job:
        """Persist a new job hash and place its id on ready (or delayed)."""
        now = datetime.now(timezone.utc)
        job_id = str(self._r.incr(self._k("seq")))
        job = Job(id=job_id, name=name, payload=dict(payload))
        # Persist job
        data = asdict(job)
        data["payload"] = json.dumps(data["payload"])  # store payload as JSON string
        # available_at stored as ISO format
        data["available_at"] = job.available_at.isoformat()
        # None fields (e.g. last_error) are dropped; all values stringified.
        self._r.hset(
            self._job_key(job_id), mapping={k: str(v) for k, v in data.items() if v is not None}
        )
        if delay_seconds and delay_seconds > 0:
            at = int(now.timestamp()) + int(delay_seconds)
            self._r.zadd(self._k("delayed"), {job_id: at})
        else:
            # push to ready
            self._r.lpush(self._k("ready"), job_id)
        return job

    def _move_due_delayed_to_ready(self) -> None:
        """Promote delayed jobs whose due time has passed onto the ready list."""
        now_ts = int(datetime.now(timezone.utc).timestamp())
        ids = self._r.zrangebyscore(self._k("delayed"), "-inf", now_ts)
        if not ids:
            return
        pipe = self._r.pipeline()
        for jid in ids:
            # redis may return bytes or str depending on decode_responses
            jid_s = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
            pipe.lpush(self._k("ready"), jid_s)
            pipe.zrem(self._k("delayed"), jid_s)
        pipe.execute()

    def _requeue_timed_out_processing(self) -> None:
        """Return in-flight jobs whose visibility timeout expired to ready."""
        now_ts = int(datetime.now(timezone.utc).timestamp())
        ids = self._r.zrangebyscore(self._k("processing_vt"), "-inf", now_ts)
        if not ids:
            return
        pipe = self._r.pipeline()
        for jid in ids:
            jid_s = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
            pipe.lrem(self._k("processing"), 1, jid_s)
            pipe.lpush(self._k("ready"), jid_s)
            pipe.zrem(self._k("processing_vt"), jid_s)
            # clear stale visibility timestamp so next reservation can set a fresh one
            pipe.hdel(self._job_key(jid_s), "visible_at")
        pipe.execute()

    def reserve_next(self) -> Optional[Job]:
        """Reserve one job; returns it with attempts bumped, or None if idle.

        Also opportunistically promotes due delayed jobs and requeues
        timed-out in-flight jobs before popping from ready. Jobs whose
        attempt count would exceed max_attempts go to the DLQ instead.
        """
        # opportunistically move due delayed jobs
        self._move_due_delayed_to_ready()
        # move timed-out processing jobs back to ready before reserving
        self._requeue_timed_out_processing()
        jid = self._r.rpoplpush(self._k("ready"), self._k("processing"))
        if not jid:
            return None
        job_id = jid.decode() if isinstance(jid, (bytes, bytearray)) else str(jid)
        key = self._job_key(job_id)
        data = self._r.hgetall(key)
        if not data:
            # corrupted entry; ack and skip
            self._r.lrem(self._k("processing"), 1, job_id)
            return None

        # Decode fields
        # Tolerates clients returning either bytes or str hash keys/values.
        def _get(field: str, default: Optional[str] = None) -> Optional[str]:
            val = (
                data.get(field.encode())
                if isinstance(next(iter(data.keys())), bytes)
                else data.get(field)
            )
            if val is None:
                return default
            return val.decode() if isinstance(val, (bytes, bytearray)) else str(val)

        attempts = int(_get("attempts", "0")) + 1
        max_attempts = int(_get("max_attempts", "5"))
        backoff_seconds = int(_get("backoff_seconds", "60"))
        name = _get("name", "") or ""
        payload_json = _get("payload", "{}") or "{}"
        try:
            payload = json.loads(payload_json)
        except Exception:  # pragma: no cover
            payload = {}
        available_at_str = _get("available_at")
        available_at = (
            datetime.fromisoformat(available_at_str)
            if available_at_str
            else datetime.now(timezone.utc)
        )
        # If exceeded max_attempts → DLQ and skip
        if attempts > max_attempts:
            self._r.lrem(self._k("processing"), 1, job_id)
            self._r.lpush(self._k("dlq"), job_id)
            return None
        # Update attempts and visibility timeout
        visible_at = int(datetime.now(timezone.utc).timestamp()) + int(self._vt)
        pipe = self._r.pipeline()
        pipe.hset(key, mapping={"attempts": attempts, "visible_at": visible_at})
        pipe.zadd(self._k("processing_vt"), {job_id: visible_at})
        pipe.execute()
        return Job(
            id=job_id,
            name=name,
            payload=payload,
            available_at=available_at,
            attempts=attempts,
            max_attempts=max_attempts,
            backoff_seconds=backoff_seconds,
        )

    def ack(self, job_id: str) -> None:
        """Acknowledge success: remove every trace of the job."""
        self._r.lrem(self._k("processing"), 1, job_id)
        self._r.zrem(self._k("processing_vt"), job_id)
        self._r.delete(self._job_key(job_id))

    def fail(self, job_id: str, *, error: str | None = None) -> None:
        """Record a failure: DLQ at/after max attempts, else delayed retry."""
        key = self._job_key(job_id)
        data = self._r.hgetall(key)
        if not data:
            # nothing to do
            self._r.lrem(self._k("processing"), 1, job_id)
            return

        # Same bytes/str-tolerant hash accessor as in reserve_next().
        def _get(field: str, default: Optional[str] = None) -> Optional[str]:
            val = (
                data.get(field.encode())
                if isinstance(next(iter(data.keys())), bytes)
                else data.get(field)
            )
            if val is None:
                return default
            return val.decode() if isinstance(val, (bytes, bytearray)) else str(val)

        attempts = int(_get("attempts", "0"))
        max_attempts = int(_get("max_attempts", "5"))
        backoff_seconds = int(_get("backoff_seconds", "60"))
        now_ts = int(datetime.now(timezone.utc).timestamp())
        # DLQ if at or beyond max_attempts
        if attempts >= max_attempts:
            self._r.lrem(self._k("processing"), 1, job_id)
            self._r.zrem(self._k("processing_vt"), job_id)
            self._r.lpush(self._k("dlq"), job_id)
            return
        # Linear backoff: base * attempts so far.
        delay = backoff_seconds * max(1, attempts)
        available_at_ts = now_ts + delay
        mapping = {
            "last_error": error or "",
            "available_at": datetime.fromtimestamp(available_at_ts, tz=timezone.utc).isoformat(),
        }
        self._r.hset(key, mapping=mapping)
        self._r.lrem(self._k("processing"), 1, job_id)
        self._r.zrem(self._k("processing_vt"), job_id)
        self._r.zadd(self._k("delayed"), {job_id: available_at_ts})
@@ -0,0 +1,75 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import contextlib
5
+ from typing import Awaitable, Callable, Optional
6
+
7
+ from .queue import JobQueue
8
+
9
# Async handler invoked with each reserved job.
ProcessFunc = Callable[[object], Awaitable[None]]


class WorkerRunner:
    """Cooperative worker loop with graceful stop.

    - start(): begin polling the queue and processing jobs
    - stop(grace_seconds): signal stop, wait up to grace for current job to finish
    """

    def __init__(self, queue: JobQueue, handler: ProcessFunc, *, poll_interval: float = 0.25):
        self._queue = queue
        self._handler = handler
        # Sleep between empty polls; also bounds how long stop() waits for
        # the loop itself to wind down.
        self._poll_interval = poll_interval
        self._task: Optional[asyncio.Task] = None
        self._stopping = asyncio.Event()
        # Task currently processing a job, if any (awaited by stop()).
        self._inflight: Optional[asyncio.Task] = None

    async def _loop(self) -> None:
        """Poll → reserve → process loop; exits once _stopping is set."""
        try:
            while not self._stopping.is_set():
                job = self._queue.reserve_next()
                if not job:
                    await asyncio.sleep(self._poll_interval)
                    continue

                # Process one job; track in-flight task for stop()
                async def _run():
                    try:
                        await self._handler(job)
                    except Exception as exc:  # pragma: no cover
                        # Handler failed → report to the queue (retry/backoff)
                        # and do NOT ack.
                        self._queue.fail(job.id, error=str(exc))
                        return
                    self._queue.ack(job.id)

                self._inflight = asyncio.create_task(_run())
                try:
                    await self._inflight
                finally:
                    self._inflight = None
        finally:
            # exiting loop
            pass

    def start(self) -> asyncio.Task:
        """Start the loop (no-op if already running) and return its task."""
        if self._task is None or self._task.done():
            self._task = asyncio.create_task(self._loop())
        return self._task

    async def stop(self, *, grace_seconds: float = 10.0) -> None:
        """Signal stop, then wait up to *grace_seconds* for the in-flight job."""
        self._stopping.set()
        # Wait for in-flight job to complete, up to grace
        if self._inflight is not None and not self._inflight.done():
            try:
                # NOTE(review): asyncio.wait_for cancels the awaited task on
                # timeout, so the "give up" path actively cancels the handler
                # rather than letting it finish in the background.
                await asyncio.wait_for(self._inflight, timeout=grace_seconds)
            except asyncio.TimeoutError:
                # Give up; job will be retried if your queue supports visibility timeouts
                pass
        # Finally, wait for loop to exit (should be quick since stopping is set)
        if self._task is not None:
            try:
                await asyncio.wait_for(self._task, timeout=max(0.1, self._poll_interval + 0.1))
            except asyncio.TimeoutError:
                # Cancel as a last resort
                self._task.cancel()
                with contextlib.suppress(Exception):
                    await self._task
@@ -0,0 +1,41 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass
4
+ from datetime import datetime, timedelta, timezone
5
+ from typing import Awaitable, Callable, Dict
6
+
7
# Zero-arg async callable executed on each due tick.
CronFunc = Callable[[], Awaitable[None]]


@dataclass
class ScheduledTask:
    """Bookkeeping record for one periodic task."""

    name: str
    interval_seconds: int
    func: CronFunc
    next_run_at: datetime


class InMemoryScheduler:
    """Interval-based scheduler for simple periodic tasks (tests/local).

    Not a full cron parser. Tracks next_run_at per task.
    """

    def __init__(self):
        self._tasks: Dict[str, ScheduledTask] = {}

    def add_task(self, name: str, interval_seconds: int, func: CronFunc) -> None:
        """Register (or replace) a task; first run is one interval from now."""
        first_run = datetime.now(timezone.utc) + timedelta(seconds=interval_seconds)
        self._tasks[name] = ScheduledTask(
            name=name,
            interval_seconds=interval_seconds,
            func=func,
            next_run_at=first_run,
        )

    async def tick(self) -> None:
        """Run each due task once and push its next_run_at one interval out."""
        now = datetime.now(timezone.utc)
        for task in self._tasks.values():
            if task.next_run_at > now:
                continue
            await task.func()
            task.next_run_at = now + timedelta(seconds=task.interval_seconds)
+ task.next_run_at = now + timedelta(seconds=task.interval_seconds)