svc-infra 0.1.600__py3-none-any.whl → 0.1.640__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of svc-infra might be problematic. Click here for more details.
- svc_infra/api/fastapi/admin/__init__.py +3 -0
- svc_infra/api/fastapi/admin/add.py +231 -0
- svc_infra/api/fastapi/billing/router.py +64 -0
- svc_infra/api/fastapi/billing/setup.py +19 -0
- svc_infra/api/fastapi/db/sql/add.py +32 -13
- svc_infra/api/fastapi/db/sql/crud_router.py +178 -16
- svc_infra/api/fastapi/db/sql/session.py +16 -0
- svc_infra/api/fastapi/dependencies/ratelimit.py +57 -7
- svc_infra/api/fastapi/docs/add.py +160 -0
- svc_infra/api/fastapi/docs/landing.py +1 -1
- svc_infra/api/fastapi/middleware/errors/handlers.py +45 -7
- svc_infra/api/fastapi/middleware/graceful_shutdown.py +87 -0
- svc_infra/api/fastapi/middleware/ratelimit.py +59 -1
- svc_infra/api/fastapi/middleware/ratelimit_store.py +12 -6
- svc_infra/api/fastapi/middleware/timeout.py +148 -0
- svc_infra/api/fastapi/openapi/mutators.py +114 -0
- svc_infra/api/fastapi/ops/add.py +73 -0
- svc_infra/api/fastapi/pagination.py +3 -1
- svc_infra/api/fastapi/routers/ping.py +1 -0
- svc_infra/api/fastapi/setup.py +11 -1
- svc_infra/api/fastapi/tenancy/add.py +19 -0
- svc_infra/api/fastapi/tenancy/context.py +112 -0
- svc_infra/app/README.md +5 -5
- svc_infra/billing/__init__.py +23 -0
- svc_infra/billing/async_service.py +147 -0
- svc_infra/billing/jobs.py +230 -0
- svc_infra/billing/models.py +131 -0
- svc_infra/billing/quotas.py +101 -0
- svc_infra/billing/schemas.py +33 -0
- svc_infra/billing/service.py +115 -0
- svc_infra/bundled_docs/README.md +5 -0
- svc_infra/bundled_docs/__init__.py +1 -0
- svc_infra/bundled_docs/getting-started.md +6 -0
- svc_infra/cache/__init__.py +4 -0
- svc_infra/cache/add.py +158 -0
- svc_infra/cache/backend.py +5 -2
- svc_infra/cache/decorators.py +19 -1
- svc_infra/cache/keys.py +24 -4
- svc_infra/cli/__init__.py +28 -8
- svc_infra/cli/cmds/__init__.py +8 -0
- svc_infra/cli/cmds/db/nosql/mongo/mongo_cmds.py +4 -3
- svc_infra/cli/cmds/db/nosql/mongo/mongo_scaffold_cmds.py +4 -4
- svc_infra/cli/cmds/db/sql/alembic_cmds.py +80 -11
- svc_infra/cli/cmds/db/sql/sql_export_cmds.py +80 -0
- svc_infra/cli/cmds/db/sql/sql_scaffold_cmds.py +3 -3
- svc_infra/cli/cmds/docs/docs_cmds.py +140 -0
- svc_infra/cli/cmds/dx/__init__.py +12 -0
- svc_infra/cli/cmds/dx/dx_cmds.py +99 -0
- svc_infra/cli/cmds/help.py +4 -0
- svc_infra/cli/cmds/obs/obs_cmds.py +4 -3
- svc_infra/cli/cmds/sdk/__init__.py +0 -0
- svc_infra/cli/cmds/sdk/sdk_cmds.py +102 -0
- svc_infra/data/add.py +61 -0
- svc_infra/data/backup.py +53 -0
- svc_infra/data/erasure.py +45 -0
- svc_infra/data/fixtures.py +40 -0
- svc_infra/data/retention.py +55 -0
- svc_infra/db/nosql/mongo/README.md +13 -13
- svc_infra/db/sql/repository.py +51 -11
- svc_infra/db/sql/resource.py +5 -0
- svc_infra/db/sql/templates/setup/env_async.py.tmpl +9 -1
- svc_infra/db/sql/templates/setup/env_sync.py.tmpl +9 -2
- svc_infra/db/sql/tenant.py +79 -0
- svc_infra/db/sql/utils.py +18 -4
- svc_infra/docs/acceptance-matrix.md +71 -0
- svc_infra/docs/acceptance.md +44 -0
- svc_infra/docs/admin.md +425 -0
- svc_infra/docs/adr/0002-background-jobs-and-scheduling.md +40 -0
- svc_infra/docs/adr/0003-webhooks-framework.md +24 -0
- svc_infra/docs/adr/0004-tenancy-model.md +42 -0
- svc_infra/docs/adr/0005-data-lifecycle.md +86 -0
- svc_infra/docs/adr/0006-ops-slos-and-metrics.md +47 -0
- svc_infra/docs/adr/0007-docs-and-sdks.md +83 -0
- svc_infra/docs/adr/0008-billing-primitives.md +143 -0
- svc_infra/docs/adr/0009-acceptance-harness.md +40 -0
- svc_infra/docs/adr/0010-timeouts-and-resource-limits.md +54 -0
- svc_infra/docs/adr/0011-admin-scope-and-impersonation.md +73 -0
- svc_infra/docs/api.md +59 -0
- svc_infra/docs/auth.md +11 -0
- svc_infra/docs/billing.md +190 -0
- svc_infra/docs/cache.md +76 -0
- svc_infra/docs/cli.md +74 -0
- svc_infra/docs/contributing.md +34 -0
- svc_infra/docs/data-lifecycle.md +52 -0
- svc_infra/docs/database.md +14 -0
- svc_infra/docs/docs-and-sdks.md +62 -0
- svc_infra/docs/environment.md +114 -0
- svc_infra/docs/getting-started.md +63 -0
- svc_infra/docs/idempotency.md +111 -0
- svc_infra/docs/jobs.md +67 -0
- svc_infra/docs/observability.md +16 -0
- svc_infra/docs/ops.md +37 -0
- svc_infra/docs/rate-limiting.md +125 -0
- svc_infra/docs/repo-review.md +48 -0
- svc_infra/docs/security.md +176 -0
- svc_infra/docs/tenancy.md +35 -0
- svc_infra/docs/timeouts-and-resource-limits.md +147 -0
- svc_infra/docs/webhooks.md +112 -0
- svc_infra/dx/add.py +63 -0
- svc_infra/dx/changelog.py +74 -0
- svc_infra/dx/checks.py +67 -0
- svc_infra/http/__init__.py +13 -0
- svc_infra/http/client.py +72 -0
- svc_infra/jobs/builtins/webhook_delivery.py +14 -2
- svc_infra/jobs/queue.py +9 -1
- svc_infra/jobs/runner.py +75 -0
- svc_infra/jobs/worker.py +17 -1
- svc_infra/mcp/svc_infra_mcp.py +85 -28
- svc_infra/obs/add.py +54 -7
- svc_infra/obs/grafana/dashboards/http-overview.json +45 -0
- svc_infra/security/headers.py +15 -2
- svc_infra/security/hibp.py +6 -2
- svc_infra/security/permissions.py +1 -0
- svc_infra/webhooks/service.py +10 -2
- {svc_infra-0.1.600.dist-info → svc_infra-0.1.640.dist-info}/METADATA +40 -14
- {svc_infra-0.1.600.dist-info → svc_infra-0.1.640.dist-info}/RECORD +118 -44
- {svc_infra-0.1.600.dist-info → svc_infra-0.1.640.dist-info}/WHEEL +0 -0
- {svc_infra-0.1.600.dist-info → svc_infra-0.1.640.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
# Webhooks Framework
|
|
2
|
+
|
|
3
|
+
This module provides primitives to publish events to external consumers via webhooks, verify inbound signatures, and handle robust retries using the shared JobQueue and Outbox patterns.
|
|
4
|
+
|
|
5
|
+
> ℹ️ Webhook helper environment expectations live in [Environment Reference](environment.md).
|
|
6
|
+
|
|
7
|
+
## Quickstart
|
|
8
|
+
|
|
9
|
+
- Subscriptions and publishing:
|
|
10
|
+
|
|
11
|
+
```python
|
|
12
|
+
from svc_infra.webhooks.service import InMemoryWebhookSubscriptions, WebhookService
|
|
13
|
+
from svc_infra.db.outbox import InMemoryOutboxStore
|
|
14
|
+
|
|
15
|
+
subs = InMemoryWebhookSubscriptions()
|
|
16
|
+
subs.add("invoice.created", "https://example.com/webhook", "sekrit")
|
|
17
|
+
svc = WebhookService(outbox=InMemoryOutboxStore(), subs=subs)
|
|
18
|
+
svc.publish("invoice.created", {"id": "inv_1", "version": 1})
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
- Delivery worker and headers:
|
|
22
|
+
|
|
23
|
+
```python
|
|
24
|
+
from svc_infra.jobs.builtins.webhook_delivery import make_webhook_handler
|
|
25
|
+
from svc_infra.jobs.worker import process_one
|
|
26
|
+
|
|
27
|
+
handler = make_webhook_handler(
|
|
28
|
+
outbox=..., inbox=..., get_webhook_url_for_topic=lambda t: url, get_secret_for_topic=lambda t: secret,
|
|
29
|
+
)
|
|
30
|
+
# process_one(queue, handler) will POST JSON with headers:
|
|
31
|
+
# X-Event-Id, X-Topic, X-Attempt, X-Signature (HMAC-SHA256), X-Signature-Alg, X-Signature-Version, X-Payload-Version
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
- Verification (FastAPI):
|
|
35
|
+
|
|
36
|
+
```python
|
|
37
|
+
from fastapi import Depends, FastAPI
|
|
38
|
+
from svc_infra.webhooks.fastapi import require_signature
|
|
39
|
+
from svc_infra.webhooks.signing import sign
|
|
40
|
+
|
|
41
|
+
app = FastAPI()
|
|
42
|
+
app.post("/webhook")(lambda body=Depends(require_signature(lambda: ["old","new"])): {"ok": True})
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
## FastAPI wiring
|
|
46
|
+
|
|
47
|
+
- Attach the router with shared in-memory stores (great for tests / local runs):
|
|
48
|
+
|
|
49
|
+
```python
|
|
50
|
+
from fastapi import FastAPI
|
|
51
|
+
|
|
52
|
+
from svc_infra.webhooks import add_webhooks
|
|
53
|
+
|
|
54
|
+
app = FastAPI()
|
|
55
|
+
add_webhooks(app)
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
- Respect environment overrides for Redis-backed stores by exporting `REDIS_URL`
|
|
59
|
+
and selecting the backend via `WEBHOOKS_OUTBOX=redis` (optional
|
|
60
|
+
`WEBHOOKS_INBOX=redis` for the dedupe store). The helper records the chosen
|
|
61
|
+
instances on `app.state` for further customisation:
|
|
62
|
+
|
|
63
|
+
```python
|
|
64
|
+
import os
|
|
65
|
+
|
|
66
|
+
os.environ["WEBHOOKS_OUTBOX"] = "redis"
|
|
67
|
+
os.environ["WEBHOOKS_INBOX"] = "redis"
|
|
68
|
+
|
|
69
|
+
app = FastAPI()
|
|
70
|
+
add_webhooks(app) # creates RedisOutboxStore / RedisInboxStore when redis-py is available
|
|
71
|
+
|
|
72
|
+
# Later you can inspect or extend behaviour:
|
|
73
|
+
app.state.webhooks_subscriptions.add("invoice.created", "https://example.com/webhook", "sekrit")
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
- Provide explicit overrides (e.g. dependency-injected SQL stores) or reuse your
|
|
77
|
+
existing job queue / scheduler. Passing a queue automatically registers the
|
|
78
|
+
outbox tick and delivery handler so your worker loop can process jobs:
|
|
79
|
+
|
|
80
|
+
```python
|
|
81
|
+
from svc_infra.jobs.easy import easy_jobs
|
|
82
|
+
|
|
83
|
+
queue, scheduler = easy_jobs()
|
|
84
|
+
|
|
85
|
+
add_webhooks(
|
|
86
|
+
app,
|
|
87
|
+
outbox=my_outbox_store,
|
|
88
|
+
inbox=lambda: my_inbox_store, # factories are supported
|
|
89
|
+
queue=queue,
|
|
90
|
+
scheduler=scheduler,
|
|
91
|
+
)
|
|
92
|
+
|
|
93
|
+
# scheduler.add_task(...) is handled internally when both queue and scheduler are supplied
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
## Runner wiring
|
|
97
|
+
|
|
98
|
+
If you prefer explicit wiring, you can still register the tick manually:
|
|
99
|
+
|
|
100
|
+
```python
|
|
101
|
+
from svc_infra.jobs.easy import easy_jobs
|
|
102
|
+
from svc_infra.jobs.builtins.outbox_processor import make_outbox_tick
|
|
103
|
+
|
|
104
|
+
queue, scheduler = easy_jobs() # uses JOBS_DRIVER and REDIS_URL
|
|
105
|
+
scheduler.add_task("outbox", 1, make_outbox_tick(outbox_store, queue))
|
|
106
|
+
# Start runner: `svc-infra jobs run`
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
## Notes
|
|
110
|
+
- Retries/backoff are handled by the JobQueue; delivery marks Inbox after success to prevent duplicates.
|
|
111
|
+
- For production subscriptions and inbox/outbox, provide persistent implementations and override DI in your app.
|
|
112
|
+
- Signature rotation is supported via `verify_any` and the FastAPI dependency accepting multiple secrets.
|
svc_infra/dx/add.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def write_ci_workflow(
    *,
    target_dir: str | Path,
    name: str = "ci.yml",
    python_version: str = "3.12",
) -> Path:
    """Write a minimal CI workflow file (GitHub Actions) with tests/lint/type steps.

    Args:
        target_dir: Project root; the workflow is created under ``.github/workflows/``.
        name: Workflow file name (default ``ci.yml``).
        python_version: Interpreter version passed to ``actions/setup-python``.

    Returns:
        Path to the written workflow file.
    """
    p = Path(target_dir) / ".github" / "workflows" / name
    p.parent.mkdir(parents=True, exist_ok=True)
    content = f"""
name: CI

on:
  push:
    branches: [ main ]
  pull_request:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '{python_version}'
      - name: Install Poetry
        run: pipx install poetry
      - name: Install deps
        run: poetry install
      - name: Lint
        run: poetry run flake8 --select=E,F
      - name: Typecheck
        run: poetry run mypy src
      - name: Tests
        run: poetry run pytest -q -W error
"""
    # Explicit UTF-8: Path.write_text defaults to the locale's preferred
    # encoding, which can corrupt the file on non-UTF-8 systems.
    p.write_text(content.strip() + "\n", encoding="utf-8")
    return p
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def write_openapi_lint_config(*, target_dir: str | Path, name: str = ".redocly.yaml") -> Path:
    """Write a minimal OpenAPI lint config placeholder (Redocly).

    Args:
        target_dir: Directory to write the config into (created if missing).
        name: Config file name (default ``.redocly.yaml``).

    Returns:
        Path to the written config file.
    """
    p = Path(target_dir) / name
    # Create the target directory if needed, mirroring write_ci_workflow;
    # previously a missing directory made write_text raise FileNotFoundError.
    p.parent.mkdir(parents=True, exist_ok=True)
    content = """
apis:
  main:
    root: openapi.json

rules:
  operation-operationId: warn
  no-unused-components: warn
  security-defined: off
"""
    # Explicit UTF-8: Path.write_text defaults to the locale's preferred
    # encoding, which is not portable.
    p.write_text(content.strip() + "\n", encoding="utf-8")
    return p
|
61
|
+
|
|
62
|
+
|
|
63
|
+
__all__ = ["write_ci_workflow", "write_openapi_lint_config"]
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
from datetime import date as _date
|
|
5
|
+
from typing import Sequence
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
@dataclass(frozen=True)
class Commit:
    """Immutable record of one VCS commit used for changelog generation."""

    # Commit identifier; rendered verbatim in the changelog bullet.
    sha: str
    # First line of the commit message, ideally Conventional Commits style
    # (e.g. "feat(scope): add thing").
    subject: str
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
_SECTION_ORDER = [
|
|
15
|
+
("feat", "Features"),
|
|
16
|
+
("fix", "Bug Fixes"),
|
|
17
|
+
("perf", "Performance"),
|
|
18
|
+
("refactor", "Refactors"),
|
|
19
|
+
]
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _classify(subject: str) -> tuple[str, str]:
|
|
23
|
+
"""Return (type, title) where title is display name of the section."""
|
|
24
|
+
lower = subject.strip().lower()
|
|
25
|
+
for t, title in _SECTION_ORDER:
|
|
26
|
+
if lower.startswith(t + ":") or lower.startswith(t + "("):
|
|
27
|
+
return (t, title)
|
|
28
|
+
return ("other", "Other")
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def _format_item(commit: Commit) -> str:
|
|
32
|
+
subj = commit.subject.strip()
|
|
33
|
+
# Strip leading type(scope): if present
|
|
34
|
+
i = subj.find(": ")
|
|
35
|
+
if i != -1 and i < 20: # conventional commit prefix
|
|
36
|
+
pretty = subj[i + 2 :].strip()
|
|
37
|
+
else:
|
|
38
|
+
pretty = subj
|
|
39
|
+
return f"- {pretty} ({commit.sha})"
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def generate_release_section(
    *,
    version: str,
    commits: Sequence[Commit],
    release_date: str | None = None,
) -> str:
    """Generate a markdown release section from commits.

    Group by type: feat, fix, perf, refactor; everything else under Other.
    """
    # Default the heading date to today (ISO format) when not supplied.
    day = release_date if release_date is not None else _date.today().isoformat()

    buckets: dict[str, list[str]] = {key: [] for key, _ in _SECTION_ORDER}
    buckets["other"] = []
    for commit in commits:
        kind, _ = _classify(commit.subject)
        # setdefault keeps this safe even if _classify ever grows a new type.
        buckets.setdefault(kind, []).append(_format_item(commit))

    out: list[str] = [f"## v{version} - {day}", ""]
    for key, title in [*_SECTION_ORDER, ("other", "Other")]:
        entries = buckets.get(key) or []
        if not entries:
            # Empty sections are omitted entirely.
            continue
        out.append(f"### {title}")
        out.extend(entries)
        out.append("")

    return "\n".join(out).rstrip() + "\n"
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
__all__ = ["Commit", "generate_release_section"]
|
svc_infra/dx/checks.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def _load_json(path: str | Path) -> dict:
|
|
7
|
+
import json
|
|
8
|
+
|
|
9
|
+
p = Path(path)
|
|
10
|
+
return json.loads(p.read_text())
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def check_openapi_problem_schema(
|
|
14
|
+
schema: dict | None = None, *, path: str | Path | None = None
|
|
15
|
+
) -> None:
|
|
16
|
+
"""Validate OpenAPI has a Problem schema with required fields and formats.
|
|
17
|
+
|
|
18
|
+
Raises ValueError with a descriptive message on failure.
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
if schema is None:
|
|
22
|
+
if path is None:
|
|
23
|
+
raise ValueError("either schema or path must be provided")
|
|
24
|
+
schema = _load_json(path)
|
|
25
|
+
|
|
26
|
+
comps = (schema or {}).get("components") or {}
|
|
27
|
+
prob = (comps.get("schemas") or {}).get("Problem")
|
|
28
|
+
if not isinstance(prob, dict):
|
|
29
|
+
raise ValueError("Problem schema missing under components.schemas.Problem")
|
|
30
|
+
|
|
31
|
+
props = prob.get("properties") or {}
|
|
32
|
+
# Required keys presence
|
|
33
|
+
for key in ("type", "title", "status", "detail", "instance", "code"):
|
|
34
|
+
if key not in props:
|
|
35
|
+
raise ValueError(f"Problem.{key} missing in properties")
|
|
36
|
+
|
|
37
|
+
# instance must be uri-reference per our convention
|
|
38
|
+
inst = props.get("instance") or {}
|
|
39
|
+
if inst.get("format") != "uri-reference":
|
|
40
|
+
raise ValueError("Problem.instance must have format 'uri-reference'")
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def check_migrations_up_to_date(*, project_root: str | Path = ".") -> None:
    """Best-effort migrations check: passes if alembic env present and head is reachable.

    This is a lightweight stub that can be extended per-project. For now, it checks
    that an Alembic env exists when 'alembic.ini' is present; it does not execute DB calls.
    """
    root = Path(project_root)
    if not (root / "alembic.ini").exists():
        # No alembic.ini means there is no Alembic setup to validate.
        return
    migrations = root / "migrations"
    if not migrations.exists():
        # Alternative layouts configure a different script location via env;
        # stay permissive rather than guessing.
        return
    if not (migrations / "versions").exists():
        raise ValueError("Alembic migrations directory missing versions/ subfolder")
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
__all__ = [
|
|
65
|
+
"check_openapi_problem_schema",
|
|
66
|
+
"check_migrations_up_to_date",
|
|
67
|
+
]
|
svc_infra/http/client.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from typing import Any, Dict, Optional
|
|
5
|
+
|
|
6
|
+
import httpx
|
|
7
|
+
|
|
8
|
+
from svc_infra.app.env import pick
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _parse_float_env(name: str, default: float) -> float:
|
|
12
|
+
raw = os.getenv(name)
|
|
13
|
+
if raw is None or raw == "":
|
|
14
|
+
return default
|
|
15
|
+
try:
|
|
16
|
+
return float(raw)
|
|
17
|
+
except ValueError:
|
|
18
|
+
return default
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def get_default_timeout_seconds() -> float:
    """Return default outbound HTTP client timeout in seconds.

    Env var: HTTP_CLIENT_TIMEOUT_SECONDS (float)
    Defaults: 10.0 seconds for all envs unless overridden; tweakable via pick() if needed.
    """
    fallback = pick(prod=10.0, nonprod=10.0)
    return _parse_float_env("HTTP_CLIENT_TIMEOUT_SECONDS", fallback)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def make_timeout(seconds: float | None = None) -> httpx.Timeout:
    """Build an httpx.Timeout, defaulting to the env-configured timeout when
    *seconds* is None."""
    s = seconds if seconds is not None else get_default_timeout_seconds()
    # Apply same timeout for connect/read/write/pool for simplicity
    return httpx.Timeout(timeout=s)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def new_httpx_client(
    *,
    timeout_seconds: Optional[float] = None,
    headers: Optional[Dict[str, str]] = None,
    base_url: Optional[str] = None,
    **kwargs: Any,
) -> httpx.Client:
    """Create a sync httpx Client with default timeout and optional headers/base_url.

    Callers can override timeout_seconds; remaining kwargs are forwarded to httpx.Client.
    """
    options: Dict[str, Any] = {
        "timeout": make_timeout(timeout_seconds),
        "headers": headers,
        **kwargs,
    }
    # httpx doesn't accept base_url=None; only pass if non-None
    if base_url is not None:
        options["base_url"] = base_url
    return httpx.Client(**options)
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def new_async_httpx_client(
    *,
    timeout_seconds: Optional[float] = None,
    headers: Optional[Dict[str, str]] = None,
    base_url: Optional[str] = None,
    **kwargs: Any,
) -> httpx.AsyncClient:
    """Create an async httpx AsyncClient with default timeout and optional headers/base_url.

    Callers can override timeout_seconds; remaining kwargs are forwarded to httpx.AsyncClient.
    """
    options: Dict[str, Any] = {
        "timeout": make_timeout(timeout_seconds),
        "headers": headers,
        **kwargs,
    }
    # httpx doesn't accept base_url=None; only pass if non-None
    if base_url is not None:
        options["base_url"] = base_url
    return httpx.AsyncClient(**options)
|
|
@@ -1,9 +1,10 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
-
import
|
|
3
|
+
import os
|
|
4
4
|
|
|
5
5
|
from svc_infra.db.inbox import InboxStore
|
|
6
6
|
from svc_infra.db.outbox import OutboxStore
|
|
7
|
+
from svc_infra.http import get_default_timeout_seconds, new_async_httpx_client
|
|
7
8
|
from svc_infra.jobs.queue import Job
|
|
8
9
|
from svc_infra.webhooks.signing import sign
|
|
9
10
|
|
|
@@ -65,7 +66,18 @@ def make_webhook_handler(
|
|
|
65
66
|
version = delivery_payload.get("version")
|
|
66
67
|
if version is not None:
|
|
67
68
|
headers["X-Payload-Version"] = str(version)
|
|
68
|
-
|
|
69
|
+
# Derive timeout: dedicated WEBHOOK_DELIVERY_TIMEOUT_SECONDS or default HTTP client timeout
|
|
70
|
+
timeout_seconds = None
|
|
71
|
+
env_timeout = os.getenv("WEBHOOK_DELIVERY_TIMEOUT_SECONDS")
|
|
72
|
+
if env_timeout:
|
|
73
|
+
try:
|
|
74
|
+
timeout_seconds = float(env_timeout)
|
|
75
|
+
except ValueError:
|
|
76
|
+
timeout_seconds = get_default_timeout_seconds()
|
|
77
|
+
else:
|
|
78
|
+
timeout_seconds = get_default_timeout_seconds()
|
|
79
|
+
|
|
80
|
+
async with new_async_httpx_client(timeout_seconds=timeout_seconds) as client:
|
|
69
81
|
resp = await client.post(url, json=delivery_payload, headers=headers)
|
|
70
82
|
if 200 <= resp.status_code < 300:
|
|
71
83
|
# record delivery and mark processed
|
svc_infra/jobs/queue.py
CHANGED
|
@@ -69,5 +69,13 @@ class InMemoryJobQueue:
|
|
|
69
69
|
job.last_error = error
|
|
70
70
|
# Exponential backoff: base * attempts
|
|
71
71
|
delay = job.backoff_seconds * max(1, job.attempts)
|
|
72
|
-
|
|
72
|
+
if delay > 0:
|
|
73
|
+
# Add a tiny fudge so an immediate subsequent poll in ultra-fast
|
|
74
|
+
# environments (like our acceptance API) doesn't re-reserve the job.
|
|
75
|
+
# This keeps tests deterministic without impacting semantics.
|
|
76
|
+
job.available_at = now + timedelta(seconds=delay, milliseconds=250)
|
|
77
|
+
else:
|
|
78
|
+
# When backoff is explicitly zero (e.g., unit tests forcing
|
|
79
|
+
# immediate retry), make the job available right away.
|
|
80
|
+
job.available_at = now
|
|
73
81
|
return
|
svc_infra/jobs/runner.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import contextlib
|
|
5
|
+
from typing import Awaitable, Callable, Optional
|
|
6
|
+
|
|
7
|
+
from .queue import JobQueue
|
|
8
|
+
|
|
9
|
+
ProcessFunc = Callable[[object], Awaitable[None]]
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class WorkerRunner:
    """Cooperative worker loop with graceful stop.

    - start(): begin polling the queue and processing jobs
    - stop(grace_seconds): signal stop, wait up to grace for current job to finish
    """

    def __init__(self, queue: JobQueue, handler: ProcessFunc, *, poll_interval: float = 0.25):
        # poll_interval: sleep between empty-queue polls, in seconds.
        self._queue = queue
        self._handler = handler
        self._poll_interval = poll_interval
        # Task running _loop(); created lazily by start().
        self._task: Optional[asyncio.Task] = None
        # Set to request a graceful shutdown; checked once per loop iteration.
        self._stopping = asyncio.Event()
        # Task for the job currently being processed, if any, so stop() can
        # wait for it. NOTE(review): created in __init__, so this Event binds
        # to the loop active at construction time on older Pythons — confirm
        # construction happens inside the running loop.
        self._inflight: Optional[asyncio.Task] = None

    async def _loop(self) -> None:
        # Main poll/process loop; exits when _stopping is set.
        try:
            while not self._stopping.is_set():
                job = self._queue.reserve_next()
                if not job:
                    # Queue empty: back off briefly before polling again.
                    await asyncio.sleep(self._poll_interval)
                    continue

                # Process one job; track in-flight task for stop()
                async def _run():
                    try:
                        await self._handler(job)
                    except Exception as exc:  # pragma: no cover
                        # Handler failure: record the error and requeue per
                        # the queue's retry/backoff policy.
                        self._queue.fail(job.id, error=str(exc))
                        return
                    # Only ack after the handler completed without raising.
                    self._queue.ack(job.id)

                self._inflight = asyncio.create_task(_run())
                try:
                    await self._inflight
                finally:
                    self._inflight = None
        finally:
            # exiting loop
            pass

    def start(self) -> asyncio.Task:
        # Idempotent: reuses the existing loop task unless it already finished.
        if self._task is None or self._task.done():
            self._task = asyncio.create_task(self._loop())
        return self._task

    async def stop(self, *, grace_seconds: float = 10.0) -> None:
        self._stopping.set()
        # Wait for in-flight job to complete, up to grace
        if self._inflight is not None and not self._inflight.done():
            try:
                await asyncio.wait_for(self._inflight, timeout=grace_seconds)
            except asyncio.TimeoutError:
                # Give up; job will be retried if your queue supports visibility timeouts
                # NOTE(review): wait_for cancels the in-flight task on timeout,
                # so the handler is interrupted (neither acked nor failed) —
                # confirm that relying on visibility timeouts is intended.
                pass
        # Finally, wait for loop to exit (should be quick since stopping is set)
        if self._task is not None:
            try:
                await asyncio.wait_for(self._task, timeout=max(0.1, self._poll_interval + 0.1))
            except asyncio.TimeoutError:
                # Cancel as a last resort
                self._task.cancel()
                with contextlib.suppress(Exception):
                    await self._task
|
svc_infra/jobs/worker.py
CHANGED
|
@@ -1,5 +1,7 @@
|
|
|
1
1
|
from __future__ import annotations
|
|
2
2
|
|
|
3
|
+
import asyncio
|
|
4
|
+
import os
|
|
3
5
|
from typing import Awaitable, Callable
|
|
4
6
|
|
|
5
7
|
from .queue import Job, JobQueue
|
|
@@ -7,6 +9,16 @@ from .queue import Job, JobQueue
|
|
|
7
9
|
ProcessFunc = Callable[[Job], Awaitable[None]]
|
|
8
10
|
|
|
9
11
|
|
|
12
|
+
def _get_job_timeout_seconds() -> float | None:
|
|
13
|
+
raw = os.getenv("JOB_DEFAULT_TIMEOUT_SECONDS")
|
|
14
|
+
if not raw:
|
|
15
|
+
return None
|
|
16
|
+
try:
|
|
17
|
+
return float(raw)
|
|
18
|
+
except ValueError:
|
|
19
|
+
return None
|
|
20
|
+
|
|
21
|
+
|
|
10
22
|
async def process_one(queue: JobQueue, handler: ProcessFunc) -> bool:
|
|
11
23
|
"""Reserve a job, process with handler, ack on success or fail with backoff.
|
|
12
24
|
|
|
@@ -16,7 +28,11 @@ async def process_one(queue: JobQueue, handler: ProcessFunc) -> bool:
|
|
|
16
28
|
if not job:
|
|
17
29
|
return False
|
|
18
30
|
try:
|
|
19
|
-
|
|
31
|
+
timeout = _get_job_timeout_seconds()
|
|
32
|
+
if timeout and timeout > 0:
|
|
33
|
+
await asyncio.wait_for(handler(job), timeout=timeout)
|
|
34
|
+
else:
|
|
35
|
+
await handler(job)
|
|
20
36
|
except Exception as exc: # pragma: no cover - exercise in tests by raising
|
|
21
37
|
queue.fail(job.id, error=str(exc))
|
|
22
38
|
return True
|