svc-infra 0.1.629__py3-none-any.whl → 0.1.631__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of svc-infra might be problematic. Click here for more details.

@@ -0,0 +1,230 @@
1
+ from __future__ import annotations
2
+
3
+ import inspect
4
+ from datetime import datetime, timezone
5
+ from typing import Any, Awaitable, Callable, Dict, Optional
6
+
7
+ from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
8
+
9
+ from svc_infra.jobs.queue import Job, JobQueue
10
+ from svc_infra.jobs.scheduler import InMemoryScheduler
11
+ from svc_infra.webhooks.service import WebhookService
12
+
13
+ from .async_service import AsyncBillingService
14
+
15
+
16
async def job_aggregate_daily(
    session: AsyncSession, *, tenant_id: str, metric: str, day_start: datetime
) -> None:
    """
    Aggregate usage for a tenant/metric for the given day_start (UTC).

    Naive ``day_start`` values are interpreted as UTC. Intended to be called
    from a scheduler/worker with an AsyncSession created by the host app.
    """
    # Normalize to an aware UTC timestamp before delegating.
    if day_start.tzinfo is None:
        day_start = day_start.replace(tzinfo=timezone.utc)
    service = AsyncBillingService(session=session, tenant_id=tenant_id)
    await service.aggregate_daily(metric=metric, day_start=day_start)
28
+
29
+
30
async def job_generate_monthly_invoice(
    session: AsyncSession,
    *,
    tenant_id: str,
    period_start: datetime,
    period_end: datetime,
    currency: str,
) -> str:
    """
    Generate a monthly invoice for a tenant between [period_start, period_end).

    Naive period bounds are interpreted as UTC. Returns the internal invoice id.
    """

    def _as_utc(dt: datetime) -> datetime:
        # Treat naive datetimes as UTC; leave aware ones untouched.
        return dt if dt.tzinfo is not None else dt.replace(tzinfo=timezone.utc)

    service = AsyncBillingService(session=session, tenant_id=tenant_id)
    return await service.generate_monthly_invoice(
        period_start=_as_utc(period_start),
        period_end=_as_utc(period_end),
        currency=currency,
    )
50
+
51
+
52
# -------- Job helpers and handlers (scheduler/worker wiring) ---------

# Queue job names: produced by the enqueue_* helpers below and dispatched on
# by the handler returned from make_billing_job_handler().
BILLING_AGGREGATE_JOB = "billing.aggregate_daily"
BILLING_INVOICE_JOB = "billing.generate_monthly_invoice"
56
+
57
+
58
def enqueue_aggregate_daily(
    queue: JobQueue,
    *,
    tenant_id: str,
    metric: str,
    day_start: datetime,
    delay_seconds: int = 0,
) -> None:
    """Enqueue a daily usage-aggregation job for a tenant/metric.

    Naive ``day_start`` values are interpreted as UTC — consistent with
    job_aggregate_daily. (Previously a naive datetime was passed straight to
    ``astimezone``, which interprets it as *local* time.)
    """
    if day_start.tzinfo is None:
        day_start = day_start.replace(tzinfo=timezone.utc)
    payload = {
        "tenant_id": tenant_id,
        "metric": metric,
        "day_start": day_start.astimezone(timezone.utc).isoformat(),
    }
    queue.enqueue(BILLING_AGGREGATE_JOB, payload, delay_seconds=delay_seconds)
72
+
73
+
74
def enqueue_generate_monthly_invoice(
    queue: JobQueue,
    *,
    tenant_id: str,
    period_start: datetime,
    period_end: datetime,
    currency: str,
    delay_seconds: int = 0,
) -> None:
    """Enqueue a monthly-invoice generation job for [period_start, period_end).

    Naive period bounds are interpreted as UTC — consistent with
    job_generate_monthly_invoice. (Previously naive datetimes were passed
    straight to ``astimezone``, which interprets them as *local* time.)
    """
    if period_start.tzinfo is None:
        period_start = period_start.replace(tzinfo=timezone.utc)
    if period_end.tzinfo is None:
        period_end = period_end.replace(tzinfo=timezone.utc)
    payload = {
        "tenant_id": tenant_id,
        "period_start": period_start.astimezone(timezone.utc).isoformat(),
        "period_end": period_end.astimezone(timezone.utc).isoformat(),
        "currency": currency,
    }
    queue.enqueue(BILLING_INVOICE_JOB, payload, delay_seconds=delay_seconds)
90
+
91
+
92
def make_daily_aggregate_tick(
    queue: JobQueue,
    *,
    tenant_id: str,
    metric: str,
    when: Optional[datetime] = None,
):
    """Return an async function that enqueues a daily aggregate job.

    This is a simple helper for local/dev schedulers; it schedules an aggregate
    for the UTC day of ``when`` (or now). Call repeatedly via a scheduler.
    """

    async def _tick():
        moment = when if when is not None else datetime.now(timezone.utc)
        # Truncate to midnight of the UTC day containing `moment`.
        midnight = moment.astimezone(timezone.utc).replace(
            hour=0, minute=0, second=0, microsecond=0
        )
        enqueue_aggregate_daily(queue, tenant_id=tenant_id, metric=metric, day_start=midnight)

    return _tick
111
+
112
+
113
def make_billing_job_handler(
    *,
    session_factory: "async_sessionmaker[AsyncSession]",
    webhooks: WebhookService,
) -> Callable[[Job], Awaitable[None]]:
    """Create a worker handler that processes billing jobs and emits webhooks.

    Supported jobs and their expected payloads:
    - billing.aggregate_daily {tenant_id, metric, day_start: ISO8601}
      → emits topic 'billing.usage_aggregated'
    - billing.generate_monthly_invoice {tenant_id, period_start: ISO8601, period_end: ISO8601, currency}
      → emits topic 'billing.invoice.created'

    Jobs with unknown names or incomplete payloads are ignored silently.
    """

    def _parse_utc(raw: Any) -> datetime:
        # Naive ISO timestamps are interpreted as UTC, consistent with the
        # job_* entry points in this module.
        dt = datetime.fromisoformat(str(raw))
        return dt if dt.tzinfo is not None else dt.replace(tzinfo=timezone.utc)

    async def _maybe_commit(session: Any) -> None:
        """Commit if the session exposes a commit method (await if coroutine).

        This makes the handler resilient in tests/dev where a dummy session is used.
        """
        commit = getattr(session, "commit", None)
        if callable(commit):
            result = commit()
            if inspect.isawaitable(result):
                await result

    async def _handler(job: Job) -> None:
        name = job.name
        data: Dict[str, Any] = job.payload or {}
        if name == BILLING_AGGREGATE_JOB:
            tenant_raw = data.get("tenant_id")
            metric_raw = data.get("metric")
            day_raw = data.get("day_start")
            # BUG FIX: validate the raw values *before* str() coercion. The
            # original checked str(data.get(...)), so a missing key became the
            # truthy string "None" and slipped past this guard.
            if not tenant_raw or not metric_raw or not day_raw:
                return
            tenant_id = str(tenant_raw)
            metric = str(metric_raw)
            day_start = _parse_utc(day_raw)
            async with session_factory() as session:
                svc = AsyncBillingService(session=session, tenant_id=tenant_id)
                total = await svc.aggregate_daily(metric=metric, day_start=day_start)
                await _maybe_commit(session)
                webhooks.publish(
                    "billing.usage_aggregated",
                    {
                        "tenant_id": tenant_id,
                        "metric": metric,
                        "day_start": day_start.astimezone(timezone.utc).isoformat(),
                        "total": int(total),
                    },
                )
            return
        if name == BILLING_INVOICE_JOB:
            tenant_raw = data.get("tenant_id")
            period_start_raw = data.get("period_start")
            period_end_raw = data.get("period_end")
            currency_raw = data.get("currency")
            # Same str()-before-check bug fixed here (tenant_id and currency).
            if not tenant_raw or not period_start_raw or not period_end_raw or not currency_raw:
                return
            tenant_id = str(tenant_raw)
            currency = str(currency_raw)
            period_start = _parse_utc(period_start_raw)
            period_end = _parse_utc(period_end_raw)
            async with session_factory() as session:
                svc = AsyncBillingService(session=session, tenant_id=tenant_id)
                invoice_id = await svc.generate_monthly_invoice(
                    period_start=period_start, period_end=period_end, currency=currency
                )
                await _maybe_commit(session)
                webhooks.publish(
                    "billing.invoice.created",
                    {
                        "tenant_id": tenant_id,
                        "invoice_id": invoice_id,
                        "period_start": period_start.astimezone(timezone.utc).isoformat(),
                        "period_end": period_end.astimezone(timezone.utc).isoformat(),
                        "currency": currency,
                    },
                )
            return
        # Ignore unrelated jobs

    return _handler
191
+
192
+
193
def add_billing_jobs(
    *,
    scheduler: InMemoryScheduler,
    queue: JobQueue,
    jobs: list[dict],
) -> None:
    """Register simple interval-based billing job enqueuers.

    jobs: list of dicts with shape {"name": "aggregate", "tenant_id": ..., "metric": ..., "interval_seconds": 86400}
    or {"name": "invoice", "tenant_id": ..., "period_start": ISO, "period_end": ISO, "currency": ..., "interval_seconds": 2592000}
    """

    for spec in jobs:
        kind = spec.get("name")
        every = int(spec.get("interval_seconds", 86400))

        if kind == "aggregate":
            tid = spec["tenant_id"]
            met = spec["metric"]

            # Loop variables are bound as parameter defaults to avoid the
            # late-binding closure pitfall.
            async def _aggregate_tick(tenant=tid, metric=met):
                # Enqueue for the current UTC day
                today = datetime.now(timezone.utc).replace(
                    hour=0, minute=0, second=0, microsecond=0
                )
                enqueue_aggregate_daily(queue, tenant_id=tenant, metric=metric, day_start=today)

            scheduler.add_task(f"billing.aggregate.{tid}.{met}", every, _aggregate_tick)
        elif kind == "invoice":
            tid = spec["tenant_id"]
            cur = spec["currency"]
            start_utc = datetime.fromisoformat(spec["period_start"]).astimezone(timezone.utc)
            end_utc = datetime.fromisoformat(spec["period_end"]).astimezone(timezone.utc)

            async def _invoice_tick(tenant=tid, currency=cur, ps=start_utc, pe=end_utc):
                enqueue_generate_monthly_invoice(
                    queue, tenant_id=tenant, period_start=ps, period_end=pe, currency=currency
                )

            scheduler.add_task(f"billing.invoice.{tid}", every, _invoice_tick)
@@ -0,0 +1,101 @@
1
+ from __future__ import annotations
2
+
3
+ from datetime import datetime, timezone
4
+ from typing import Annotated, Optional
5
+
6
+ from fastapi import Depends, HTTPException, status
7
+ from sqlalchemy import select
8
+ from sqlalchemy.ext.asyncio import AsyncSession
9
+
10
+ from svc_infra.api.fastapi.db.sql.session import SqlSessionDep
11
+ from svc_infra.api.fastapi.tenancy.context import TenantId
12
+
13
+ from .models import PlanEntitlement, Subscription, UsageAggregate
14
+
15
+
16
async def _current_subscription(session: AsyncSession, tenant_id: str) -> Optional[Subscription]:
    """Return the tenant's most recent subscription, or None if absent/ended."""
    now = datetime.now(tz=timezone.utc)
    stmt = (
        select(Subscription)
        .where(Subscription.tenant_id == tenant_id)
        .order_by(Subscription.effective_at.desc())
    )
    result = await session.execute(stmt)
    sub = result.scalars().first()
    if sub is None:
        return None
    # basic check: if ended_at is set and in the past, treat as inactive
    if sub.ended_at is not None and sub.ended_at <= now:
        return None
    return sub
35
+
36
+
37
def require_quota(metric: str, *, window: str = "day", soft: bool = True):
    """FastAPI dependency factory that enforces a per-tenant usage quota.

    Resolution order: no subscription → allow (unlimited); no matching
    entitlement → allow; otherwise compare the current window's aggregated
    usage against the entitlement limit. With ``soft=True`` an overage is
    allowed (header signalling is a TODO); with ``soft=False`` it raises 429.
    """

    async def _dep(tenant_id: TenantId, session: SqlSessionDep) -> None:
        sub = await _current_subscription(session, tenant_id)
        if sub is None:
            # no subscription → allow (unlimited) by default
            return

        ent_stmt = select(PlanEntitlement).where(
            PlanEntitlement.plan_id == sub.plan_id,
            PlanEntitlement.key == metric,
            PlanEntitlement.window == window,
        )
        ent = (await session.execute(ent_stmt)).scalars().first()
        if ent is None:
            # no entitlement → unlimited
            return

        # compute current window start
        now = datetime.now(tz=timezone.utc)
        if window == "month":
            period_start = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
            granularity = "month"  # we only aggregate per day, but future-proof
        else:
            # "day" and any unrecognized window both use the daily window.
            period_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
            granularity = "day"

        usage_stmt = select(UsageAggregate).where(
            UsageAggregate.tenant_id == tenant_id,
            UsageAggregate.metric == metric,
            UsageAggregate.granularity == granularity,  # v1 daily baseline
            UsageAggregate.period_start == period_start,
        )
        used_row = (await session.execute(usage_stmt)).scalars().first()
        used = int(used_row.total) if used_row else 0

        if used >= int(ent.limit_per_window):
            if soft:
                # allow but signal overage via header later (TODO: add header hook)
                return
            raise HTTPException(
                status_code=status.HTTP_429_TOO_MANY_REQUESTS,
                detail=f"Quota exceeded for {metric} in {window} window",
            )

    return _dep
97
+
98
+
99
# NOTE(review): `Depends(require_quota)` injects the *factory* itself, not a
# dependency built from it — FastAPI would treat `metric` as a query parameter
# and resolve to the inner `_dep` function without ever awaiting it, so this
# alias likely enforces nothing. Use `Depends(require_quota("<metric>"))` at
# the call site instead; confirm whether QuotaDep is intentional (it is also
# absent from __all__ below).
QuotaDep = Annotated[None, Depends(require_quota)]

__all__ = ["require_quota"]
@@ -0,0 +1,33 @@
1
+ from __future__ import annotations
2
+
3
+ from datetime import datetime
4
+ from typing import Optional
5
+
6
+ from pydantic import BaseModel, Field, conint
7
+
8
+
9
class UsageIn(BaseModel):
    """Inbound usage event submitted for metering."""

    # Metric name being reported (1–64 chars).
    metric: str = Field(..., min_length=1, max_length=64)
    amount: conint(ge=0) = Field(..., description="Non-negative amount for the metric")
    at: Optional[datetime] = Field(
        default=None, description="Event timestamp (UTC). Defaults to server time if omitted."
    )
    # Client-supplied key; presumably used to deduplicate retried submissions
    # of the same event — confirm against the ingestion handler.
    idempotency_key: str = Field(..., min_length=1, max_length=128)
    # Free-form metadata attached to the event; defaults to an empty dict.
    metadata: dict = Field(default_factory=dict)
17
+
18
+
19
class UsageAckOut(BaseModel):
    """Acknowledgement returned after a usage event is ingested."""

    # Identifier assigned to the stored usage event.
    id: str
    # Always True in this schema; present for explicitness in responses.
    accepted: bool = True
22
+
23
+
24
class UsageAggregateRow(BaseModel):
    """One aggregated usage bucket for a single metric."""

    # Start of the aggregation window (UTC per the module's conventions —
    # confirm against the aggregation producer).
    period_start: datetime
    # Window size label, e.g. "day"; free-form string in this schema.
    granularity: str
    metric: str
    total: int
29
+
30
+
31
class UsageAggregatesOut(BaseModel):
    """Page of aggregate rows with an optional continuation cursor."""

    items: list[UsageAggregateRow] = Field(default_factory=list)
    # Opaque cursor for the next page; None when there are no more results.
    next_cursor: Optional[str] = None
@@ -5,6 +5,8 @@ This module offers high-level decorators for read/write caching, cache invalidat
5
5
  and resource-based cache management.
6
6
  """
7
7
 
8
+ from .add import add_cache
9
+
8
10
  # Core decorators - main public API
9
11
  from .decorators import cached # alias for cache_read
10
12
  from .decorators import mutates # alias for cache_write
@@ -32,4 +34,6 @@ __all__ = [
32
34
  # Resource-based caching
33
35
  "resource",
34
36
  "entity",
37
+ # Easy integration helper
38
+ "add_cache",
35
39
  ]
svc_infra/cache/add.py ADDED
@@ -0,0 +1,158 @@
1
+ from __future__ import annotations
2
+
3
+ """
4
+ Easy integration helper to wire the cache backend into an ASGI app lifecycle.
5
+
6
+ Contract:
7
+ - Idempotent: multiple calls are safe; startup/shutdown handlers are registered once.
8
+ - Env-driven defaults: respects CACHE_URL/REDIS_URL, CACHE_PREFIX, CACHE_VERSION, APP_ENV.
9
+ - Lifecycle: registers startup (init + readiness probe) and shutdown (graceful close).
10
+ - Ergonomics: exposes the underlying cache instance at app.state.cache by default.
11
+
12
+ This does not replace the per-function decorators (`cache_read`, `cache_write`) and
13
+ does not alter existing direct APIs; it simply standardizes initialization and wiring.
14
+ """
15
+
16
+ import logging
17
+ import os
18
+ from typing import Any, Callable, Optional
19
+
20
+ from svc_infra.cache.backend import DEFAULT_READINESS_TIMEOUT
21
+ from svc_infra.cache.backend import instance as _instance
22
+ from svc_infra.cache.backend import setup_cache as _setup_cache
23
+ from svc_infra.cache.backend import shutdown_cache as _shutdown_cache
24
+ from svc_infra.cache.backend import wait_ready as _wait_ready
25
+
26
+ logger = logging.getLogger(__name__)
27
+
28
+
29
+ def _derive_settings(
30
+ url: Optional[str], prefix: Optional[str], version: Optional[str]
31
+ ) -> tuple[str, str, str]:
32
+ """Derive cache settings from parameters or environment variables.
33
+
34
+ Precedence:
35
+ - explicit function arguments
36
+ - environment variables (CACHE_URL/REDIS_URL, CACHE_PREFIX, CACHE_VERSION)
37
+ - sensible defaults (mem://, "svc", "v1")
38
+ """
39
+
40
+ derived_url = url or os.getenv("CACHE_URL") or os.getenv("REDIS_URL") or "mem://"
41
+ derived_prefix = prefix or os.getenv("CACHE_PREFIX") or "svc"
42
+ derived_version = version or os.getenv("CACHE_VERSION") or "v1"
43
+ return derived_url, derived_prefix, derived_version
44
+
45
+
46
def add_cache(
    app: Any | None = None,
    *,
    url: str | None = None,
    prefix: str | None = None,
    version: str | None = None,
    readiness_timeout: float | None = None,
    expose_state: bool = True,
    state_key: str = "cache",
) -> Callable[[], None]:
    """Wire cache initialization and lifecycle into the ASGI app.

    If an app is provided, registers startup/shutdown handlers. Otherwise performs
    immediate initialization (best-effort) without awaiting readiness.

    Args:
        app: ASGI application (FastAPI/Starlette-compatible) or None.
        url: Backend URL; falls back to CACHE_URL/REDIS_URL, then "mem://".
        prefix: Namespace prefix; falls back to CACHE_PREFIX, then "svc".
        version: Namespace version; falls back to CACHE_VERSION, then "v1".
        readiness_timeout: Seconds the startup probe waits; DEFAULT_READINESS_TIMEOUT when None.
        expose_state: When True, publish the cache instance on ``app.state``.
        state_key: Attribute name used on ``app.state`` (default "cache").

    Returns a no-op shutdown callable for API symmetry with other helpers.
    """

    # Compute effective settings (args > env vars > defaults).
    eff_url, eff_prefix, eff_version = _derive_settings(url, prefix, version)

    # If no app provided, do a simple init and return
    if app is None:
        try:
            _setup_cache(url=eff_url, prefix=eff_prefix, version=eff_version)
            logger.info(
                "Cache initialized (no app wiring): backend=%s namespace=%s",
                eff_url,
                f"{eff_prefix}:{eff_version}",
            )
        except Exception:
            # Best-effort: log and continue; without an app there is no
            # lifecycle to fail-fast in.
            logger.exception("Cache initialization failed (no app wiring)")
        return lambda: None

    # Idempotence: avoid duplicate wiring. The flag is stored on app.state so
    # repeated add_cache() calls become no-ops.
    try:
        state = getattr(app, "state", None)
        already = bool(getattr(state, "_svc_cache_wired", False))
    except Exception:
        state = None
        already = False

    if already:
        logger.debug("add_cache: app already wired; skipping re-registration")
        return lambda: None

    # Define lifecycle handlers
    async def _startup():
        _setup_cache(url=eff_url, prefix=eff_prefix, version=eff_version)
        try:
            await _wait_ready(timeout=readiness_timeout or DEFAULT_READINESS_TIMEOUT)
        except Exception:
            # Bubble up to fail fast on startup; tests and prod prefer visibility
            logger.exception("Cache readiness probe failed during startup")
            raise
        # Expose cache instance for convenience
        if expose_state and hasattr(app, "state"):
            try:
                setattr(app.state, state_key, _instance())
            except Exception:
                logger.debug("Unable to expose cache instance on app.state", exc_info=True)

    async def _shutdown():
        try:
            await _shutdown_cache()
        except Exception:
            # Best-effort; shutdown should not crash the app
            logger.debug("Cache shutdown encountered errors (ignored)", exc_info=True)

    # Register event handlers when supported
    register_ok = False
    try:
        if hasattr(app, "add_event_handler"):
            app.add_event_handler("startup", _startup)
            app.add_event_handler("shutdown", _shutdown)
            register_ok = True
    except Exception:
        register_ok = False

    if not register_ok:
        # Fallback: attempt FastAPI/Starlette .on_event decorators dynamically
        try:
            on_event = getattr(app, "on_event", None)
            if callable(on_event):
                on_event("startup")(_startup)  # type: ignore[misc]
                on_event("shutdown")(_shutdown)  # type: ignore[misc]
                register_ok = True
        except Exception:
            register_ok = False

    # Mark wired and expose state immediately if desired (startup will refresh
    # the exposed instance after initialization).
    if hasattr(app, "state"):
        try:
            setattr(app.state, "_svc_cache_wired", True)
            if expose_state and not hasattr(app.state, state_key):
                setattr(app.state, state_key, _instance())
        except Exception:
            # State may be unwritable on exotic test doubles; ignore.
            pass

    if register_ok:
        logger.info("Cache wired: url=%s namespace=%s", eff_url, f"{eff_prefix}:{eff_version}")
    else:
        # If we cannot register handlers, at least initialize now
        try:
            _setup_cache(url=eff_url, prefix=eff_prefix, version=eff_version)
        except Exception:
            logger.exception("Cache initialization failed (no event registration)")

    # Return a simple shutdown handle for symmetry with other add_* helpers
    return lambda: None
156
+
157
+
158
+ __all__ = ["add_cache"]
@@ -14,6 +14,40 @@ We need shared billing primitives to support both usage-based and subscription f
14
14
 
15
15
  Non-goals for v1: taxes/VAT, complex proration rules, refunds/credits automation, dunning flows, provider-specific webhooks/end-to-end reconciliation.
16
16
 
17
+ ## Analysis: APF Payments vs Billing Primitives
18
+
19
+ What APF Payments already covers (provider-facing):
20
+ - Subscriptions lifecycle via provider adapters and HTTP router
21
+ - Endpoints: create/update/cancel/get/list under `/payments/subscriptions` (see `api/fastapi/apf_payments/router.py`).
22
+ - Local mirror rows (e.g., `PaySubscription`) are persisted for reference, but state is owned by the provider (Stripe/Aiydan).
23
+ - Plans as Product + Price on the provider side
24
+ - APF Payments exposes products (`/payments/products`) and prices (`/payments/prices`). In Stripe semantics, a “plan” is represented by a product+price pair.
25
+ - There is no first-class internal Plan entity in APF Payments; plan semantics are encapsulated as provider product/price metadata.
26
+ - Invoices, invoice line items, and previews
27
+ - Create/finalize/void/pay invoices; add/list invoice lines; preview invoices — all via provider adapters.
28
+ - Usage records (metered billing) at the provider
29
+ - Create/list/get usage records mapped to provider subscription items or prices (`/payments/usage_records`).
30
+ - Cross-cutting:
31
+ - Tenant resolution, pagination, idempotency, and Problem+JSON errors are integrated.
32
+
33
+ What APF Payments does not cover (gaps filled by Billing Primitives):
34
+ - An internal, provider-agnostic Plan and Entitlement registry (keys, windows, limits).
35
+ - Quota enforcement at runtime (soft/hard limits) against internal entitlements.
36
+ - Internal usage ingestion and aggregation store independent of provider APIs
37
+ - `UsageEvent` and `UsageAggregate` tables, with idempotent ingestion and windowed rollups.
38
+ - Internal invoice modeling and generation from aggregates (not just provider invoices)
39
+ - `Invoice` and `InvoiceLine` entities produced from internal totals (jobs-based lifecycle).
40
+ - A dedicated `/_billing` router for usage ingestion and aggregate reads (tenant-scoped, RBAC-protected).
41
+
42
+ Where they intersect and can complement each other:
43
+ - You can continue to use APF Payments for provider-side subscriptions/invoices and also use Billing Primitives to meter internal features and enforce quotas.
44
+ - Optional bridging: a provider sync hook can map internally generated invoices/lines to provider invoices or payment intents when you want unified billing.
45
+ - Usage: internal `UsageEvent` can be mirrored to provider usage-records if desired, but internal aggregation enables analytics and quota decisions without provider round-trips.
46
+
47
+ Answering “Are plans and subscriptions covered in APF Payments?”
48
+ - Subscriptions: Yes — fully supported via `/payments/subscriptions` endpoints with adapters (Stripe/Aiydan). APF also persists a local `PaySubscription` record for reference.
49
+ - Plans: APF Payments does not expose a standalone internal Plan model. Instead, providers represent plans as Product + Price. Billing Primitives introduces an internal `Plan` and `PlanEntitlement` registry to support provider-agnostic limits and quotas.
50
+
17
51
  ## Decisions
18
52
 
19
53
  1) Internal-first data model with optional provider adapters
@@ -0,0 +1,54 @@
1
+ # ADR 0010: Timeouts & Resource Limits (A2)
2
+
3
+ ## Context
4
+ Services need consistent, configurable timeouts to protect against slowloris/body drip attacks, expensive handlers, slow downstreams, and long-running DB statements. Today we lack unified settings and middleware behavior; some httpx usages hard-code timeouts. We also want consistent Problem+JSON semantics for timeout errors.
5
+
6
+ ## Decision
7
+ Introduce environment-driven timeouts and wire them via FastAPI middlewares and helper factories:
8
+
9
+ - Request body read timeout: aborts slow body streaming (e.g., slowloris) with 408 Request Timeout.
10
+ - Overall request timeout: caps handler execution time and returns 504 Gateway Timeout.
11
+ - httpx client defaults: central helpers that pick a sane default timeout from env.
12
+ - DB statement timeout: future work (PG: SET LOCAL statement_timeout; SQLite/dev: asyncio.wait_for wrapper). Scoped in follow-ups.
13
+ - Graceful shutdown: track in-flight HTTP requests and wait up to grace period; provide worker runner with stop/grace.
14
+
15
+ ## Configuration
16
+ Environment variables (with suggested defaults):
17
+
18
+ - REQUEST_BODY_TIMEOUT_SECONDS: int, default 15 (prod), 30 (non-prod)
19
+ - REQUEST_TIMEOUT_SECONDS: int, default 30 (prod), 15 (non-prod)
20
+ - HTTP_CLIENT_TIMEOUT_SECONDS: float, default 10.0
21
+
22
+ These are read at process start. Services can override per-env.
23
+
24
+ ## Behavior
25
+ - Body read timeout → 408 application/problem+json with title "Request Timeout"; optional Retry-After not included by default.
26
+ - Handler timeout → 504 application/problem+json with title "Gateway Timeout"; include request trace_id in body if present.
27
+ - Errors use existing problem_response helper.
28
+
29
+ ## Placement
30
+ - Middlewares under svc_infra.api.fastapi.middleware.timeout
31
+ - Wiring in svc_infra.api.fastapi.setup._setup_middlewares (after RequestId, before error catching).
32
+ - httpx helpers under svc_infra.http.client: new_httpx_client/new_async_httpx_client with env-driven defaults.
33
+ - Graceful shutdown under svc_infra.api.fastapi.middleware.graceful_shutdown and svc_infra.jobs.runner.WorkerRunner.
34
+
35
+ ## Alternatives Considered
36
+ - Starlette TimeoutMiddleware: version support/behavior varies; custom middleware gives us consistent Problem+JSON and finer control across environments.
37
+
38
+ ## Consequences
39
+ - Adds two middlewares to every app created via setup_service_api/easy_service_app.
40
+ - Minor overhead per request; mitigated by simple asyncio.wait_for usage.
41
+
42
+ ## Follow-ups
43
+ - PG statement timeout integration; SQLite/dev wrapper.
44
+ - Jobs/webhook runner per-job timeout.
45
+ - Graceful shutdown drainage hooks for servers/workers.
46
+ - Acceptance tests A2-04..A2-06 per PLANS.
47
+
48
+ ## Change log
49
+ - 2025-10-21: Finalized httpx helpers design and placement; proceed to implementation.
50
+
51
+ ---
52
+ Status: Accepted
53
+ Date: 2025-10-21
54
+ Related: PLANS A2 — Timeouts & Resource Limits