supython 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supython/__init__.py +8 -0
- supython/admin/__init__.py +3 -0
- supython/admin/api/__init__.py +24 -0
- supython/admin/api/auth.py +118 -0
- supython/admin/api/auth_templates.py +67 -0
- supython/admin/api/auth_users.py +225 -0
- supython/admin/api/db.py +174 -0
- supython/admin/api/functions.py +92 -0
- supython/admin/api/jobs.py +192 -0
- supython/admin/api/ops.py +224 -0
- supython/admin/api/realtime.py +281 -0
- supython/admin/api/service_auth.py +49 -0
- supython/admin/api/service_auth_templates.py +83 -0
- supython/admin/api/service_auth_users.py +346 -0
- supython/admin/api/service_db.py +214 -0
- supython/admin/api/service_functions.py +287 -0
- supython/admin/api/service_jobs.py +282 -0
- supython/admin/api/service_ops.py +213 -0
- supython/admin/api/service_realtime.py +30 -0
- supython/admin/api/service_storage.py +220 -0
- supython/admin/api/storage.py +117 -0
- supython/admin/api/system.py +37 -0
- supython/admin/audit.py +29 -0
- supython/admin/deps.py +22 -0
- supython/admin/errors.py +16 -0
- supython/admin/schemas.py +310 -0
- supython/admin/session.py +52 -0
- supython/admin/spa.py +38 -0
- supython/admin/static/assets/Alert-dluGVkos.js +49 -0
- supython/admin/static/assets/Audit-Njung3HI.js +2 -0
- supython/admin/static/assets/Backups-DzPlFgrm.js +2 -0
- supython/admin/static/assets/Buckets-ByacGkU1.js +2 -0
- supython/admin/static/assets/Channels-BoIuTtam.js +353 -0
- supython/admin/static/assets/ChevronRight-CtQH1EQ1.js +2 -0
- supython/admin/static/assets/CodeViewer-Bqy7-wvH.js +2 -0
- supython/admin/static/assets/Crons-B67vc39F.js +2 -0
- supython/admin/static/assets/DashboardView-CUTFVL6k.js +2 -0
- supython/admin/static/assets/DataTable-COAAWEft.js +747 -0
- supython/admin/static/assets/DescriptionsItem-P8JUDaBs.js +75 -0
- supython/admin/static/assets/DrawerContent-TpYTFgF1.js +139 -0
- supython/admin/static/assets/Empty-cr2r7e2u.js +25 -0
- supython/admin/static/assets/EmptyState-DeDck-OL.js +2 -0
- supython/admin/static/assets/Grid-hFkp9F4P.js +2 -0
- supython/admin/static/assets/Input-DppYTq9C.js +259 -0
- supython/admin/static/assets/Invoke-DW3Nveeh.js +2 -0
- supython/admin/static/assets/JsonField-DibyJgun.js +2 -0
- supython/admin/static/assets/LoginView-BjLyE3Ds.css +1 -0
- supython/admin/static/assets/LoginView-CoOjECT_.js +111 -0
- supython/admin/static/assets/Logs-D9WYrnIT.js +2 -0
- supython/admin/static/assets/Logs-DS1XPa0h.css +1 -0
- supython/admin/static/assets/Migrations-DOSC2ddQ.js +2 -0
- supython/admin/static/assets/ObjectBrowser-_5w8vOX8.js +2 -0
- supython/admin/static/assets/Queue-CywZs6vI.js +2 -0
- supython/admin/static/assets/RefreshTokens-Ccjr53jg.js +2 -0
- supython/admin/static/assets/RlsEditor-BSlH9vSc.js +2 -0
- supython/admin/static/assets/Routes-BiLXE49D.js +2 -0
- supython/admin/static/assets/Routes-C-ianIGD.css +1 -0
- supython/admin/static/assets/SchemaBrowser-DKy2_KQi.css +1 -0
- supython/admin/static/assets/SchemaBrowser-XFvFbtDB.js +2 -0
- supython/admin/static/assets/Select-DIzZyRZb.js +434 -0
- supython/admin/static/assets/Space-n5-XcguU.js +400 -0
- supython/admin/static/assets/SqlEditor-b8pTsILY.js +3 -0
- supython/admin/static/assets/SqlWorkspace-BUS7IntH.js +104 -0
- supython/admin/static/assets/TableData-CQIagLKn.js +2 -0
- supython/admin/static/assets/Tag-D1fOKpTH.js +72 -0
- supython/admin/static/assets/Templates-BS-ugkdq.js +2 -0
- supython/admin/static/assets/Thing-CEAniuMg.js +107 -0
- supython/admin/static/assets/Users-wzwajhlh.js +2 -0
- supython/admin/static/assets/_plugin-vue_export-helper-DGA9ry_j.js +1 -0
- supython/admin/static/assets/dist-VXIJLCYq.js +13 -0
- supython/admin/static/assets/format-length-CGCY1rMh.js +2 -0
- supython/admin/static/assets/get-Ca6unauB.js +2 -0
- supython/admin/static/assets/index-CeE6v959.js +951 -0
- supython/admin/static/assets/pinia-COXwfrOX.js +2 -0
- supython/admin/static/assets/resources-Bt6thQCD.js +44 -0
- supython/admin/static/assets/use-locale-mtgM0a3a.js +2 -0
- supython/admin/static/assets/use-merged-state-BvhkaHNX.js +2 -0
- supython/admin/static/assets/useConfirm-tMjvBFXR.js +2 -0
- supython/admin/static/assets/useResource-C_rJCY8C.js +2 -0
- supython/admin/static/assets/useTable-CnZc5zhi.js +363 -0
- supython/admin/static/assets/useTable-Dg0XlRlq.css +1 -0
- supython/admin/static/assets/useToast-DsZKx0IX.js +2 -0
- supython/admin/static/assets/utils-sbXoq7Ir.js +2 -0
- supython/admin/static/favicon.svg +1 -0
- supython/admin/static/icons.svg +24 -0
- supython/admin/static/index.html +24 -0
- supython/app.py +149 -0
- supython/auth/__init__.py +3 -0
- supython/auth/_email_job.py +11 -0
- supython/auth/providers/__init__.py +34 -0
- supython/auth/providers/github.py +22 -0
- supython/auth/providers/google.py +19 -0
- supython/auth/providers/oauth.py +56 -0
- supython/auth/providers/registry.py +16 -0
- supython/auth/ratelimit.py +39 -0
- supython/auth/router.py +282 -0
- supython/auth/schemas.py +79 -0
- supython/auth/service.py +587 -0
- supython/body_size.py +184 -0
- supython/cli.py +1653 -0
- supython/client/__init__.py +67 -0
- supython/client/_auth.py +249 -0
- supython/client/_client.py +145 -0
- supython/client/_config.py +92 -0
- supython/client/_functions.py +69 -0
- supython/client/_storage.py +255 -0
- supython/client/py.typed +0 -0
- supython/db.py +151 -0
- supython/db_admin.py +8 -0
- supython/functions/__init__.py +19 -0
- supython/functions/context.py +262 -0
- supython/functions/loader.py +307 -0
- supython/functions/router.py +228 -0
- supython/functions/schemas.py +50 -0
- supython/gen/__init__.py +5 -0
- supython/gen/_introspect.py +137 -0
- supython/gen/types_py.py +270 -0
- supython/gen/types_ts.py +365 -0
- supython/health.py +229 -0
- supython/hooks.py +117 -0
- supython/jobs/__init__.py +31 -0
- supython/jobs/backends.py +97 -0
- supython/jobs/context.py +58 -0
- supython/jobs/cron.py +152 -0
- supython/jobs/cron_inproc.py +118 -0
- supython/jobs/decorators.py +76 -0
- supython/jobs/registry.py +79 -0
- supython/jobs/router.py +136 -0
- supython/jobs/schemas.py +92 -0
- supython/jobs/service.py +311 -0
- supython/jobs/worker.py +219 -0
- supython/jwks.py +257 -0
- supython/keyset.py +279 -0
- supython/logging_config.py +291 -0
- supython/mail.py +33 -0
- supython/mailer.py +65 -0
- supython/migrate.py +81 -0
- supython/migrations/0001_extensions_and_roles.sql +46 -0
- supython/migrations/0002_auth_schema.sql +66 -0
- supython/migrations/0003_demo_todos.sql +42 -0
- supython/migrations/0004_auth_v0_2.sql +47 -0
- supython/migrations/0005_storage_schema.sql +117 -0
- supython/migrations/0006_realtime_schema.sql +206 -0
- supython/migrations/0007_jobs_schema.sql +254 -0
- supython/migrations/0008_jobs_last_error.sql +56 -0
- supython/migrations/0009_auth_rate_limits.sql +33 -0
- supython/migrations/0010_worker_heartbeat.sql +14 -0
- supython/migrations/0011_admin_schema.sql +45 -0
- supython/migrations/0012_auth_banned_until.sql +10 -0
- supython/migrations/0013_email_templates.sql +19 -0
- supython/migrations/0014_realtime_payload_warning.sql +96 -0
- supython/migrations/0015_backups_schema.sql +14 -0
- supython/passwords.py +15 -0
- supython/realtime/__init__.py +6 -0
- supython/realtime/broker.py +814 -0
- supython/realtime/protocol.py +234 -0
- supython/realtime/router.py +184 -0
- supython/realtime/schemas.py +207 -0
- supython/realtime/service.py +261 -0
- supython/realtime/topics.py +175 -0
- supython/realtime/websocket.py +586 -0
- supython/scaffold/__init__.py +5 -0
- supython/scaffold/init_project.py +133 -0
- supython/scaffold/templates/Caddyfile.tmpl +4 -0
- supython/scaffold/templates/README.md.tmpl +22 -0
- supython/scaffold/templates/docker-compose.prod.yml.tmpl +84 -0
- supython/scaffold/templates/docker-compose.yml.tmpl +41 -0
- supython/scaffold/templates/docker_postgres_Dockerfile.tmpl +9 -0
- supython/scaffold/templates/docker_postgres_postgresql.conf.tmpl +3 -0
- supython/scaffold/templates/env.example.tmpl +149 -0
- supython/scaffold/templates/functions_README.md.tmpl +21 -0
- supython/scaffold/templates/gitignore.tmpl +14 -0
- supython/scaffold/templates/migrations/.gitkeep +0 -0
- supython/secretset.py +347 -0
- supython/security_headers.py +78 -0
- supython/settings.py +198 -0
- supython/storage/__init__.py +5 -0
- supython/storage/backends.py +392 -0
- supython/storage/router.py +341 -0
- supython/storage/schemas.py +50 -0
- supython/storage/service.py +445 -0
- supython/storage/signing.py +119 -0
- supython/tokens.py +85 -0
- supython-0.5.0.dist-info/METADATA +714 -0
- supython-0.5.0.dist-info/RECORD +188 -0
- supython-0.5.0.dist-info/WHEEL +4 -0
- supython-0.5.0.dist-info/entry_points.txt +2 -0
- supython-0.5.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
"""Jobs module — durable job queue with cron scheduling."""
|
|
2
|
+
|
|
3
|
+
from .backends import get_backend
|
|
4
|
+
from .context import JobCtx
|
|
5
|
+
from .decorators import job
|
|
6
|
+
from .registry import get_registry
|
|
7
|
+
from .router import router
|
|
8
|
+
from .schemas import EnqueueResult, JobResponse
|
|
9
|
+
from .service import JobError, enqueue
|
|
10
|
+
from .worker import Worker
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def get_worker() -> Worker:
    """Build a :class:`Worker` wired to the current application settings."""
    # Imported lazily so importing the jobs package does not pull in settings.
    from ..settings import get_settings

    settings = get_settings()
    return Worker(settings)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
# Public API of the jobs package — keep in sync with the imports above.
__all__ = [
    "Worker",
    "enqueue",
    "get_backend",
    "get_registry",
    "get_worker",
    "job",
    "JobCtx",
    "JobError",
    "JobResponse",
    "EnqueueResult",
    "router",
]
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
"""Job backend protocol + default Postgres-queue implementation.
|
|
2
|
+
|
|
3
|
+
Collapsed from a two-file ``backends/`` package into a single module during the
|
|
4
|
+
v0.5 grooming pass: until a second backend actually lands (arq / dramatiq as
|
|
5
|
+
optional extras), the package added no structure the Protocol does not already
|
|
6
|
+
express. When an additional backend is introduced it should be promoted back
|
|
7
|
+
to a package.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
import logging
|
|
13
|
+
from typing import TYPE_CHECKING, Protocol, runtime_checkable
|
|
14
|
+
from uuid import UUID
|
|
15
|
+
|
|
16
|
+
from .. import db
|
|
17
|
+
from ..settings import Settings, get_settings
|
|
18
|
+
from .schemas import EnqueueResult, JobRecord
|
|
19
|
+
from .service import cancel as svc_cancel
|
|
20
|
+
from .service import enqueue as svc_enqueue
|
|
21
|
+
from .service import get_job as svc_get_job
|
|
22
|
+
from .service import list_jobs as svc_list_jobs
|
|
23
|
+
|
|
24
|
+
if TYPE_CHECKING: # pragma: no cover
|
|
25
|
+
from .worker import Worker
|
|
26
|
+
|
|
27
|
+
logger = logging.getLogger(__name__)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@runtime_checkable
class JobBackend(Protocol):
    """Structural interface every job backend must implement.

    ``runtime_checkable`` so ``isinstance(obj, JobBackend)`` works at runtime;
    note such checks only verify method *presence*, not signatures.
    """

    async def enqueue(self, **kwargs) -> EnqueueResult: ...
    async def run(self) -> None: ...
    async def cancel(self, job_id: UUID) -> None: ...
    async def retry(self, job_id: UUID) -> None: ...
    async def list_jobs(self, **kwargs) -> list[JobRecord]: ...
    async def get_job(self, job_id: UUID) -> JobRecord | None: ...
    async def health_check(self) -> dict: ...
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class PgQueueBackend:
    """Default backend — polls ``jobs.jobs`` with ``FOR UPDATE SKIP LOCKED``."""

    def __init__(self, settings: Settings) -> None:
        self._settings = settings
        self._worker: Worker | None = None

    async def enqueue(self, **kwargs) -> EnqueueResult:
        """Insert a job row via the service layer on a service-role connection."""
        async with db.as_service_role() as conn:
            return await svc_enqueue(conn, **kwargs)

    async def run(self) -> None:
        """Create a worker bound to this backend's settings and start it."""
        from .worker import Worker  # local import: breaks the import cycle

        worker = Worker(self._settings)
        self._worker = worker
        await worker.start()

    async def cancel(self, job_id: UUID) -> None:
        """Cancel the job identified by *job_id*."""
        async with db.as_service_role() as conn:
            await svc_cancel(conn, job_id)

    async def retry(self, job_id: UUID) -> None:
        """Re-schedule a failed job using its stored backoff parameters.

        Silently does nothing when the job id is unknown.
        """
        from .service import mark_failed_retry

        async with db.as_service_role() as conn:
            record = await svc_get_job(conn, job_id)
            if record is not None:
                await mark_failed_retry(
                    conn,
                    job_id,
                    attempts=record.attempts,
                    backoff=record.backoff,
                    backoff_base_s=record.backoff_base_s,
                    backoff_max_s=record.backoff_max_s,
                )

    async def list_jobs(self, **kwargs) -> list[JobRecord]:
        """List job rows, forwarding any filters to the service layer."""
        async with db.as_service_role() as conn:
            return await svc_list_jobs(conn, **kwargs)

    async def get_job(self, job_id: UUID) -> JobRecord | None:
        """Fetch a single job row, or ``None`` when it does not exist."""
        async with db.as_service_role() as conn:
            return await svc_get_job(conn, job_id)

    async def health_check(self) -> dict:
        """Probe the queue table; never raises — failures become a dict payload."""
        status = {"backend": "pg", "healthy": True}
        try:
            async with db.as_service_role() as conn:
                await conn.fetchval("select 1 from jobs.jobs limit 1")
        except Exception as exc:
            status = {"backend": "pg", "healthy": False, "detail": str(exc)}
        return status
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
def get_backend(settings: Settings | None = None) -> JobBackend:
    """Return the configured job backend (currently always the pg queue)."""
    if settings is None:
        settings = get_settings()
    return PgQueueBackend(settings)
|
supython/jobs/context.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
"""Job execution context — mirrors ``functions.context.Ctx`` for worker jobs.
|
|
2
|
+
|
|
3
|
+
``HookCtx`` and ``build_hook_ctx`` used to live here; they moved to
|
|
4
|
+
``supython.hooks`` so feature modules (auth, storage, ...) can fire hooks
|
|
5
|
+
without importing the jobs package. This file now only owns the job-side
|
|
6
|
+
context.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import logging
|
|
12
|
+
from collections.abc import Awaitable, Callable
|
|
13
|
+
from dataclasses import dataclass
|
|
14
|
+
from uuid import UUID
|
|
15
|
+
|
|
16
|
+
import asyncpg
|
|
17
|
+
|
|
18
|
+
from ..functions.context import PostgrestClient, StorageClient, _make_send_email
|
|
19
|
+
from ..mailer import EmailBackend, get_mailer
|
|
20
|
+
from ..settings import Settings, get_settings
|
|
21
|
+
from ..storage.backends import StorageBackend, get_backend
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass
class JobCtx:
    """Per-invocation context handed to a job handler."""

    # Connection the handler runs its queries on.
    db: asyncpg.Connection
    settings: Settings
    # Async callable for sending e-mail through the configured mailer.
    send_email: Callable[..., Awaitable[None]]
    storage: StorageClient
    postgrest: PostgrestClient
    logger: logging.Logger
    # None when the handler is invoked outside the worker (e.g. in tests).
    job_id: UUID | None = None
    # Delivery attempt counter for this execution.
    attempt: int = 0
    # Registered job name; also forms the logger name suffix.
    name: str = ""
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def build_job_ctx(
    *,
    conn: asyncpg.Connection,
    job_id: UUID | None = None,
    attempt: int = 0,
    name: str = "",
    backend: StorageBackend | None = None,
    mailer: EmailBackend | None = None,
    settings: Settings | None = None,
) -> JobCtx:
    """Assemble a :class:`JobCtx`, defaulting collaborators from the globals.

    Any of *backend*, *mailer* or *settings* may be injected (e.g. by tests);
    when omitted the process-wide instances are used.
    """
    cfg = settings or get_settings()
    ctx_logger = logging.getLogger(f"supython.jobs.{name}")
    return JobCtx(
        db=conn,
        settings=cfg,
        send_email=_make_send_email(mailer or get_mailer()),
        storage=StorageClient(conn, backend or get_backend()),
        postgrest=PostgrestClient(cfg.postgrest_url, None),
        logger=ctx_logger,
        job_id=job_id,
        attempt=attempt,
        name=name,
    )
|
supython/jobs/cron.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
1
|
+
"""Cron scheduling — ``pg_cron`` synchronisation only.
|
|
2
|
+
|
|
3
|
+
The in-process ``croniter`` fallback moved to :mod:`.cron_inproc` so that
|
|
4
|
+
the ``croniter`` import (an optional extra, see the v0.5 decision log) is
|
|
5
|
+
only touched when ``jobs_cron_backend == 'inproc'``. This module has no
|
|
6
|
+
optional-dep imports and is safe to load at app startup.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import contextlib
|
|
12
|
+
import json
|
|
13
|
+
import logging
|
|
14
|
+
from collections.abc import AsyncIterator
|
|
15
|
+
|
|
16
|
+
import asyncpg
|
|
17
|
+
|
|
18
|
+
from .registry import get_registry
|
|
19
|
+
|
|
20
|
+
logger = logging.getLogger(__name__)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@contextlib.asynccontextmanager
async def _as_login_role(conn: asyncpg.Connection) -> AsyncIterator[None]:
    """Temporarily step out of a NOLOGIN role for pg_cron scheduling.

    pg_cron stamps ``current_user`` on the ``cron.job`` row at schedule
    time and uses that role to initialise a background worker on every
    tick. The caller of :func:`sync_pg_cron` is expected to be in
    ``service_role``, which is ``NOLOGIN`` (see
    ``migrations/0001_extensions_and_roles.sql``); every tick would then
    FATAL with ``role "service_role" is not permitted to log in`` and
    the schedule would silently never fire.

    We hop back to the connection's pre-``service_role`` identity for
    just the ``cron.schedule`` / ``cron.unschedule`` call. That identity
    is whichever role opened the asyncpg connection (see
    ``DATABASE_URL``) and is therefore guaranteed LOGIN-capable.

    Uses ``SET LOCAL`` when the caller is inside a transaction so the
    hop doesn't leak past a ``db.as_service_role()`` block into the
    pooled connection.
    """
    prev_role = await conn.fetchval("select current_user")
    in_txn = conn.is_in_transaction()
    scope = "local " if in_txn else ""
    # ``SET ROLE NONE`` resets to the session authorization (the LOGIN role
    # the connection was opened with).
    await conn.execute(f"set {scope}role none")
    try:
        yield
    finally:
        # ``SET ROLE`` cannot take a bind parameter, so the identifier must
        # be inlined. Double any embedded quotes per SQL identifier-quoting
        # rules so an unusual role name cannot break out of (or break) the
        # quoted identifier — the previous code interpolated it verbatim.
        quoted = prev_role.replace('"', '""')
        await conn.execute(f'set {scope}role "{quoted}"')
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
async def sync_pg_cron(conn: asyncpg.Connection) -> None:
    """Upsert ``pg_cron`` schedule rows from the registry and remove stale ones.

    The caller is expected to have entered ``db.as_service_role()`` (or
    the CLI equivalent) before invoking this — the ``jobs.cron_schedules``
    table is owned by ``service_role``. For the actual ``cron.schedule``
    / ``cron.unschedule`` calls we transiently hop back to a LOGIN role
    via :func:`_as_login_role` so pg_cron's background worker can
    initialise a session at tick time.

    When pg_cron is installed, errors from ``cron.schedule`` propagate
    to the caller.

    When pg_cron is NOT installed the metadata row is still upserted so
    the in-process scheduler (or a manual runner) can pick it up, and
    ``cron.schedule`` is skipped entirely.
    """
    registry = get_registry()
    has_pg_cron = await conn.fetchval(
        "select exists(select 1 from pg_extension where extname = 'pg_cron')"
    )

    # Upsert one metadata row per registered cron. ``enabled`` is forced back
    # to true so re-registering a schedule re-arms it.
    for cron_defn in registry.iter_crons():
        await conn.execute(
            """
            insert into jobs.cron_schedules
              (name, cron_expr, job_name, job_version, payload, queue, enabled)
            values ($1, $2, $3, $4, $5::jsonb, $6, true)
            on conflict (name) do update set
              cron_expr = excluded.cron_expr,
              job_name = excluded.job_name,
              job_version = excluded.job_version,
              payload = excluded.payload,
              queue = excluded.queue,
              enabled = excluded.enabled
            """,
            cron_defn.name,
            cron_defn.cron_expr,
            cron_defn.job_name,
            cron_defn.job_version,
            json.dumps(cron_defn.payload),
            cron_defn.queue,
        )

        if not has_pg_cron:
            continue

        # Build the pg_cron command via ``format(..., %L, %L, %L)`` so
        # Postgres handles quoting/escaping of the payload JSON and text
        # args instead of interpolating them from Python. This closes
        # the pre-grooming bug where f-string interpolation produced
        # invalid JSON for any non-trivial payload.
        command = await conn.fetchval(
            """
            select format(
                'select jobs.enqueue(p_name := %L, p_payload := %L::jsonb, p_queue := %L)',
                $1::text, $2::text, $3::text
            )
            """,
            cron_defn.job_name,
            json.dumps(cron_defn.payload),
            cron_defn.queue,
        )

        # ``cron.schedule`` with an existing name updates the schedule in
        # place, so this doubles as the upsert path for pg_cron itself.
        async with _as_login_role(conn):
            await conn.execute(
                "select cron.schedule($1, $2, $3)",
                cron_defn.name,
                cron_defn.cron_expr,
                command,
            )

    # Second pass: delete schedules whose code registration has disappeared.
    registered_names = {c.name for c in registry.iter_crons()}
    existing = await conn.fetch("select name from jobs.cron_schedules")
    for row in existing:
        if row["name"] in registered_names:
            continue
        if has_pg_cron:
            async with _as_login_role(conn):
                # Stale metadata may no longer have a matching cron.job
                # row (e.g. manually unscheduled); tolerate that single
                # case. Everything else — permission denied, syntax
                # error, etc. — indicates a misconfiguration and is
                # surfaced to the caller.
                try:
                    await conn.execute(
                        "select cron.unschedule($1)", row["name"]
                    )
                except asyncpg.exceptions.RaiseError as exc:
                    if "could not find" not in str(exc):
                        raise
                    logger.debug(
                        "jobs.cron.unschedule_noop",
                        extra={"cron_name": row["name"]},
                    )
        await conn.execute(
            "delete from jobs.cron_schedules where name = $1",
            row["name"],
        )
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
"""Optional in-process cron scheduler (``croniter``-based).
|
|
2
|
+
|
|
3
|
+
Loaded only when ``jobs_cron_backend == 'inproc'``. ``croniter`` is an
|
|
4
|
+
optional extra — users opt in with ``pip install supython[cron-inproc]``
|
|
5
|
+
per the 2026-04-21 decision log row and §15.5 dependency budget.
|
|
6
|
+
|
|
7
|
+
Two bugs in the pre-grooming implementation were fixed here:
|
|
8
|
+
|
|
9
|
+
1. **Firing condition** — ``croniter.get_next(datetime)`` returns the next
|
|
10
|
+
fire time strictly after its anchor, so the old ``next_fire <= now``
|
|
11
|
+
test was never true. We now track a per-schedule ``_last_fire`` anchor
|
|
12
|
+
and compare ``next_fire`` against the current ``now`` after the sleep.
|
|
13
|
+
2. **Advisory lock lifetime** — ``pg_advisory_lock`` is session-scoped, so
|
|
14
|
+
acquiring on one pool connection and releasing on another leaks the
|
|
15
|
+
lock forever. Both now happen on the same connection for a single
|
|
16
|
+
cron tick.
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
import asyncio
|
|
22
|
+
import logging
|
|
23
|
+
from datetime import UTC, datetime
|
|
24
|
+
from typing import Any
|
|
25
|
+
|
|
26
|
+
from .. import db
|
|
27
|
+
from ..settings import Settings
|
|
28
|
+
from .registry import CronDefinition, get_registry
|
|
29
|
+
from .service import enqueue
|
|
30
|
+
|
|
31
|
+
logger = logging.getLogger(__name__)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def _require_croniter() -> Any:
|
|
35
|
+
try:
|
|
36
|
+
import croniter # type: ignore[import-not-found]
|
|
37
|
+
|
|
38
|
+
return croniter
|
|
39
|
+
except ImportError as exc: # pragma: no cover — depends on extras
|
|
40
|
+
raise ImportError(
|
|
41
|
+
"croniter is required for in-process cron scheduling. "
|
|
42
|
+
"Install with: pip install supython[cron-inproc]"
|
|
43
|
+
) from exc
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class InProcScheduler:
    """croniter-based fallback scheduler.

    One tick per minute; at each tick, every registered cron expression that
    has moved past its next scheduled fire time enqueues a single job.
    Advisory-lock + idempotency-key form a belt-and-braces guard against
    duplicate enqueues across replicas.
    """

    def __init__(self, settings: Settings) -> None:
        self._settings = settings
        self._running = False
        # schedule name -> the tick it last fired at (UTC-aware anchor).
        self._last_fire: dict[str, datetime] = {}

    async def start(self) -> None:
        """Run the tick loop until :meth:`stop` is called.

        NOTE(review): ``stop`` only flips a flag, so shutdown can lag by
        up to one full 60-second sleep.
        """
        croniter_mod = _require_croniter()
        self._running = True

        registry = get_registry()

        # Seed the ``_last_fire`` map so that the first tick does not
        # retroactively fire schedules that would otherwise look "overdue"
        # after a process restart.
        now = datetime.now(UTC)
        for cron_defn in registry.iter_crons():
            if cron_defn.name not in self._last_fire:
                self._last_fire[cron_defn.name] = now

        while self._running:
            now = datetime.now(UTC)
            for cron_defn in registry.iter_crons():
                try:
                    # ``get_next`` returns the first fire time strictly after
                    # the anchor; a schedule fires when that time has been
                    # passed by the current wall clock.
                    anchor = self._last_fire.get(cron_defn.name, now)
                    cron = croniter_mod.croniter(cron_defn.cron_expr, anchor)
                    next_fire = cron.get_next(datetime)
                    if next_fire <= now:
                        fired = await self._try_fire(cron_defn, next_fire)
                        if fired:
                            # Advance the anchor only on a successful fire so
                            # a lost lock/failed enqueue is retried next tick.
                            self._last_fire[cron_defn.name] = next_fire
                except Exception:
                    # One broken schedule must not take down the whole loop.
                    logger.exception(
                        "jobs.cron.tick_error",
                        extra={"cron_name": cron_defn.name},
                    )

            await asyncio.sleep(60)

    async def stop(self) -> None:
        """Request loop shutdown; takes effect after the current sleep."""
        self._running = False

    async def _try_fire(
        self, cron_defn: CronDefinition, tick: datetime
    ) -> bool:
        """Acquire-enqueue-release on one connection; skip if another worker has it."""
        async with db.as_service_role() as conn:
            locked = await conn.fetchval(
                "select pg_try_advisory_lock(hashtext($1))", cron_defn.name
            )
            if not locked:
                return False
            try:
                # The idempotency key ties the enqueue to this exact tick, so
                # a replica racing past the lock still cannot duplicate it.
                await enqueue(
                    conn,
                    name=cron_defn.job_name,
                    payload=cron_defn.payload,
                    queue=cron_defn.queue,
                    idempotency_key=f"{cron_defn.name}:{tick.isoformat()}",
                )
                return True
            finally:
                # Same connection as the acquire — advisory locks are
                # session-scoped (see the module docstring).
                await conn.execute(
                    "select pg_advisory_unlock(hashtext($1))", cron_defn.name
                )
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
"""Decorators for registering jobs and cron schedules."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from collections.abc import Callable, Coroutine
|
|
6
|
+
from enum import StrEnum
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
from .registry import CronDefinition, JobDefinition, get_registry
|
|
10
|
+
|
|
11
|
+
Handler = Callable[..., Coroutine[Any, Any, None]]
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class Backoff(StrEnum):
    """Retry-delay growth strategies accepted by the ``@job`` decorator."""

    EXPONENTIAL = "exponential"
    LINEAR = "linear"
    CONSTANT = "constant"
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def job(
    name: str,
    *,
    version: int = 1,
    max_attempts: int = 3,
    backoff: Backoff | str = Backoff.EXPONENTIAL,
    backoff_base_s: float = 5.0,
    backoff_max_s: float = 300.0,
    queue: str = "default",
    role: str = "service_role",
    claims_from: str | None = None,
    accepts_payload: bool = True,
) -> Callable[..., Any]:
    """Register the decorated coroutine as a job handler.

    The handler is recorded in the process-global registry under
    ``(name, version)``; the decorated function is returned unchanged so it
    can still be called directly.

    Raises:
        ValueError: if ``(name, version)`` is already registered
            (via ``Registry.register_job``).
    """

    def decorator(fn: Handler) -> Handler:
        get_registry().register_job(
            JobDefinition(
                name=name,
                version=version,
                handler=fn,
                max_attempts=max_attempts,
                # ``Backoff`` is a StrEnum, so every member already passes
                # ``isinstance(backoff, str)`` and the old ``.value`` branch
                # was dead — the enum member itself got stored. ``str()``
                # normalises both members and plain strings to the raw
                # strategy name.
                backoff=str(backoff),
                backoff_base_s=backoff_base_s,
                backoff_max_s=backoff_max_s,
                queue=queue,
                role=role,
                claims_from=claims_from,
                accepts_payload=accepts_payload,
            )
        )
        return fn

    return decorator
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def cron(
    cron_expr: str,
    *,
    name: str,
    job_name: str | None = None,
    job_version: int = 1,
    payload: dict | None = None,
    queue: str = "default",
) -> Callable[..., Any]:
    """Register a cron schedule; the decorated function is returned untouched.

    When *job_name* is omitted the schedule targets a job with the same
    name as the schedule itself.
    """

    def decorator(fn: Handler) -> Handler:
        defn = CronDefinition(
            name=name,
            cron_expr=cron_expr,
            job_name=job_name or name,
            job_version=job_version,
            payload=payload or {},
            queue=queue,
        )
        get_registry().register_cron(defn)
        return fn

    return decorator
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
"""Job and cron definitions registry (process-global singleton)."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from collections.abc import Callable, Coroutine
|
|
6
|
+
from dataclasses import dataclass, field
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@dataclass
class JobDefinition:
    """Static description of a registered job handler."""

    name: str
    # The coroutine that executes the job; excluded from repr for readability.
    handler: Callable[..., Coroutine[Any, Any, None]] = field(repr=False)
    version: int = 1
    max_attempts: int = 3
    # Retry-delay strategy name (see ``decorators.Backoff``).
    backoff: str = "exponential"
    backoff_base_s: float = 5.0
    backoff_max_s: float = 300.0
    queue: str = "default"
    # Database role the handler runs under.
    role: str = "service_role"
    claims_from: str | None = None
    accepts_payload: bool = True
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
@dataclass
class CronDefinition:
    """Static description of a cron schedule targeting a registered job."""

    name: str
    cron_expr: str
    # Job to enqueue on each fire (may differ from the schedule name).
    job_name: str
    job_version: int = 1
    payload: dict = field(default_factory=dict)
    queue: str = "default"
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class Registry:
    """In-memory store of job and cron definitions.

    Jobs are keyed by ``(name, version)``, crons by name; duplicate
    registrations raise ``ValueError``.
    """

    def __init__(self) -> None:
        self._jobs: dict[tuple[str, int], JobDefinition] = {}
        self._crons: dict[str, CronDefinition] = {}

    def register_job(self, defn: JobDefinition) -> None:
        """Add a job definition, rejecting duplicate (name, version) pairs."""
        key = (defn.name, defn.version)
        if key in self._jobs:
            raise ValueError(f"job {defn.name!r} v{defn.version} already registered")
        self._jobs[key] = defn

    def register_cron(self, defn: CronDefinition) -> None:
        """Add a cron definition, rejecting duplicate names."""
        if defn.name in self._crons:
            raise ValueError(f"cron {defn.name!r} already registered")
        self._crons[defn.name] = defn

    def get(self, name: str, version: int) -> JobDefinition | None:
        """Exact (name, version) lookup; None when absent."""
        return self._jobs.get((name, version))

    def get_latest(self, name: str) -> JobDefinition | None:
        """Highest registered version of *name*, or None when unknown."""
        best: int | None = None
        for job_name, job_version in self._jobs:
            if job_name == name and (best is None or job_version > best):
                best = job_version
        return None if best is None else self._jobs[(name, best)]

    def iter_jobs(self):
        """Yield all job definitions in registration order."""
        yield from self._jobs.values()

    def iter_crons(self):
        """Yield all cron definitions in registration order."""
        yield from self._crons.values()
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
# Lazily-created process-global singleton; reset via ``reset_registry``.
_registry: Registry | None = None


def get_registry() -> Registry:
    """Return the process-wide :class:`Registry`, creating it on first use."""
    global _registry
    reg = _registry
    if reg is None:
        reg = Registry()
        _registry = reg
    return reg


def reset_registry() -> None:
    """Drop the singleton so the next ``get_registry`` builds a fresh one (tests)."""
    global _registry
    _registry = None