supython-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- supython/__init__.py +24 -0
- supython/admin/__init__.py +3 -0
- supython/admin/api/__init__.py +24 -0
- supython/admin/api/auth.py +118 -0
- supython/admin/api/auth_templates.py +67 -0
- supython/admin/api/auth_users.py +225 -0
- supython/admin/api/db.py +174 -0
- supython/admin/api/functions.py +92 -0
- supython/admin/api/jobs.py +192 -0
- supython/admin/api/ops.py +224 -0
- supython/admin/api/realtime.py +281 -0
- supython/admin/api/service_auth.py +49 -0
- supython/admin/api/service_auth_templates.py +83 -0
- supython/admin/api/service_auth_users.py +346 -0
- supython/admin/api/service_db.py +214 -0
- supython/admin/api/service_functions.py +287 -0
- supython/admin/api/service_jobs.py +282 -0
- supython/admin/api/service_ops.py +213 -0
- supython/admin/api/service_realtime.py +30 -0
- supython/admin/api/service_storage.py +220 -0
- supython/admin/api/storage.py +117 -0
- supython/admin/api/system.py +37 -0
- supython/admin/audit.py +29 -0
- supython/admin/deps.py +22 -0
- supython/admin/errors.py +16 -0
- supython/admin/schemas.py +310 -0
- supython/admin/session.py +52 -0
- supython/admin/spa.py +38 -0
- supython/admin/static/assets/Alert-dluGVkos.js +49 -0
- supython/admin/static/assets/Audit-Njung3HI.js +2 -0
- supython/admin/static/assets/Backups-DzPlFgrm.js +2 -0
- supython/admin/static/assets/Buckets-ByacGkU1.js +2 -0
- supython/admin/static/assets/Channels-BoIuTtam.js +353 -0
- supython/admin/static/assets/ChevronRight-CtQH1EQ1.js +2 -0
- supython/admin/static/assets/CodeViewer-Bqy7-wvH.js +2 -0
- supython/admin/static/assets/Crons-B67vc39F.js +2 -0
- supython/admin/static/assets/DashboardView-CUTFVL6k.js +2 -0
- supython/admin/static/assets/DataTable-COAAWEft.js +747 -0
- supython/admin/static/assets/DescriptionsItem-P8JUDaBs.js +75 -0
- supython/admin/static/assets/DrawerContent-TpYTFgF1.js +139 -0
- supython/admin/static/assets/Empty-cr2r7e2u.js +25 -0
- supython/admin/static/assets/EmptyState-DeDck-OL.js +2 -0
- supython/admin/static/assets/Grid-hFkp9F4P.js +2 -0
- supython/admin/static/assets/Input-DppYTq9C.js +259 -0
- supython/admin/static/assets/Invoke-DW3Nveeh.js +2 -0
- supython/admin/static/assets/JsonField-DibyJgun.js +2 -0
- supython/admin/static/assets/LoginView-BjLyE3Ds.css +1 -0
- supython/admin/static/assets/LoginView-CoOjECT_.js +111 -0
- supython/admin/static/assets/Logs-D9WYrnIT.js +2 -0
- supython/admin/static/assets/Logs-DS1XPa0h.css +1 -0
- supython/admin/static/assets/Migrations-DOSC2ddQ.js +2 -0
- supython/admin/static/assets/ObjectBrowser-_5w8vOX8.js +2 -0
- supython/admin/static/assets/Queue-CywZs6vI.js +2 -0
- supython/admin/static/assets/RefreshTokens-Ccjr53jg.js +2 -0
- supython/admin/static/assets/RlsEditor-BSlH9vSc.js +2 -0
- supython/admin/static/assets/Routes-BiLXE49D.js +2 -0
- supython/admin/static/assets/Routes-C-ianIGD.css +1 -0
- supython/admin/static/assets/SchemaBrowser-DKy2_KQi.css +1 -0
- supython/admin/static/assets/SchemaBrowser-XFvFbtDB.js +2 -0
- supython/admin/static/assets/Select-DIzZyRZb.js +434 -0
- supython/admin/static/assets/Space-n5-XcguU.js +400 -0
- supython/admin/static/assets/SqlEditor-b8pTsILY.js +3 -0
- supython/admin/static/assets/SqlWorkspace-BUS7IntH.js +104 -0
- supython/admin/static/assets/TableData-CQIagLKn.js +2 -0
- supython/admin/static/assets/Tag-D1fOKpTH.js +72 -0
- supython/admin/static/assets/Templates-BS-ugkdq.js +2 -0
- supython/admin/static/assets/Thing-CEAniuMg.js +107 -0
- supython/admin/static/assets/Users-wzwajhlh.js +2 -0
- supython/admin/static/assets/_plugin-vue_export-helper-DGA9ry_j.js +1 -0
- supython/admin/static/assets/dist-VXIJLCYq.js +13 -0
- supython/admin/static/assets/format-length-CGCY1rMh.js +2 -0
- supython/admin/static/assets/get-Ca6unauB.js +2 -0
- supython/admin/static/assets/index-CeE6v959.js +951 -0
- supython/admin/static/assets/pinia-COXwfrOX.js +2 -0
- supython/admin/static/assets/resources-Bt6thQCD.js +44 -0
- supython/admin/static/assets/use-locale-mtgM0a3a.js +2 -0
- supython/admin/static/assets/use-merged-state-BvhkaHNX.js +2 -0
- supython/admin/static/assets/useConfirm-tMjvBFXR.js +2 -0
- supython/admin/static/assets/useResource-C_rJCY8C.js +2 -0
- supython/admin/static/assets/useTable-CnZc5zhi.js +363 -0
- supython/admin/static/assets/useTable-Dg0XlRlq.css +1 -0
- supython/admin/static/assets/useToast-DsZKx0IX.js +2 -0
- supython/admin/static/assets/utils-sbXoq7Ir.js +2 -0
- supython/admin/static/favicon.svg +1 -0
- supython/admin/static/icons.svg +24 -0
- supython/admin/static/index.html +24 -0
- supython/app.py +162 -0
- supython/auth/__init__.py +3 -0
- supython/auth/_email_job.py +11 -0
- supython/auth/providers/__init__.py +34 -0
- supython/auth/providers/github.py +22 -0
- supython/auth/providers/google.py +19 -0
- supython/auth/providers/oauth.py +56 -0
- supython/auth/providers/registry.py +16 -0
- supython/auth/ratelimit.py +39 -0
- supython/auth/router.py +282 -0
- supython/auth/schemas.py +79 -0
- supython/auth/service.py +587 -0
- supython/backups/__init__.py +24 -0
- supython/backups/_backup_job.py +170 -0
- supython/backups/schemas.py +18 -0
- supython/backups/service.py +217 -0
- supython/body_size.py +184 -0
- supython/cli.py +1663 -0
- supython/client/__init__.py +67 -0
- supython/client/_auth.py +249 -0
- supython/client/_client.py +145 -0
- supython/client/_config.py +92 -0
- supython/client/_functions.py +69 -0
- supython/client/_storage.py +255 -0
- supython/client/py.typed +0 -0
- supython/db.py +151 -0
- supython/db_admin.py +8 -0
- supython/extensions.py +36 -0
- supython/functions/__init__.py +19 -0
- supython/functions/context.py +262 -0
- supython/functions/loader.py +307 -0
- supython/functions/router.py +228 -0
- supython/functions/schemas.py +50 -0
- supython/gen/__init__.py +5 -0
- supython/gen/_introspect.py +137 -0
- supython/gen/types_py.py +270 -0
- supython/gen/types_ts.py +365 -0
- supython/health.py +229 -0
- supython/hooks.py +117 -0
- supython/jobs/__init__.py +31 -0
- supython/jobs/backends.py +97 -0
- supython/jobs/context.py +58 -0
- supython/jobs/cron.py +152 -0
- supython/jobs/cron_inproc.py +119 -0
- supython/jobs/decorators.py +76 -0
- supython/jobs/registry.py +79 -0
- supython/jobs/router.py +136 -0
- supython/jobs/schemas.py +92 -0
- supython/jobs/service.py +311 -0
- supython/jobs/worker.py +219 -0
- supython/jwks.py +257 -0
- supython/keyset.py +279 -0
- supython/logging_config.py +291 -0
- supython/mail.py +33 -0
- supython/mailer.py +65 -0
- supython/migrate.py +81 -0
- supython/migrations/0001_extensions_and_roles.sql +46 -0
- supython/migrations/0002_auth_schema.sql +66 -0
- supython/migrations/0003_demo_todos.sql +42 -0
- supython/migrations/0004_auth_v0_2.sql +47 -0
- supython/migrations/0005_storage_schema.sql +117 -0
- supython/migrations/0006_realtime_schema.sql +206 -0
- supython/migrations/0007_jobs_schema.sql +254 -0
- supython/migrations/0008_jobs_last_error.sql +56 -0
- supython/migrations/0009_auth_rate_limits.sql +33 -0
- supython/migrations/0010_worker_heartbeat.sql +14 -0
- supython/migrations/0011_admin_schema.sql +45 -0
- supython/migrations/0012_auth_banned_until.sql +10 -0
- supython/migrations/0013_email_templates.sql +19 -0
- supython/migrations/0014_realtime_payload_warning.sql +96 -0
- supython/migrations/0015_backups_schema.sql +14 -0
- supython/passwords.py +15 -0
- supython/realtime/__init__.py +6 -0
- supython/realtime/broker.py +814 -0
- supython/realtime/protocol.py +234 -0
- supython/realtime/router.py +184 -0
- supython/realtime/schemas.py +207 -0
- supython/realtime/service.py +261 -0
- supython/realtime/topics.py +175 -0
- supython/realtime/websocket.py +586 -0
- supython/scaffold/__init__.py +5 -0
- supython/scaffold/init_project.py +144 -0
- supython/scaffold/templates/Caddyfile.tmpl +4 -0
- supython/scaffold/templates/README.md.tmpl +22 -0
- supython/scaffold/templates/apps_hooks.py.tmpl +11 -0
- supython/scaffold/templates/apps_jobs.py.tmpl +8 -0
- supython/scaffold/templates/asgi.py.tmpl +14 -0
- supython/scaffold/templates/docker-compose.prod.yml.tmpl +84 -0
- supython/scaffold/templates/docker-compose.yml.tmpl +45 -0
- supython/scaffold/templates/docker_postgres_Dockerfile.tmpl +9 -0
- supython/scaffold/templates/docker_postgres_postgresql.conf.tmpl +3 -0
- supython/scaffold/templates/env.example.tmpl +168 -0
- supython/scaffold/templates/functions_README.md.tmpl +21 -0
- supython/scaffold/templates/gitignore.tmpl +14 -0
- supython/scaffold/templates/manage.py.tmpl +11 -0
- supython/scaffold/templates/migrations/.gitkeep +0 -0
- supython/scaffold/templates/package_init.py.tmpl +1 -0
- supython/scaffold/templates/settings.py.tmpl +31 -0
- supython/secretset.py +347 -0
- supython/security_headers.py +78 -0
- supython/settings.py +244 -0
- supython/settings_module.py +117 -0
- supython/storage/__init__.py +5 -0
- supython/storage/backends.py +392 -0
- supython/storage/router.py +341 -0
- supython/storage/schemas.py +50 -0
- supython/storage/service.py +445 -0
- supython/storage/signing.py +119 -0
- supython/tokens.py +85 -0
- supython-0.1.0.dist-info/METADATA +756 -0
- supython-0.1.0.dist-info/RECORD +200 -0
- supython-0.1.0.dist-info/WHEEL +4 -0
- supython-0.1.0.dist-info/entry_points.txt +2 -0
- supython-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,445 @@
|
|
|
1
|
+
"""Storage business logic.
|
|
2
|
+
|
|
3
|
+
Pure async functions. Take a role-scoped ``conn`` (from ``db.as_role(...)``)
|
|
4
|
+
plus a ``StorageBackend``. The metadata layer is the authority for who may
|
|
5
|
+
do what — every request first hits Postgres for an RLS-checked
|
|
6
|
+
SELECT/INSERT, then touches backend bytes. Orphaned bytes after a crash are
|
|
7
|
+
acceptable; orphaned metadata is not.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import logging
|
|
11
|
+
from collections.abc import AsyncIterator
|
|
12
|
+
|
|
13
|
+
import asyncpg
|
|
14
|
+
|
|
15
|
+
from ..settings import get_settings
|
|
16
|
+
from . import signing
|
|
17
|
+
from .backends import BackendError, ObjectStream, StorageBackend, make_object_key
|
|
18
|
+
from .schemas import BucketResponse, ObjectResponse, SignedUrlResponse
|
|
19
|
+
|
|
20
|
+
logger = logging.getLogger(__name__)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class StorageError(Exception):
    """Domain error the storage router maps onto a JSON error response.

    ``code`` is a machine-readable identifier, ``message`` the human-readable
    text, ``status`` the HTTP status the router should respond with.
    """

    def __init__(self, code: str, message: str, status: int = 400) -> None:
        super().__init__(message)
        self.code = code
        self.message = message
        self.status = status
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
# ---------------------------------------------------------------------------
|
|
32
|
+
# Row mappers
|
|
33
|
+
# ---------------------------------------------------------------------------
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def _row_to_bucket(row: asyncpg.Record) -> BucketResponse:
    """Map a ``storage.buckets`` record onto the API response model."""
    columns = (
        "id",
        "name",
        "owner",
        "public",
        "file_size_limit",
        "allowed_mime_types",
        "created_at",
        "updated_at",
    )
    return BucketResponse(**{column: row[column] for column in columns})
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def _row_to_object(row: asyncpg.Record) -> ObjectResponse:
    """Map a joined objects/buckets record onto the API response model.

    The ``bucket`` field is sourced from the ``bucket_name`` column alias
    produced by the SELECT/RETURNING statements in this module.
    """
    same_named = (
        "id",
        "bucket_id",
        "name",
        "owner",
        "size",
        "mime_type",
        "etag",
        "created_at",
        "updated_at",
    )
    kwargs = {column: row[column] for column in same_named}
    kwargs["bucket"] = row["bucket_name"]
    return ObjectResponse(**kwargs)
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
# ---------------------------------------------------------------------------
|
|
65
|
+
# Buckets
|
|
66
|
+
# ---------------------------------------------------------------------------
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
async def create_bucket(
    conn: asyncpg.Connection,
    *,
    name: str,
    public: bool,
    file_size_limit: int | None,
    allowed_mime_types: list[str] | None,
) -> BucketResponse:
    """Insert a bucket row and return it as a :class:`BucketResponse`.

    ``owner`` is assigned by ``auth.uid()`` on the Postgres side, so the
    JWT bound to ``conn`` — not the caller's arguments — decides ownership.

    Raises:
        StorageError: ``bucket_exists`` (409) on a duplicate name,
            ``forbidden`` (403) when RLS rejects the insert.
    """
    try:
        row = await conn.fetchrow(
            """
            insert into storage.buckets
                (name, owner, public, file_size_limit, allowed_mime_types)
            values ($1, auth.uid(), $2, $3, $4)
            returning id, name, owner, public, file_size_limit,
                      allowed_mime_types, created_at, updated_at
            """,
            name,
            public,
            file_size_limit,
            allowed_mime_types,
        )
    except asyncpg.UniqueViolationError as exc:
        raise StorageError("bucket_exists", f"Bucket {name!r} already exists", 409) from exc
    except asyncpg.InsufficientPrivilegeError as exc:
        raise StorageError("forbidden", "Not allowed to create buckets", 403) from exc

    # Defensive: a policy can filter the RETURNING row instead of raising.
    if row is None:
        raise StorageError("forbidden", "Not allowed to create buckets", 403)
    return _row_to_bucket(row)
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
async def list_buckets(conn: asyncpg.Connection) -> list[BucketResponse]:
    """Return every bucket visible to the connection's role, ordered by name."""
    records = await conn.fetch(
        """
        select id, name, owner, public, file_size_limit,
               allowed_mime_types, created_at, updated_at
        from storage.buckets
        order by name
        """
    )
    return list(map(_row_to_bucket, records))
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
async def get_bucket(conn: asyncpg.Connection, name: str) -> BucketResponse:
    """Look up one bucket by name.

    Raises ``bucket_not_found`` (404) when the row is absent — or merely
    invisible to the current role under RLS.
    """
    record = await conn.fetchrow(
        """
        select id, name, owner, public, file_size_limit,
               allowed_mime_types, created_at, updated_at
        from storage.buckets
        where name = $1
        """,
        name,
    )
    if record is not None:
        return _row_to_bucket(record)
    raise StorageError("bucket_not_found", f"Bucket {name!r} not found", 404)
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
async def delete_bucket(
    conn: asyncpg.Connection,
    backend: StorageBackend,
    name: str,
) -> None:
    """Delete a bucket's metadata, then best-effort delete its bytes.

    Order matters: the object keys are snapshotted *before* the bucket row
    is deleted, because afterwards the metadata is gone and the backend
    keys could no longer be reconstructed.

    NOTE(review): this assumes deleting the bucket row also removes its
    ``storage.objects`` rows (FK cascade) — confirm against the storage
    schema migration.

    Raises:
        StorageError: ``bucket_not_found`` (404) when the bucket is absent,
            invisible, or the delete was filtered by RLS ("DELETE 0").
    """
    bucket = await get_bucket(conn, name)
    keys = await conn.fetch(
        "select name from storage.objects where bucket_id = $1",
        bucket.id,
    )
    result = await conn.execute(
        "delete from storage.buckets where name = $1",
        name,
    )
    # The command tag ends in the affected-row count; " 0" means RLS let us
    # read the bucket but not delete it (or it vanished concurrently).
    if result.endswith(" 0"):
        raise StorageError("bucket_not_found", f"Bucket {name!r} not found", 404)

    # Bytes cleanup is best-effort: orphaned bytes are acceptable,
    # orphaned metadata is not (module docstring).
    for k in keys:
        try:
            await backend.delete(make_object_key(name, k["name"]))
        except BackendError:
            logger.warning(
                "orphaned bytes after bucket delete: bucket=%s key=%s",
                name,
                k["name"],
            )
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
# ---------------------------------------------------------------------------
|
|
157
|
+
# Objects
|
|
158
|
+
# ---------------------------------------------------------------------------
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
async def _fetch_bucket_for_object(
    conn: asyncpg.Connection, bucket_name: str
) -> BucketResponse:
    """Resolve the bucket an object operation targets.

    Thin alias over :func:`get_bucket` so object code paths share its
    ``bucket_not_found`` error behaviour.
    """
    bucket = await get_bucket(conn, bucket_name)
    return bucket
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def _check_mime(bucket: BucketResponse, mime_type: str | None) -> None:
    """Reject uploads whose mime type falls outside the bucket allow-list.

    A bucket with an empty/absent ``allowed_mime_types`` accepts any
    content type, including none at all.

    Raises:
        StorageError: ``mime_not_allowed`` (415).
    """
    allowed = bucket.allowed_mime_types
    if not allowed:
        return
    if mime_type is not None and mime_type in allowed:
        return
    raise StorageError(
        "mime_not_allowed",
        f"Mime type {mime_type!r} is not allowed in bucket {bucket.name!r}",
        415,
    )
|
|
177
|
+
|
|
178
|
+
|
|
179
|
+
async def _enforce_max_size(
|
|
180
|
+
it: AsyncIterator[bytes], max_bytes: int
|
|
181
|
+
) -> AsyncIterator[bytes]:
|
|
182
|
+
total = 0
|
|
183
|
+
async for chunk in it:
|
|
184
|
+
total += len(chunk)
|
|
185
|
+
if total > max_bytes:
|
|
186
|
+
raise StorageError(
|
|
187
|
+
"file_too_large",
|
|
188
|
+
f"Upload exceeds {max_bytes} bytes",
|
|
189
|
+
413,
|
|
190
|
+
)
|
|
191
|
+
yield chunk
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
async def upload_object(
    conn: asyncpg.Connection,
    backend: StorageBackend,
    *,
    bucket_name: str,
    path: str,
    data: AsyncIterator[bytes],
    content_type: str | None,
) -> ObjectResponse:
    """Stream ``data`` into the backend, then record metadata under RLS.

    Sequence (order is load-bearing):
      1. RLS-checked bucket lookup + mime allow-list check.
      2. Effective size cap = min(global setting, bucket limit).
      3. Write bytes to the backend (size enforced mid-stream).
      4. Insert the metadata row; any insert failure deletes the bytes so
         that metadata never references a blob that was rolled back.

    Raises:
        StorageError: ``file_too_large`` (413), ``backend_error`` (500),
            ``object_exists`` (409), ``forbidden`` (403), plus whatever
            the bucket lookup raises.
    """
    settings = get_settings()
    bucket = await _fetch_bucket_for_object(conn, bucket_name)
    _check_mime(bucket, content_type)

    max_bytes = settings.storage_max_upload_bytes
    if bucket.file_size_limit is not None:
        max_bytes = min(max_bytes, bucket.file_size_limit)

    key = make_object_key(bucket_name, path)
    try:
        stat = await backend.put(key, _enforce_max_size(data, max_bytes), content_type)
    except StorageError:
        # size limit raised mid-stream; backend wrote a partial file — try to clean up.
        try:
            await backend.delete(key)
        except BackendError:
            logger.warning("failed to clean up partial upload at %s", key)
        raise
    except BackendError as exc:
        raise StorageError("backend_error", str(exc), 500) from exc

    try:
        row = await conn.fetchrow(
            """
            insert into storage.objects
                (bucket_id, name, owner, size, mime_type, etag)
            values ($1, $2, auth.uid(), $3, $4, $5)
            returning id, bucket_id, name, owner, size, mime_type, etag,
                      created_at, updated_at, $6::text as bucket_name
            """,
            bucket.id,
            path,
            stat.size,
            content_type,
            stat.etag,
            bucket_name,
        )
    except asyncpg.UniqueViolationError as exc:
        await _safe_delete(backend, key)
        raise StorageError(
            "object_exists",
            f"Object {bucket_name}/{path} already exists",
            409,
        ) from exc
    except asyncpg.InsufficientPrivilegeError as exc:
        await _safe_delete(backend, key)
        raise StorageError("forbidden", "Not allowed to write here", 403) from exc

    # Defensive: RLS can filter the RETURNING row instead of raising.
    if row is None:
        await _safe_delete(backend, key)
        raise StorageError("forbidden", "Not allowed to write here", 403)

    return _row_to_object(row)
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
async def _safe_delete(backend: StorageBackend, key: str) -> None:
    """Best-effort byte cleanup after a failed metadata insert.

    Backend failures are logged, never raised — an orphaned blob is
    acceptable per the module docstring, and the caller is already
    propagating the original error.
    """
    try:
        await backend.delete(key)
    except BackendError:
        logger.warning("orphaned bytes at %s after metadata insert failed", key)
|
|
263
|
+
|
|
264
|
+
|
|
265
|
+
async def _select_object_row(
    conn: asyncpg.Connection, bucket_name: str, path: str
) -> asyncpg.Record:
    """RLS-checked lookup of one object row joined with its bucket name.

    Raises ``object_not_found`` (404) whether the object is truly absent
    or merely invisible to the current role — the caller can't tell the
    difference, by design.
    """
    record = await conn.fetchrow(
        """
        select o.id, o.bucket_id, o.name, o.owner, o.size, o.mime_type,
               o.etag, o.created_at, o.updated_at,
               b.name as bucket_name
        from storage.objects o
        join storage.buckets b on b.id = o.bucket_id
        where b.name = $1 and o.name = $2
        """,
        bucket_name,
        path,
    )
    if record is not None:
        return record
    raise StorageError(
        "object_not_found",
        f"Object {bucket_name}/{path} not found",
        404,
    )
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
async def get_object_metadata(
    conn: asyncpg.Connection, bucket_name: str, path: str
) -> ObjectResponse:
    """Return an object's metadata without touching backend bytes."""
    return _row_to_object(await _select_object_row(conn, bucket_name, path))
|
|
294
|
+
|
|
295
|
+
|
|
296
|
+
async def download_object(
    conn: asyncpg.Connection,
    backend: StorageBackend,
    *,
    bucket_name: str,
    path: str,
    byte_range: tuple[int, int | None] | None = None,
) -> tuple[ObjectResponse, ObjectStream]:
    """Metadata-first download: the RLS SELECT gates access, then bytes stream.

    A backend failure surfaces as 404 so a missing blob is indistinguishable
    from a missing object. Header fields the backend did not provide are
    filled in from the metadata row.
    """
    record = await _select_object_row(conn, bucket_name, path)
    obj = _row_to_object(record)
    storage_key = make_object_key(bucket_name, path)
    try:
        stream = await backend.get(storage_key, byte_range=byte_range)
    except BackendError as exc:
        raise StorageError("object_not_found", str(exc), 404) from exc

    if obj.mime_type and not stream.content_type:
        stream.content_type = obj.mime_type
    if obj.etag and not stream.etag:
        stream.etag = obj.etag
    return obj, stream
|
|
317
|
+
|
|
318
|
+
|
|
319
|
+
async def delete_object(
    conn: asyncpg.Connection,
    backend: StorageBackend,
    *,
    bucket_name: str,
    path: str,
) -> None:
    """Delete the metadata row first (RLS-checked), then the backend bytes.

    Raises:
        StorageError: ``object_not_found`` (404) when the row is absent,
            invisible, or the delete was filtered by RLS.
    """
    row = await _select_object_row(conn, bucket_name, path)
    bucket_id = row["bucket_id"]
    result = await conn.execute(
        "delete from storage.objects where bucket_id = $1 and name = $2",
        bucket_id,
        path,
    )
    # RLS may allow SELECT but not DELETE; a "DELETE 0" command tag means
    # the row survived, so report not-found rather than leak policy detail.
    if result.endswith(" 0"):
        raise StorageError(
            "object_not_found",
            f"Object {bucket_name}/{path} not found",
            404,
        )
    # Bytes cleanup is best-effort; orphaned bytes are acceptable.
    try:
        await backend.delete(make_object_key(bucket_name, path))
    except BackendError:
        logger.warning(
            "orphaned bytes after object delete: %s/%s", bucket_name, path
        )
|
|
345
|
+
|
|
346
|
+
|
|
347
|
+
# ---------------------------------------------------------------------------
|
|
348
|
+
# Signed URLs
|
|
349
|
+
# ---------------------------------------------------------------------------
|
|
350
|
+
|
|
351
|
+
|
|
352
|
+
def _build_signed_url(bucket: str, path: str, token: str) -> str:
    """Build the public download URL for a signed token.

    ``path`` may contain characters that are not URL-safe (spaces, ``?``,
    ``#``), which previously produced broken URLs; percent-encode it,
    keeping ``/`` separators. The itsdangerous token is URL-safe already
    but is quoted defensively too.

    NOTE(review): assumes the route layer percent-decodes path segments
    before calling ``signing.verify`` — confirm against the storage router.
    """
    from urllib.parse import quote

    site = get_settings().site_url.rstrip("/")
    return (
        f"{site}/storage/v1/object/signed/{quote(bucket, safe='')}"
        f"/{quote(path)}?token={quote(token, safe='')}"
    )
|
|
355
|
+
|
|
356
|
+
|
|
357
|
+
async def issue_signed_url(
    conn: asyncpg.Connection,
    *,
    bucket_name: str,
    path: str,
    expires_in: int | None,
) -> SignedUrlResponse:
    """Mint a signed download URL after an RLS-gated existence check.

    The SELECT both confirms the object exists and that the caller's role
    may read it — only readers may share.
    """
    await _select_object_row(conn, bucket_name, path)

    ttl = expires_in or get_settings().storage_signed_url_default_ttl
    token, expires_at = signing.sign(bucket_name, path, ttl)
    return SignedUrlResponse(
        token=token,
        signed_url=_build_signed_url(bucket_name, path, token),
        expires_in=ttl,
        expires_at=expires_at,
    )
|
|
375
|
+
|
|
376
|
+
|
|
377
|
+
async def verify_signed_download(
    conn: asyncpg.Connection,
    backend: StorageBackend,
    *,
    bucket_name: str,
    path: str,
    token: str,
    byte_range: tuple[int, int | None] | None = None,
) -> tuple[ObjectResponse, ObjectStream]:
    """Check ``token`` against (bucket, path), then stream via download_object.

    ``conn`` should be a service-role connection — the JWT is absent here, the
    signature is the entire authorization story. Signature failures map onto
    400-level StorageErrors; everything else follows download semantics.
    """
    try:
        signing.verify(token, bucket_name, path)
    except signing.ExpiredSignature as exc:
        raise StorageError("signature_expired", "Signed URL has expired", 400) from exc
    except signing.SignatureError as exc:
        raise StorageError("invalid_signature", str(exc), 400) from exc

    return await download_object(
        conn, backend, bucket_name=bucket_name, path=path, byte_range=byte_range
    )
|
|
405
|
+
|
|
406
|
+
|
|
407
|
+
async def download_public_object(
    conn: asyncpg.Connection,
    backend: StorageBackend,
    *,
    bucket_name: str,
    path: str,
    byte_range: tuple[int, int | None] | None = None,
) -> tuple[ObjectResponse, ObjectStream]:
    """Fetch an object from a ``public=true`` bucket.

    Caller is anonymous; ``conn`` should be ``role=anon`` so the policy
    ``objects: public bucket read`` is the only thing that lets the SELECT
    succeed. A non-public bucket returns 404 here without leaking that the
    object exists.
    """
    return await download_object(
        conn, backend, bucket_name=bucket_name, path=path, byte_range=byte_range
    )
|
|
429
|
+
|
|
430
|
+
|
|
431
|
+
# Public API of this module; also governs `from .service import *`.
__all__ = [
    "StorageError",
    "create_bucket",
    "list_buckets",
    "get_bucket",
    "delete_bucket",
    "upload_object",
    "download_object",
    "delete_object",
    "get_object_metadata",
    "issue_signed_url",
    "verify_signed_download",
    "download_public_object",
]
|
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
"""HMAC sign/verify for storage signed URLs.
|
|
2
|
+
|
|
3
|
+
Signed URLs are minted by supython itself (not S3 presigned URLs). This keeps
|
|
4
|
+
the URL shape identical for the local and S3 backends and lets RLS gate the
|
|
5
|
+
*sign* step — the bytes step is then a stateless signature check.
|
|
6
|
+
|
|
7
|
+
The signing secret (``storage_signed_url_secret``) is intentionally distinct
|
|
8
|
+
from the JWT private key: rotating one must not invalidate the other.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from datetime import UTC, datetime, timedelta
|
|
12
|
+
from functools import lru_cache
|
|
13
|
+
|
|
14
|
+
from itsdangerous import BadSignature, SignatureExpired, TimestampSigner
|
|
15
|
+
|
|
16
|
+
from ..settings import get_settings
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class SignatureError(Exception):
    """Base class for signed-URL verification failures."""

    # Machine-readable error code; subclasses override it and the storage
    # service maps it onto the HTTP error payload.
    code: str = "invalid_signature"
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class InvalidSignature(SignatureError):
    """Token is malformed, tampered with, or bound to another object."""

    code = "invalid_signature"
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class ExpiredSignature(SignatureError):
    """Token was valid but its embedded TTL has elapsed."""

    code = "signature_expired"
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
# Domain-separation salt: tokens minted here cannot be replayed against any
# other itsdangerous signer that happens to share the same secret.
_SALT = "supython.storage.signed-url"


def _signer_for(secret_value: str) -> TimestampSigner:
    """Build a TimestampSigner bound to the module salt."""
    return TimestampSigner(secret_value, salt=_SALT)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def _active_signer() -> TimestampSigner:
    """Return the signer used for *minting* new tokens.

    Prefers the rotating secret manifest; falls back to the legacy
    ``storage_signed_url_secret`` setting when no manifest entry exists.

    Raises:
        RuntimeError: when neither source provides a secret.
    """
    # Imported lazily, matching the module's other secretset usages.
    from ..secretset import load_signing_secret

    secret = load_signing_secret("storage_signed_url")
    if secret is None:
        secret = get_settings().storage_signed_url_secret
    if secret is None:
        raise RuntimeError(
            "no storage signed-url secret configured; run "
            "`supython secret rotate storage` or set STORAGE_SIGNED_URL_SECRET"
        )
    return _signer_for(secret)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def _payload(bucket: str, path: str, ttl: int) -> str:
|
|
56
|
+
return f"{bucket}/{path}|{ttl}"
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def sign(bucket: str, path: str, ttl: int) -> tuple[str, datetime]:
    """Return ``(token, expires_at)`` for a (bucket, path) and TTL in seconds."""
    if ttl <= 0:
        raise InvalidSignature("ttl must be positive")
    signer = _active_signer()
    token_bytes = signer.sign(_payload(bucket, path, ttl))
    expiry = datetime.now(tz=UTC) + timedelta(seconds=ttl)
    return token_bytes.decode("utf-8"), expiry
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def verify(token: str, bucket: str, path: str) -> None:
    """Raise ``SignatureError`` when ``token`` is bad, tampered, or expired.

    The TTL is read out of the signed payload itself, so the signer is the
    source of truth for how long a URL is valid for.

    Key rotation: every verification secret is tried in order. Note the
    exception routing — the outer ``except`` only catches the itsdangerous
    exceptions (``BadSignature``/``SignatureExpired``), so once a secret
    *does* verify the raw signature, the module's own ``InvalidSignature``
    / ``ExpiredSignature`` raised inside escape the loop immediately
    instead of being retried against older secrets.
    """
    from ..secretset import load_verification_secrets

    secrets_list = load_verification_secrets("storage_signed_url")
    if not secrets_list:
        # Same legacy fallback as _active_signer().
        legacy = get_settings().storage_signed_url_secret
        if legacy is None:
            raise RuntimeError(
                "no storage signed-url secret configured; run "
                "`supython secret rotate storage` or set STORAGE_SIGNED_URL_SECRET"
            )
        secrets_list = [(legacy, None)]

    last_error: Exception | None = None
    for value, _kid in secrets_list:
        signer = _signer_for(value)
        try:
            # First pass: signature only, no age check, to recover the payload.
            raw = signer.unsign(token).decode("utf-8")

            expected_prefix = f"{bucket}/{path}|"
            if not raw.startswith(expected_prefix):
                raise InvalidSignature("signed payload does not match (bucket, path)")
            try:
                ttl = int(raw[len(expected_prefix) :])
            except ValueError as exc:
                raise InvalidSignature("signed payload is malformed") from exc
            if ttl <= 0:
                raise InvalidSignature("signed payload has non-positive ttl")

            # Second pass: same signer, now enforcing the payload's own TTL.
            try:
                signer.unsign(token, max_age=ttl)
            except SignatureExpired as exc:
                raise ExpiredSignature(str(exc)) from exc
            except BadSignature as exc:
                raise InvalidSignature(str(exc)) from exc
            return  # success
        except (BadSignature, SignatureExpired) as exc:
            # Wrong secret for this token — remember and try the next one.
            last_error = exc
            continue
    raise InvalidSignature(str(last_error)) from last_error
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def reset_signer_cache() -> None:
    """Clear the cached signer; tests use this after overriding the secret.

    NOTE(review): despite the name, no signer object is cached in this
    module — this delegates to ``secretset.clear_cache()``, which is where
    the secret material is actually memoized. Confirm the name is only
    kept for API stability.
    """
    from ..secretset import clear_cache

    clear_cache()
|
supython/tokens.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
"""JWT issuance & verification.
|
|
2
|
+
|
|
3
|
+
PostgREST verifies tokens using the same public JWKS this module emits
|
|
4
|
+
(see supython.jwks). All tokens are RS256 or ES256; the symmetric
|
|
5
|
+
fallback was removed in v0.6.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import logging
|
|
9
|
+
import secrets
|
|
10
|
+
import uuid
|
|
11
|
+
from datetime import datetime, timedelta, timezone
|
|
12
|
+
from typing import Any
|
|
13
|
+
|
|
14
|
+
import jwt
|
|
15
|
+
|
|
16
|
+
from . import jwks
|
|
17
|
+
from .settings import get_settings
|
|
18
|
+
|
|
19
|
+
logger = logging.getLogger(__name__)

# Clock-skew tolerance (seconds) applied when validating time-based claims.
_LEEWAY_SECONDS = 30

# Claims every supython-issued access token must carry; enforced through the
# PyJWT ``require`` option in decode_access_token.
_REQUIRED_CLAIMS = ["exp", "iat", "aud", "role"]
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _now() -> datetime:
|
|
26
|
+
return datetime.now(tz=timezone.utc)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def issue_access_token(
    user_id: uuid.UUID,
    email: str,
    role: str = "authenticated",
    extra_claims: dict[str, Any] | None = None,
) -> tuple[str, int]:
    """Mint a signed access token for ``user_id``.

    Returns ``(token, ttl_seconds)``; the TTL mirrors the ``exp`` claim.
    ``extra_claims`` are merged last and can therefore override the
    standard claims — callers are trusted internal code.
    """
    s = get_settings()
    iat = _now()
    exp = iat + timedelta(seconds=s.access_token_ttl)
    payload: dict[str, Any] = {
        "sub": str(user_id),
        "email": email,
        "role": role,
        "aud": s.jwt_aud,
        "iat": int(iat.timestamp()),
        "exp": int(exp.timestamp()),
        # Unique token id, e.g. for revocation / replay tracking downstream.
        "jti": uuid.uuid4().hex,
    }
    if extra_claims:
        payload.update(extra_claims)

    signer = jwks.load_signing_key()
    # Use signer.alg (the algorithm of the actual signing key) rather than
    # s.jwt_alg (the env preference). During rotation or when a keyset
    # manifest pins a different alg than the env, these can disagree and
    # the resulting token would have an `alg` header that contradicts the
    # key it was signed with — making it unverifiable under its own kid.
    headers = {"kid": signer.kid, "alg": signer.alg, "typ": "JWT"}
    token = jwt.encode(payload, signer.key, algorithm=signer.alg, headers=headers)
    return token, s.access_token_ttl
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def issue_refresh_token() -> str:
    """Return an opaque, URL-safe refresh token (48 bytes of entropy)."""
    entropy_bytes = 48
    return secrets.token_urlsafe(entropy_bytes)
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def decode_access_token(token: str) -> dict[str, Any]:
    """Verify ``token`` against the current JWKS and return its claims.

    Key selection is strict: the header must name a ``kid`` present in the
    verification keyset, and only that key's own algorithm is accepted —
    never a list taken from the token. Raises ``jwt.InvalidTokenError``
    subclasses on any failure.
    """
    settings = get_settings()
    try:
        header = jwt.get_unverified_header(token)
    except jwt.DecodeError as exc:
        raise jwt.InvalidTokenError(f"malformed token header: {exc}") from exc

    kid = header.get("kid")
    keyset = jwks.load_verification_keyset()
    if kid is None or kid not in keyset:
        raise jwt.InvalidKeyError(f"unknown kid: {kid!r}")

    key_entry = keyset[kid]
    return jwt.decode(
        token,
        key_entry.key,
        algorithms=[key_entry.algorithm_name],
        audience=settings.jwt_aud,
        leeway=_LEEWAY_SECONDS,
        options={"require": _REQUIRED_CLAIMS},
    )
|