supython 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (200) hide show
  1. supython/__init__.py +24 -0
  2. supython/admin/__init__.py +3 -0
  3. supython/admin/api/__init__.py +24 -0
  4. supython/admin/api/auth.py +118 -0
  5. supython/admin/api/auth_templates.py +67 -0
  6. supython/admin/api/auth_users.py +225 -0
  7. supython/admin/api/db.py +174 -0
  8. supython/admin/api/functions.py +92 -0
  9. supython/admin/api/jobs.py +192 -0
  10. supython/admin/api/ops.py +224 -0
  11. supython/admin/api/realtime.py +281 -0
  12. supython/admin/api/service_auth.py +49 -0
  13. supython/admin/api/service_auth_templates.py +83 -0
  14. supython/admin/api/service_auth_users.py +346 -0
  15. supython/admin/api/service_db.py +214 -0
  16. supython/admin/api/service_functions.py +287 -0
  17. supython/admin/api/service_jobs.py +282 -0
  18. supython/admin/api/service_ops.py +213 -0
  19. supython/admin/api/service_realtime.py +30 -0
  20. supython/admin/api/service_storage.py +220 -0
  21. supython/admin/api/storage.py +117 -0
  22. supython/admin/api/system.py +37 -0
  23. supython/admin/audit.py +29 -0
  24. supython/admin/deps.py +22 -0
  25. supython/admin/errors.py +16 -0
  26. supython/admin/schemas.py +310 -0
  27. supython/admin/session.py +52 -0
  28. supython/admin/spa.py +38 -0
  29. supython/admin/static/assets/Alert-dluGVkos.js +49 -0
  30. supython/admin/static/assets/Audit-Njung3HI.js +2 -0
  31. supython/admin/static/assets/Backups-DzPlFgrm.js +2 -0
  32. supython/admin/static/assets/Buckets-ByacGkU1.js +2 -0
  33. supython/admin/static/assets/Channels-BoIuTtam.js +353 -0
  34. supython/admin/static/assets/ChevronRight-CtQH1EQ1.js +2 -0
  35. supython/admin/static/assets/CodeViewer-Bqy7-wvH.js +2 -0
  36. supython/admin/static/assets/Crons-B67vc39F.js +2 -0
  37. supython/admin/static/assets/DashboardView-CUTFVL6k.js +2 -0
  38. supython/admin/static/assets/DataTable-COAAWEft.js +747 -0
  39. supython/admin/static/assets/DescriptionsItem-P8JUDaBs.js +75 -0
  40. supython/admin/static/assets/DrawerContent-TpYTFgF1.js +139 -0
  41. supython/admin/static/assets/Empty-cr2r7e2u.js +25 -0
  42. supython/admin/static/assets/EmptyState-DeDck-OL.js +2 -0
  43. supython/admin/static/assets/Grid-hFkp9F4P.js +2 -0
  44. supython/admin/static/assets/Input-DppYTq9C.js +259 -0
  45. supython/admin/static/assets/Invoke-DW3Nveeh.js +2 -0
  46. supython/admin/static/assets/JsonField-DibyJgun.js +2 -0
  47. supython/admin/static/assets/LoginView-BjLyE3Ds.css +1 -0
  48. supython/admin/static/assets/LoginView-CoOjECT_.js +111 -0
  49. supython/admin/static/assets/Logs-D9WYrnIT.js +2 -0
  50. supython/admin/static/assets/Logs-DS1XPa0h.css +1 -0
  51. supython/admin/static/assets/Migrations-DOSC2ddQ.js +2 -0
  52. supython/admin/static/assets/ObjectBrowser-_5w8vOX8.js +2 -0
  53. supython/admin/static/assets/Queue-CywZs6vI.js +2 -0
  54. supython/admin/static/assets/RefreshTokens-Ccjr53jg.js +2 -0
  55. supython/admin/static/assets/RlsEditor-BSlH9vSc.js +2 -0
  56. supython/admin/static/assets/Routes-BiLXE49D.js +2 -0
  57. supython/admin/static/assets/Routes-C-ianIGD.css +1 -0
  58. supython/admin/static/assets/SchemaBrowser-DKy2_KQi.css +1 -0
  59. supython/admin/static/assets/SchemaBrowser-XFvFbtDB.js +2 -0
  60. supython/admin/static/assets/Select-DIzZyRZb.js +434 -0
  61. supython/admin/static/assets/Space-n5-XcguU.js +400 -0
  62. supython/admin/static/assets/SqlEditor-b8pTsILY.js +3 -0
  63. supython/admin/static/assets/SqlWorkspace-BUS7IntH.js +104 -0
  64. supython/admin/static/assets/TableData-CQIagLKn.js +2 -0
  65. supython/admin/static/assets/Tag-D1fOKpTH.js +72 -0
  66. supython/admin/static/assets/Templates-BS-ugkdq.js +2 -0
  67. supython/admin/static/assets/Thing-CEAniuMg.js +107 -0
  68. supython/admin/static/assets/Users-wzwajhlh.js +2 -0
  69. supython/admin/static/assets/_plugin-vue_export-helper-DGA9ry_j.js +1 -0
  70. supython/admin/static/assets/dist-VXIJLCYq.js +13 -0
  71. supython/admin/static/assets/format-length-CGCY1rMh.js +2 -0
  72. supython/admin/static/assets/get-Ca6unauB.js +2 -0
  73. supython/admin/static/assets/index-CeE6v959.js +951 -0
  74. supython/admin/static/assets/pinia-COXwfrOX.js +2 -0
  75. supython/admin/static/assets/resources-Bt6thQCD.js +44 -0
  76. supython/admin/static/assets/use-locale-mtgM0a3a.js +2 -0
  77. supython/admin/static/assets/use-merged-state-BvhkaHNX.js +2 -0
  78. supython/admin/static/assets/useConfirm-tMjvBFXR.js +2 -0
  79. supython/admin/static/assets/useResource-C_rJCY8C.js +2 -0
  80. supython/admin/static/assets/useTable-CnZc5zhi.js +363 -0
  81. supython/admin/static/assets/useTable-Dg0XlRlq.css +1 -0
  82. supython/admin/static/assets/useToast-DsZKx0IX.js +2 -0
  83. supython/admin/static/assets/utils-sbXoq7Ir.js +2 -0
  84. supython/admin/static/favicon.svg +1 -0
  85. supython/admin/static/icons.svg +24 -0
  86. supython/admin/static/index.html +24 -0
  87. supython/app.py +162 -0
  88. supython/auth/__init__.py +3 -0
  89. supython/auth/_email_job.py +11 -0
  90. supython/auth/providers/__init__.py +34 -0
  91. supython/auth/providers/github.py +22 -0
  92. supython/auth/providers/google.py +19 -0
  93. supython/auth/providers/oauth.py +56 -0
  94. supython/auth/providers/registry.py +16 -0
  95. supython/auth/ratelimit.py +39 -0
  96. supython/auth/router.py +282 -0
  97. supython/auth/schemas.py +79 -0
  98. supython/auth/service.py +587 -0
  99. supython/backups/__init__.py +24 -0
  100. supython/backups/_backup_job.py +170 -0
  101. supython/backups/schemas.py +18 -0
  102. supython/backups/service.py +217 -0
  103. supython/body_size.py +184 -0
  104. supython/cli.py +1663 -0
  105. supython/client/__init__.py +67 -0
  106. supython/client/_auth.py +249 -0
  107. supython/client/_client.py +145 -0
  108. supython/client/_config.py +92 -0
  109. supython/client/_functions.py +69 -0
  110. supython/client/_storage.py +255 -0
  111. supython/client/py.typed +0 -0
  112. supython/db.py +151 -0
  113. supython/db_admin.py +8 -0
  114. supython/extensions.py +36 -0
  115. supython/functions/__init__.py +19 -0
  116. supython/functions/context.py +262 -0
  117. supython/functions/loader.py +307 -0
  118. supython/functions/router.py +228 -0
  119. supython/functions/schemas.py +50 -0
  120. supython/gen/__init__.py +5 -0
  121. supython/gen/_introspect.py +137 -0
  122. supython/gen/types_py.py +270 -0
  123. supython/gen/types_ts.py +365 -0
  124. supython/health.py +229 -0
  125. supython/hooks.py +117 -0
  126. supython/jobs/__init__.py +31 -0
  127. supython/jobs/backends.py +97 -0
  128. supython/jobs/context.py +58 -0
  129. supython/jobs/cron.py +152 -0
  130. supython/jobs/cron_inproc.py +119 -0
  131. supython/jobs/decorators.py +76 -0
  132. supython/jobs/registry.py +79 -0
  133. supython/jobs/router.py +136 -0
  134. supython/jobs/schemas.py +92 -0
  135. supython/jobs/service.py +311 -0
  136. supython/jobs/worker.py +219 -0
  137. supython/jwks.py +257 -0
  138. supython/keyset.py +279 -0
  139. supython/logging_config.py +291 -0
  140. supython/mail.py +33 -0
  141. supython/mailer.py +65 -0
  142. supython/migrate.py +81 -0
  143. supython/migrations/0001_extensions_and_roles.sql +46 -0
  144. supython/migrations/0002_auth_schema.sql +66 -0
  145. supython/migrations/0003_demo_todos.sql +42 -0
  146. supython/migrations/0004_auth_v0_2.sql +47 -0
  147. supython/migrations/0005_storage_schema.sql +117 -0
  148. supython/migrations/0006_realtime_schema.sql +206 -0
  149. supython/migrations/0007_jobs_schema.sql +254 -0
  150. supython/migrations/0008_jobs_last_error.sql +56 -0
  151. supython/migrations/0009_auth_rate_limits.sql +33 -0
  152. supython/migrations/0010_worker_heartbeat.sql +14 -0
  153. supython/migrations/0011_admin_schema.sql +45 -0
  154. supython/migrations/0012_auth_banned_until.sql +10 -0
  155. supython/migrations/0013_email_templates.sql +19 -0
  156. supython/migrations/0014_realtime_payload_warning.sql +96 -0
  157. supython/migrations/0015_backups_schema.sql +14 -0
  158. supython/passwords.py +15 -0
  159. supython/realtime/__init__.py +6 -0
  160. supython/realtime/broker.py +814 -0
  161. supython/realtime/protocol.py +234 -0
  162. supython/realtime/router.py +184 -0
  163. supython/realtime/schemas.py +207 -0
  164. supython/realtime/service.py +261 -0
  165. supython/realtime/topics.py +175 -0
  166. supython/realtime/websocket.py +586 -0
  167. supython/scaffold/__init__.py +5 -0
  168. supython/scaffold/init_project.py +144 -0
  169. supython/scaffold/templates/Caddyfile.tmpl +4 -0
  170. supython/scaffold/templates/README.md.tmpl +22 -0
  171. supython/scaffold/templates/apps_hooks.py.tmpl +11 -0
  172. supython/scaffold/templates/apps_jobs.py.tmpl +8 -0
  173. supython/scaffold/templates/asgi.py.tmpl +14 -0
  174. supython/scaffold/templates/docker-compose.prod.yml.tmpl +84 -0
  175. supython/scaffold/templates/docker-compose.yml.tmpl +45 -0
  176. supython/scaffold/templates/docker_postgres_Dockerfile.tmpl +9 -0
  177. supython/scaffold/templates/docker_postgres_postgresql.conf.tmpl +3 -0
  178. supython/scaffold/templates/env.example.tmpl +168 -0
  179. supython/scaffold/templates/functions_README.md.tmpl +21 -0
  180. supython/scaffold/templates/gitignore.tmpl +14 -0
  181. supython/scaffold/templates/manage.py.tmpl +11 -0
  182. supython/scaffold/templates/migrations/.gitkeep +0 -0
  183. supython/scaffold/templates/package_init.py.tmpl +1 -0
  184. supython/scaffold/templates/settings.py.tmpl +31 -0
  185. supython/secretset.py +347 -0
  186. supython/security_headers.py +78 -0
  187. supython/settings.py +244 -0
  188. supython/settings_module.py +117 -0
  189. supython/storage/__init__.py +5 -0
  190. supython/storage/backends.py +392 -0
  191. supython/storage/router.py +341 -0
  192. supython/storage/schemas.py +50 -0
  193. supython/storage/service.py +445 -0
  194. supython/storage/signing.py +119 -0
  195. supython/tokens.py +85 -0
  196. supython-0.1.0.dist-info/METADATA +756 -0
  197. supython-0.1.0.dist-info/RECORD +200 -0
  198. supython-0.1.0.dist-info/WHEEL +4 -0
  199. supython-0.1.0.dist-info/entry_points.txt +2 -0
  200. supython-0.1.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,814 @@
1
+ """In-process realtime broker.
2
+
3
+ The broker is the heart of the realtime module:
4
+
5
+ * It owns a single dedicated ``asyncpg.Connection`` running
6
+ ``LISTEN realtime:changes``. When the trigger function in
7
+ :file:`migrations/0006_realtime_schema.sql` fires ``pg_notify``, the
8
+ payload lands here and is dispatched to in-process subscribers.
9
+
10
+ * It tracks every WebSocket connection and the channels each connection
11
+ has joined. Per-connection bounded queues provide back-pressure so a
12
+ slow client cannot drown the process; on overflow we drop the oldest
13
+ frame and increment a counter exposed for metrics.
14
+
15
+ * It performs the per-event RLS visibility check by acquiring a
16
+ role-scoped connection (via :func:`db.as_role`) for the subscriber.
17
+ ``service_role`` connections — used for server-side fan-out from edge
18
+ functions — bypass the check, matching Postgres semantics.
19
+
20
+ * For ``DELETE`` events the row is gone, so the broker falls back to
21
+ comparing ``old_record.<owner_column>`` against the subscriber's
22
+ ``auth.uid()``. If the table was registered with ``owner_column =
23
+ null``, ``DELETE`` events are only delivered to ``service_role``
24
+ subscribers.
25
+
26
+ * It maintains presence state (``dict[topic, dict[key, list[meta]]]``)
27
+ in memory and emits ``presence_state`` / ``presence_diff`` frames.
28
+
29
+ The broker is a singleton. ``get_broker()`` returns the lazily-created
30
+ instance; tests that need isolation can call ``reset_broker()`` between
31
+ runs or instantiate :class:`Broker` directly.
32
+ """
33
+
34
+ import asyncio
35
+ import contextlib
36
+ import itertools
37
+ import json
38
+ import logging
39
+ import time
40
+ from dataclasses import dataclass, field
41
+ from datetime import UTC, datetime
42
+ from typing import Any
43
+ from uuid import UUID
44
+
45
+ import asyncpg
46
+ from pydantic import ValidationError
47
+
48
+ from .. import db
49
+ from ..settings import get_settings
50
+ from .protocol import (
51
+ EVENT_BROADCAST,
52
+ EVENT_POSTGRES_CHANGES,
53
+ EVENT_PRESENCE_DIFF,
54
+ EVENT_PRESENCE_STATE,
55
+ make_server_push,
56
+ )
57
+ from .schemas import (
58
+ EnabledTable,
59
+ Frame,
60
+ PostgresChangesData,
61
+ PostgresChangesPush,
62
+ PresenceDiff,
63
+ )
64
+ from .service import get_enabled, rls_check
65
+ from .topics import EqFilter, InFilter, ParsedFilter, ResolvedSubscription
66
+
67
+ logger = logging.getLogger(__name__)
68
+
69
+
70
+ # ---------------------------------------------------------------------------
71
+ # Subscription bookkeeping
72
+ # ---------------------------------------------------------------------------
73
+
74
+
75
@dataclass(slots=True)
class ChannelSubscription:
    """A single channel join held by one WebSocket connection.

    Each connection may join multiple ``realtime:<name>`` channels. Each
    join carries an ordered list of :class:`ResolvedSubscription` (the
    server-assigned ids / parsed filters for the join's
    ``config.postgres_changes`` entries), plus broadcast and presence
    config.
    """

    # Channel topic this join is attached to (e.g. "realtime:<name>").
    topic: str
    # Client-supplied join reference; presumably echoed back in replies
    # per Phoenix-channel convention — confirm against the WS layer.
    join_ref: str | None
    # Resolved postgres_changes subscriptions for this join, in the order
    # the client requested them.
    postgres_changes: list[ResolvedSubscription] = field(default_factory=list)
    # When True the sender receives its own broadcast frames
    # (config.broadcast.self).
    broadcast_self: bool = False
    # Presence bucket key; the broker substitutes str(conn.id) when empty
    # (see Broker.subscribe).
    presence_key: str = ""
91
+
92
+
93
@dataclass(slots=True)
class Connection:
    """One WebSocket connection registered with the broker.

    ``role`` and ``claims`` reflect the *current* JWT — they may be
    updated mid-stream when the client sends an ``access_token`` event.
    The outbound queue is bounded; producers (the broker) drop the
    oldest frame on overflow rather than block, and the WS writer task
    drains it.
    """

    # Broker-assigned monotonically increasing id (see Broker._conn_id_counter).
    id: int
    # Current Postgres role for the connection ("anon", "authenticated", ...).
    role: str
    # Decoded JWT claims; "sub" and "exp" are the ones this module reads.
    claims: dict[str, Any]
    # Bounded queue of frames awaiting the WS writer task.
    outbound: asyncio.Queue[Frame]
    # topic -> ChannelSubscription for every channel this connection joined.
    subscriptions: dict[str, ChannelSubscription] = field(default_factory=dict)
    # Count of frames discarded due to queue overflow (metrics).
    dropped: int = 0
    # Set on unregister; enqueue/fan-out paths skip closed connections.
    closed: bool = False

    @property
    def is_token_expired(self) -> bool:
        """True when the JWT ``exp`` claim is in the past.

        ``anon`` connections (and any connection without an ``exp``
        claim) never expire. Used by the broker to stop forwarding
        ``postgres_changes`` and by the WS layer to reject ``broadcast``
        / ``presence`` until a fresh ``access_token`` arrives.
        """
        exp = self.claims.get("exp")
        if exp is None:
            return False
        try:
            # exp may arrive as int, float, or numeric string; coerce.
            return time.time() >= float(exp)
        except (TypeError, ValueError):
            # Unparsable exp: deliberately treated as "not expired".
            return False
128
+
129
+
130
+ # ---------------------------------------------------------------------------
131
+ # Broker
132
+ # ---------------------------------------------------------------------------
133
+
134
+
135
class BrokerError(RuntimeError):
    """Raised when the broker is misused (e.g. unknown table, queue full)."""
137
+
138
+
139
+ class Broker:
140
+ """In-process realtime fan-out engine.
141
+
142
+ Lifecycle:
143
+
144
+ * :meth:`start` — opens the dedicated listener connection and
145
+ attaches the LISTEN callback. Idempotent.
146
+ * :meth:`stop` — detaches the listener and closes the connection.
147
+
148
+ All other methods are safe to call only between ``start`` and
149
+ ``stop``.
150
+ """
151
+
152
+ # Conservative ceiling for reconnect backoff. Five minutes is long
153
+ # enough that even a flapping pg_notify connection does not hot-loop,
154
+ # short enough that a recovered DB is picked up before clients give up.
155
+ _MAX_RECONNECT_BACKOFF_S = 300.0
156
+ _INITIAL_RECONNECT_BACKOFF_S = 1.0
157
+
158
+ def __init__(self) -> None:
159
+ s = get_settings()
160
+ self._channel: str = s.realtime_notify_channel
161
+ self._queue_size: int = s.realtime_broker_queue_size
162
+ self._rls_timeout: float = s.realtime_rls_check_timeout_s
163
+ self._max_connections: int = s.realtime_max_connections
164
+ self._database_url: str = s.database_url
165
+
166
+ self._connections: dict[int, Connection] = {}
167
+ # topic -> set of connection ids subscribed
168
+ self._topics: dict[str, set[int]] = {}
169
+ # topic -> presence key -> ordered list of (conn_id, meta) tuples.
170
+ # We keep conn_id so we can clean up when a connection departs.
171
+ self._presence: dict[str, dict[str, list[tuple[int, dict[str, Any]]]]] = {}
172
+ # Cache for realtime.enabled_tables; populated lazily per (schema, table).
173
+ # The cache is invalidated only on broker restart — calls to
174
+ # realtime.enable() during a long-running process won't be picked up
175
+ # without a restart, which is acceptable for v0.4.
176
+ self._registry_cache: dict[tuple[str, str], EnabledTable | None] = {}
177
+
178
+ self._conn_id_counter: itertools.count[int] = itertools.count(1)
179
+ self._listener: asyncpg.Connection | None = None
180
+ self._listener_task: asyncio.Task[None] | None = None
181
+ self._stopping: bool = False
182
+ self._lock = asyncio.Lock()
183
+
184
+ # -- lifecycle ----------------------------------------------------------
185
+
186
+ @property
187
+ def is_healthy(self) -> bool:
188
+ """True when the listener task is running and the connection is open."""
189
+ if self._stopping:
190
+ return False
191
+ if self._listener_task is None or self._listener_task.done():
192
+ return False
193
+ return self._listener is not None and not self._listener.is_closed()
194
+
195
+ @property
196
+ def connection_count(self) -> int:
197
+ """Number of registered realtime connections."""
198
+ return len(self._connections)
199
+
200
+ async def start(self) -> None:
201
+ """Start the dedicated LISTEN connection in the background."""
202
+ if self._listener_task is not None and not self._listener_task.done():
203
+ return
204
+ self._stopping = False
205
+ self._listener_task = asyncio.create_task(
206
+ self._listener_loop(), name="realtime-broker-listener"
207
+ )
208
+
209
    async def stop(self) -> None:
        """Detach the listener and close the dedicated connection."""
        # Flag first so the listener loop and is_healthy observe shutdown
        # before anything is torn down.
        self._stopping = True
        if self._listener_task is not None:
            self._listener_task.cancel()
            with contextlib.suppress(asyncio.CancelledError):
                await self._listener_task
            self._listener_task = None
        if self._listener is not None and not self._listener.is_closed():
            # Best-effort teardown: the connection may already be broken,
            # so swallow any error from remove_listener/close.
            with contextlib.suppress(Exception):
                await self._listener.remove_listener(self._channel, self._on_notification)
            with contextlib.suppress(Exception):
                await self._listener.close()
        self._listener = None
        # Drain registered connections so test runs do not leak state.
        self._connections.clear()
        self._topics.clear()
        self._presence.clear()
        self._registry_cache.clear()
228
+
229
+ # -- connection registry -----------------------------------------------
230
+
231
+ async def register(
232
+ self,
233
+ *,
234
+ role: str,
235
+ claims: dict[str, Any],
236
+ ) -> Connection:
237
+ """Register a fresh WebSocket connection.
238
+
239
+ Raises :class:`BrokerError` if the per-process connection cap is
240
+ already reached.
241
+ """
242
+ async with self._lock:
243
+ if len(self._connections) >= self._max_connections:
244
+ raise BrokerError(
245
+ f"realtime: connection cap reached ({self._max_connections})"
246
+ )
247
+ conn = Connection(
248
+ id=next(self._conn_id_counter),
249
+ role=role,
250
+ claims=claims,
251
+ outbound=asyncio.Queue(maxsize=self._queue_size),
252
+ )
253
+ self._connections[conn.id] = conn
254
+ return conn
255
+
256
+ async def unregister(self, conn: Connection) -> None:
257
+ """Remove a connection and tear down all its subscriptions/presence."""
258
+ async with self._lock:
259
+ conn.closed = True
260
+ self._connections.pop(conn.id, None)
261
+ for topic in list(conn.subscriptions.keys()):
262
+ self._unsubscribe_locked(conn, topic, send_diff=True)
263
+
264
+ def update_claims(
265
+ self,
266
+ conn: Connection,
267
+ *,
268
+ role: str,
269
+ claims: dict[str, Any],
270
+ ) -> None:
271
+ """Mutate the role/claims of an existing connection (access_token)."""
272
+ conn.role = role
273
+ conn.claims = claims
274
+
275
+ # -- channel subscribe / leave -----------------------------------------
276
+
277
+ async def subscribe(
278
+ self,
279
+ conn: Connection,
280
+ *,
281
+ topic: str,
282
+ join_ref: str | None,
283
+ postgres_changes: list[ResolvedSubscription],
284
+ broadcast_self: bool,
285
+ presence_key: str,
286
+ ) -> ChannelSubscription:
287
+ """Attach *conn* to *topic* with the given config.
288
+
289
+ Idempotent on re-join: an existing subscription on the same topic
290
+ is replaced, and any presence entries owned by the connection on
291
+ that topic are cleared first.
292
+ """
293
+ async with self._lock:
294
+ if topic in conn.subscriptions:
295
+ self._unsubscribe_locked(conn, topic, send_diff=True)
296
+ sub = ChannelSubscription(
297
+ topic=topic,
298
+ join_ref=join_ref,
299
+ postgres_changes=list(postgres_changes),
300
+ broadcast_self=broadcast_self,
301
+ presence_key=presence_key or str(conn.id),
302
+ )
303
+ conn.subscriptions[topic] = sub
304
+ self._topics.setdefault(topic, set()).add(conn.id)
305
+ return sub
306
+
307
+ async def unsubscribe(self, conn: Connection, topic: str) -> None:
308
+ """Detach *conn* from *topic* and emit a presence diff if needed."""
309
+ async with self._lock:
310
+ self._unsubscribe_locked(conn, topic, send_diff=True)
311
+
312
    def _unsubscribe_locked(
        self,
        conn: Connection,
        topic: str,
        *,
        send_diff: bool,
    ) -> None:
        """Detach *conn* from *topic*. Caller must hold ``self._lock``."""
        conn.subscriptions.pop(topic, None)
        peers = self._topics.get(topic)
        if peers is not None:
            peers.discard(conn.id)
            if not peers:
                # Last subscriber left — drop the topic entry entirely.
                self._topics.pop(topic, None)
        # Presence entries owned by this connection leave with it.
        leaves = self._drop_presence_for(conn, topic)
        if send_diff and leaves:
            self._emit_presence_diff_locked(topic, joins={}, leaves=leaves)
328
+
329
+ # -- broadcast ----------------------------------------------------------
330
+
331
+ def broadcast(
332
+ self,
333
+ *,
334
+ topic: str,
335
+ event: str,
336
+ payload: dict[str, Any],
337
+ sender_id: int | None = None,
338
+ ) -> int:
339
+ """Fan a Phoenix ``broadcast`` event out to every subscriber of *topic*.
340
+
341
+ ``sender_id`` is the connection id of the originator (or ``None``
342
+ for REST-initiated broadcasts). The sender is excluded from the
343
+ fan-out unless they joined with ``config.broadcast.self = true``.
344
+
345
+ Returns the number of recipients the frame was enqueued for.
346
+ """
347
+ peers = self._topics.get(topic)
348
+ if not peers:
349
+ return 0
350
+ frame = make_server_push(
351
+ topic=topic,
352
+ event=EVENT_BROADCAST,
353
+ payload={"type": "broadcast", "event": event, "payload": payload},
354
+ )
355
+ delivered = 0
356
+ for cid in list(peers):
357
+ conn = self._connections.get(cid)
358
+ if conn is None or conn.closed:
359
+ continue
360
+ if cid == sender_id:
361
+ sub = conn.subscriptions.get(topic)
362
+ if sub is None or not sub.broadcast_self:
363
+ continue
364
+ if self._enqueue(conn, frame):
365
+ delivered += 1
366
+ return delivered
367
+
368
+ # -- presence -----------------------------------------------------------
369
+
370
+ async def track_presence(
371
+ self,
372
+ conn: Connection,
373
+ *,
374
+ topic: str,
375
+ meta: dict[str, Any],
376
+ ) -> None:
377
+ """Add a presence entry for *conn* on *topic*."""
378
+ async with self._lock:
379
+ sub = conn.subscriptions.get(topic)
380
+ if sub is None:
381
+ raise BrokerError(
382
+ f"connection {conn.id} cannot track presence on {topic!r} — not joined"
383
+ )
384
+ key = sub.presence_key
385
+ bucket = self._presence.setdefault(topic, {}).setdefault(key, [])
386
+ bucket.append((conn.id, meta))
387
+ self._emit_presence_diff_locked(
388
+ topic,
389
+ joins={key: [meta]},
390
+ leaves={},
391
+ )
392
+
393
+ async def untrack_presence(
394
+ self,
395
+ conn: Connection,
396
+ *,
397
+ topic: str,
398
+ ) -> None:
399
+ """Remove every presence entry owned by *conn* on *topic*."""
400
+ async with self._lock:
401
+ leaves = self._drop_presence_for(conn, topic)
402
+ if leaves:
403
+ self._emit_presence_diff_locked(topic, joins={}, leaves=leaves)
404
+
405
+ def presence_state(self, topic: str) -> dict[str, list[dict[str, Any]]]:
406
+ """Snapshot the current presence map for *topic*.
407
+
408
+ The returned dict is a fresh copy — callers may mutate it freely.
409
+ """
410
+ bucket = self._presence.get(topic) or {}
411
+ return {key: [meta for _cid, meta in entries] for key, entries in bucket.items()}
412
+
413
+ async def push_presence_state(self, conn: Connection, topic: str) -> None:
414
+ """Send the full ``presence_state`` for *topic* to *conn* (post-join).
415
+
416
+ Supabase ships ``presence_state`` with the raw key→[meta] map as
417
+ the payload (no ``presences`` wrapper); we mirror that shape.
418
+ """
419
+ state = self.presence_state(topic)
420
+ frame = make_server_push(
421
+ topic=topic,
422
+ event=EVENT_PRESENCE_STATE,
423
+ payload=state,
424
+ )
425
+ self._enqueue(conn, frame)
426
+
427
+ # -- postgres_changes fan-out ------------------------------------------
428
+
429
    async def _on_notification(
        self,
        connection: asyncpg.Connection,
        pid: int,
        channel: str,
        payload: str,
    ) -> None:
        """asyncpg LISTEN callback — runs on the listener connection."""
        # Defensive: asyncpg only delivers channels we LISTEN on, but
        # filter anyway in case the connection carries other listeners.
        if channel != self._channel:
            return
        try:
            event = json.loads(payload)
        except json.JSONDecodeError:
            logger.warning("realtime: dropped malformed notify payload (not JSON)")
            return
        try:
            await self._fanout_change(event)
        except Exception:  # noqa: BLE001 — broker must never propagate
            # Truncate payload in the log line to keep log volume bounded.
            logger.exception("realtime: fan-out failed for payload=%s", payload[:200])
448
+
449
    async def fanout_change(self, event: dict[str, Any]) -> None:
        """Public entry point used by tests to inject a synthetic notify event."""
        await self._fanout_change(event)
452
+
453
    async def _fanout_change(self, event: dict[str, Any]) -> None:
        """Validate, authorize and deliver one notify payload to subscribers."""
        try:
            # Re-shape the raw notify dict into the typed wire model;
            # anything that fails validation is dropped (and logged).
            data = PostgresChangesData.model_validate(
                {
                    "schema": event.get("schema"),
                    "table": event.get("table"),
                    "type": event.get("type"),
                    "commit_timestamp": event.get("commit_timestamp"),
                    "columns": event.get("columns") or [],
                    "record": event.get("record"),
                    "old_record": event.get("old_record"),
                }
            )
        except ValidationError as exc:
            logger.warning("realtime: dropped malformed notify payload: %s", exc)
            return

        # Events for tables not registered in realtime.enabled_tables are
        # ignored outright.
        registry = await self._lookup_registry(data.schema_name, data.table)
        if registry is None:
            logger.debug(
                "realtime: notify for %s.%s but table is not in the registry",
                data.schema_name,
                data.table,
            )
            return

        # Snapshot recipients under the lock so concurrent subscribe/leave
        # calls don't trip our iteration; the actual SELECT 1 RLS probes
        # happen outside the lock.
        candidates: list[tuple[Connection, ChannelSubscription, list[int]]] = []
        async with self._lock:
            for conn in self._connections.values():
                if conn.closed:
                    continue
                for sub in conn.subscriptions.values():
                    matched_ids = _match_subscription_ids(sub, data)
                    if matched_ids:
                        candidates.append((conn, sub, matched_ids))

        if not candidates:
            return

        for conn, _sub, matched_ids in candidates:
            try:
                allowed = await self._authorize(conn, data, registry)
            except Exception:  # noqa: BLE001 — never let one client stall fan-out
                logger.exception(
                    "realtime: authorize failed for conn=%s table=%s.%s",
                    conn.id,
                    data.schema_name,
                    data.table,
                )
                continue
            if not allowed:
                continue
            push = PostgresChangesPush(ids=matched_ids, data=data)
            frame = make_server_push(
                topic=_topic_for(conn, data),
                event=EVENT_POSTGRES_CHANGES,
                payload=push.model_dump(mode="json"),
            )
            self._enqueue(conn, frame)
515
+
516
    async def _authorize(
        self,
        conn: Connection,
        data: PostgresChangesData,
        registry: EnabledTable,
    ) -> bool:
        """Return True when *conn* is allowed to see this change event."""
        # service_role bypasses RLS by Postgres convention; mirror it here so
        # server-side listeners (edge functions) see everything.
        if conn.role == "service_role":
            return True

        # Stop forwarding to authenticated subscribers whose JWT has expired
        # mid-stream — the connection stays open (heartbeats continue) so the
        # client can rotate via the access_token in-channel event.
        if conn.is_token_expired:
            return False

        if data.type == "DELETE":
            # The row is gone — fall back to the owner-column short circuit.
            if registry.owner_column is None:
                return False
            owner_value = (data.old_record or {}).get(registry.owner_column)
            if owner_value is None:
                return False
            uid = conn.claims.get("sub")
            if uid is None:
                return False
            try:
                return UUID(str(owner_value)) == UUID(str(uid))
            except (ValueError, AttributeError):
                # Non-UUID owner columns: fall back to string equality.
                return str(owner_value) == str(uid)

        # INSERT / UPDATE: probe with a role-scoped SELECT 1.
        record = data.record or {}
        try:
            pk_values = [record[col] for col in registry.pk_columns]
        except KeyError as exc:
            # A pk column missing from the payload makes the probe
            # impossible; deny and log rather than guess.
            logger.warning(
                "realtime: %s.%s notification missing pk column %s; dropping",
                data.schema_name,
                data.table,
                exc,
            )
            return False

        try:
            async with db.as_role(conn.role, conn.claims) as scoped:
                return await rls_check(
                    scoped,
                    schema_name=registry.schema_name,
                    table_name=registry.table_name,
                    pk_columns=registry.pk_columns,
                    pk_values=pk_values,
                    timeout=self._rls_timeout,
                )
        except ValueError:
            # role not in db_allowed_roles (e.g. "service_role" through the WS path)
            # — treat as deny to be safe; the service_role short-circuit above
            # is the only legitimate way through.
            return False
577
+
578
+ # -- internals ----------------------------------------------------------
579
+
580
+ async def _lookup_registry(
581
+ self,
582
+ schema_name: str,
583
+ table_name: str,
584
+ ) -> EnabledTable | None:
585
+ key = (schema_name, table_name)
586
+ if key in self._registry_cache:
587
+ return self._registry_cache[key]
588
+ # Use a service_role-equivalent: the registry is grant-readable by
589
+ # anon/authenticated, so any pool connection works.
590
+ async with db.acquire() as conn:
591
+ row = await get_enabled(conn, schema_name, table_name)
592
+ self._registry_cache[key] = row
593
+ return row
594
+
595
+ def _enqueue(self, conn: Connection, frame: Frame) -> bool:
596
+ """Best-effort enqueue with drop-oldest on overflow.
597
+
598
+ Returns ``True`` if the frame was queued, ``False`` if the
599
+ connection is closed.
600
+ """
601
+ if conn.closed:
602
+ return False
603
+ q = conn.outbound
604
+ if q.full():
605
+ try:
606
+ q.get_nowait()
607
+ conn.dropped += 1
608
+ except asyncio.QueueEmpty:
609
+ pass
610
+ try:
611
+ q.put_nowait(frame)
612
+ except asyncio.QueueFull:
613
+ conn.dropped += 1
614
+ return False
615
+ return True
616
+
617
+ def _drop_presence_for(
618
+ self,
619
+ conn: Connection,
620
+ topic: str,
621
+ ) -> dict[str, list[dict[str, Any]]]:
622
+ """Strip presence entries owned by *conn* on *topic*.
623
+
624
+ Returns a dict mapping presence key → metas removed, suitable for
625
+ a ``presence_diff.leaves`` payload. Caller must hold the lock.
626
+ """
627
+ leaves: dict[str, list[dict[str, Any]]] = {}
628
+ bucket = self._presence.get(topic)
629
+ if bucket is None:
630
+ return leaves
631
+ for key in list(bucket.keys()):
632
+ entries = bucket[key]
633
+ kept: list[tuple[int, dict[str, Any]]] = []
634
+ removed: list[dict[str, Any]] = []
635
+ for cid, meta in entries:
636
+ if cid == conn.id:
637
+ removed.append(meta)
638
+ else:
639
+ kept.append((cid, meta))
640
+ if removed:
641
+ leaves[key] = removed
642
+ if kept:
643
+ bucket[key] = kept
644
+ else:
645
+ bucket.pop(key, None)
646
+ if not bucket:
647
+ self._presence.pop(topic, None)
648
+ return leaves
649
+
650
+ def _emit_presence_diff_locked(
651
+ self,
652
+ topic: str,
653
+ *,
654
+ joins: dict[str, list[dict[str, Any]]],
655
+ leaves: dict[str, list[dict[str, Any]]],
656
+ ) -> None:
657
+ peers = self._topics.get(topic)
658
+ if not peers:
659
+ return
660
+ diff = PresenceDiff(joins=joins, leaves=leaves)
661
+ frame = make_server_push(
662
+ topic=topic,
663
+ event=EVENT_PRESENCE_DIFF,
664
+ payload=diff.model_dump(mode="json"),
665
+ )
666
+ for cid in peers:
667
+ conn = self._connections.get(cid)
668
+ if conn is None or conn.closed:
669
+ continue
670
+ self._enqueue(conn, frame)
671
+
672
+ # -- listener loop ------------------------------------------------------
673
+
674
    async def _listener_loop(self) -> None:
        """Background task: keep a LISTEN connection alive, retrying on failure.

        Opens a dedicated asyncpg connection to ``self._database_url``,
        registers ``self._on_notification`` as the LISTEN callback for
        ``self._channel``, then idles until either ``self._stopping`` is set
        or the connection closes. Any failure tears the connection down and
        retries after an exponential backoff; the backoff resets to its
        initial value after every successful (re)connect.
        """
        backoff = self._INITIAL_RECONNECT_BACKOFF_S
        while not self._stopping:
            try:
                listener = await asyncpg.connect(self._database_url)
                self._listener = listener
                await listener.add_listener(self._channel, self._on_notification)
                logger.info("realtime: LISTEN %s established", self._channel)
                # Successful (re)connect: restart the backoff schedule.
                backoff = self._INITIAL_RECONNECT_BACKOFF_S
                # asyncpg dispatches notifications on its own; we just need
                # to keep the connection open and notice when it dies.
                while not self._stopping and not listener.is_closed():
                    await asyncio.sleep(5.0)
            except asyncio.CancelledError:
                # Cancellation must propagate so shutdown is not swallowed.
                raise
            except Exception:  # noqa: BLE001
                logger.exception(
                    "realtime: listener connection lost; reconnecting in %.1fs",
                    backoff,
                )
            finally:
                # Best-effort teardown of whatever half-open state remains;
                # both remove_listener and close may themselves fail.
                if self._listener is not None and not self._listener.is_closed():
                    with contextlib.suppress(Exception):
                        await self._listener.remove_listener(
                            self._channel, self._on_notification
                        )
                    with contextlib.suppress(Exception):
                        await self._listener.close()
                self._listener = None

            if self._stopping:
                return
            try:
                await asyncio.sleep(backoff)
            except asyncio.CancelledError:
                raise
            # Exponential backoff, capped at the configured maximum.
            backoff = min(backoff * 2.0, self._MAX_RECONNECT_BACKOFF_S)
712
+
713
+
714
+ # ---------------------------------------------------------------------------
715
+ # Helpers (module-private)
716
+ # ---------------------------------------------------------------------------
717
+
718
+
719
def _match_subscription_ids(
    sub: ChannelSubscription,
    data: PostgresChangesData,
) -> list[int]:
    """Return the subscription ids on *sub* whose filter matches *data*.

    Returns ``[]`` when no postgres_changes filter on this channel matches
    (the channel might still receive broadcasts, just not this row event).
    """
    hits: list[int] = []
    for resolved in sub.postgres_changes:
        spec = resolved.filter_spec
        targets_table = (
            spec.schema_name == data.schema_name and spec.table == data.table
        )
        if not targets_table:
            continue
        # "*" subscribes to all event types; otherwise require an exact match.
        if spec.event not in ("*", data.type):
            continue
        if _row_matches_filter(resolved.parsed_filter, data):
            hits.append(resolved.id)
    return hits
741
+
742
+
743
def _row_matches_filter(
    parsed: ParsedFilter | None,
    data: PostgresChangesData,
) -> bool:
    """Evaluate a parsed column filter against the row carried by *data*."""
    if parsed is None:
        # No filter on this subscription — every row event matches.
        return True
    # For DELETE the row is in old_record; for INSERT/UPDATE, in record.
    if data.type in ("INSERT", "UPDATE"):
        row = data.record
    else:
        row = data.old_record
    if row is None:
        return False
    if isinstance(parsed, EqFilter):
        return _coerce(row.get(parsed.column)) == parsed.value
    if isinstance(parsed, InFilter):
        return _coerce(row.get(parsed.column)) in parsed.values
    # Unknown filter kind: fail closed.
    return False
758
+
759
+
760
+ def _coerce(value: Any) -> str:
761
+ """Normalize a Postgres column value to its string form for filter compare.
762
+
763
+ Filters are always strings on the wire; we string-coerce the row value
764
+ so ``id=eq.42`` matches an ``int8`` column without the user having to
765
+ quote it. ``None`` becomes the empty string sentinel which will not
766
+ match any client-supplied filter (a client wanting to filter on NULL
767
+ must use a server-side trigger instead — out of scope for v0.4).
768
+ """
769
+ if value is None:
770
+ return ""
771
+ if isinstance(value, datetime):
772
+ # ISO-8601 with tz so equality with a JSON-encoded value matches.
773
+ if value.tzinfo is None:
774
+ value = value.replace(tzinfo=UTC)
775
+ return value.isoformat()
776
+ return str(value)
777
+
778
+
779
def _topic_for(conn: Connection, data: PostgresChangesData) -> str:
    """Pick the channel topic for a postgres_changes push.

    A connection may be subscribed to several topics that all match the
    same row event (e.g. one channel per room). We re-walk the
    connection's subscriptions and pick the first whose
    ``postgres_changes`` matches — the broker's caller already guaranteed
    at least one match exists.
    """
    matching_topics = (
        sub.topic
        for sub in conn.subscriptions.values()
        if _match_subscription_ids(sub, data)
    )
    for topic in matching_topics:
        return topic
    # Should be unreachable; caller filters by exactly this predicate.
    return next(iter(conn.subscriptions.keys()), "realtime:unknown")
793
+
794
+
795
+ # ---------------------------------------------------------------------------
796
+ # Singleton
797
+ # ---------------------------------------------------------------------------
798
+
799
+
800
# Process-wide singleton; constructed lazily by get_broker() and cleared by
# reset_broker() (test hook).
_broker: Broker | None = None
801
+
802
+
803
def get_broker() -> Broker:
    """Return the process-wide broker, lazily constructed."""
    global _broker
    broker = _broker
    if broker is None:
        broker = Broker()
        _broker = broker
    return broker
809
+
810
+
811
def reset_broker() -> None:
    """Drop the singleton (tests). Caller is responsible for stopping it first.

    Only clears the module-level reference; the next get_broker() call
    constructs a fresh Broker.
    """
    global _broker
    _broker = None