horsies 0.1.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. horsies/__init__.py +115 -0
  2. horsies/core/__init__.py +0 -0
  3. horsies/core/app.py +552 -0
  4. horsies/core/banner.py +144 -0
  5. horsies/core/brokers/__init__.py +5 -0
  6. horsies/core/brokers/listener.py +444 -0
  7. horsies/core/brokers/postgres.py +864 -0
  8. horsies/core/cli.py +624 -0
  9. horsies/core/codec/serde.py +575 -0
  10. horsies/core/errors.py +535 -0
  11. horsies/core/logging.py +90 -0
  12. horsies/core/models/__init__.py +0 -0
  13. horsies/core/models/app.py +268 -0
  14. horsies/core/models/broker.py +79 -0
  15. horsies/core/models/queues.py +23 -0
  16. horsies/core/models/recovery.py +101 -0
  17. horsies/core/models/schedule.py +229 -0
  18. horsies/core/models/task_pg.py +307 -0
  19. horsies/core/models/tasks.py +332 -0
  20. horsies/core/models/workflow.py +1988 -0
  21. horsies/core/models/workflow_pg.py +245 -0
  22. horsies/core/registry/tasks.py +101 -0
  23. horsies/core/scheduler/__init__.py +26 -0
  24. horsies/core/scheduler/calculator.py +267 -0
  25. horsies/core/scheduler/service.py +569 -0
  26. horsies/core/scheduler/state.py +260 -0
  27. horsies/core/task_decorator.py +615 -0
  28. horsies/core/types/status.py +38 -0
  29. horsies/core/utils/imports.py +203 -0
  30. horsies/core/utils/loop_runner.py +44 -0
  31. horsies/core/worker/current.py +17 -0
  32. horsies/core/worker/worker.py +1967 -0
  33. horsies/core/workflows/__init__.py +23 -0
  34. horsies/core/workflows/engine.py +2344 -0
  35. horsies/core/workflows/recovery.py +501 -0
  36. horsies/core/workflows/registry.py +97 -0
  37. horsies/py.typed +0 -0
  38. horsies-0.1.0a1.dist-info/METADATA +31 -0
  39. horsies-0.1.0a1.dist-info/RECORD +42 -0
  40. horsies-0.1.0a1.dist-info/WHEEL +5 -0
  41. horsies-0.1.0a1.dist-info/entry_points.txt +2 -0
  42. horsies-0.1.0a1.dist-info/top_level.txt +1 -0
horsies/core/banner.py ADDED
@@ -0,0 +1,144 @@
1
+ """Startup banner for horsies."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import sys
6
+ from typing import TYPE_CHECKING, TextIO
7
+
8
+ if TYPE_CHECKING:
9
+ from horsies.core.app import Horsies
10
+
11
+
12
+ # Braille art of galloping horse - converted from the golden horse image
13
+ # Each braille character represents a 2x4 pixel block for higher resolution
14
+ HORSE_BRAILLE = """
15
+ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣤⣶⣾⣿⣿⣿⣿⣷⣯⡀⠀⠀⠀⠀⠀⠀
16
+ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣤⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣟⣿⣆⠀⠀⠀⠀
17
+ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣴⣿⣿⣿⣿⣿⣿⣿⣿⡟⠹⣿⣿⣿⣿⣿⣦⡀⠀⠀⠀
18
+ ⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣀⣀⣠⣴⣾⣿⣿⣿⣶⣶⣶⣤⣤⣤⣤⣤⣶⣾⣿⣿⣿⣿⣿⣿⣿⡟⠀⠀⠈⠉⠛⠻⡿⣿⣿⠂⠀
19
+ ⠀⠀⢀⣀⠀⢀⣀⣠⣶⣿⣿⠟⢛⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡟⡐⠀⠀⠀⠀⠀⠀⠈⠋⡿⠁⠀⠀
20
+ ⠀⠀⠀⢹⣿⣿⣿⣿⣿⡿⠁⠀⢸⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣯⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
21
+ ⠀⠀⠀⠀⠛⠻⠿⠛⠉⠀⠀⠀⠈⢯⡻⣿⣿⣿⣿⣿⢿⣿⣿⣿⣿⣿⣿⡿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
22
+ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠻⣷⣿⣿⣿⡟⠀⠙⠻⠿⠿⣿⣿⠃⣿⣿⣿⣿⣿⣿⣿⣿⡁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
23
+ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⡿⠁⢀⠀⠀⠀⠀⠀⠂⠀⢿⣿⣿⣿⡍⠈⢁⣙⣿⢦⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
24
+ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢰⣿⣿⠏⠀⠀⣼⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣿⣷⠀⠀⠀⠀⠁⣿⡇⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
25
+ ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠻⣿⣧⣀⢄⣿⡀⠀⠀⠀⠀⠀⠀⠀⠀⠈⢻⣿⣧⠀⠀⢀⣼⡟⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
26
+ ⠀⠀⠀⣀⠀⠀⠀⢀⡀⠀⠀⠀⡀⠀⠀⠀⣀⠛⣿⣷⣍⠛⣦⡀⠀⠂⢠⣷⣶⣾⡿⠟⠛⢃⠀⢠⣾⡟⠀⠀⠀⠀⡀⠀⠀⠀⡀⠀⠀⠀
27
+ ⠄⡤⠦⠤⠤⢤⡤⠡⠤⠤⢤⠬⠤⠤⠤⢤⠅⠀⠤⣿⣷⠄⠎⢻⣤⡦⠄⠀⠤⢵⠄⠠⠤⠬⣾⠏⠁⠀⠥⡤⠄⠠⠬⢦⡤⠀⠠⠵⢤⠠
28
+ ⠚⠒⠒⠒⡶⠚⠒⠒⠒⡰⠓⠒⠒⠒⢲⠓⠒⠒⠒⢻⠿⠀⠀⠚⢿⡷⠐⠒⠒⠚⡆⠒⠒⠒⠚⣖⠒⠒⠒⠚⢶⠒⠒⠒⠚⢶⠂⠐⠒⠛
29
+ """
30
+
31
+ # Figlet-style "horsies" text
32
+ LOGO_TEXT = r"""
33
+ __ _
34
+ / /_ ____ __________(_)__ _____
35
+ / __ \/ __ \/ ___/ ___/ / _ \/ ___/
36
+ / / / / /_/ / / (__ ) / __(__ )
37
+ /_/ /_/\____/_/ /____/_/\___/____/
38
+ """
39
+
40
+ # Full banner combining horse and logo
41
+ BANNER = (
42
+ HORSE_BRAILLE
43
+ + r"""
44
+ __ _
45
+ / /_ ____ __________(_)__ _____
46
+ / __ \/ __ \/ ___/ ___/ / _ \/ ___/ {version}
47
+ / / / / /_/ / / (__ ) / __(__ ) distributed task queue
48
+ /_/ /_/\____/_/ /____/_/\___/____/ and workflow engine
49
+ """
50
+ )
51
+
52
+
53
def get_version() -> str:
    """Return the installed horsies version, falling back to "0.1.0"."""
    fallback = "0.1.0"
    try:
        import horsies
    except ImportError:
        # Package not importable (e.g. running from a source checkout).
        return fallback
    version = getattr(horsies, "__version__", None)
    if not version:
        return fallback
    return str(version)
62
+
63
+
64
def format_banner(version: str | None = None) -> str:
    """Render the startup banner, substituting the version string.

    When ``version`` is None the installed package version is looked up.
    """
    resolved = get_version() if version is None else version
    return BANNER.format(version=f"v{resolved}")
69
+
70
+
71
def mask_password(url: str) -> str:
    """Return ``url`` with any password in its userinfo section masked as ``****``.

    URLs without credentials (no ``@``) or without a password
    (``scheme://user@host``) are returned unchanged.
    """
    if "@" not in url:
        return url
    pre, post = url.split("@", 1)
    # Strip the scheme before looking for the "user:password" separator, so
    # the scheme's own colon (e.g. in "postgresql://user") is never mistaken
    # for it — the previous inline logic mangled password-less URLs into
    # "postgresql:****@host".
    scheme, sep, credentials = pre.partition("://")
    prefix = scheme + sep if sep else ""
    if not sep:
        credentials = pre
    if ":" not in credentials:
        # No password present — nothing to mask.
        return url
    user = credentials.split(":", 1)[0]
    return f"{prefix}{user}:****@{post}"


def print_banner(
    app: "Horsies",
    role: str = "worker",
    show_tasks: bool = True,
    file: TextIO | None = None,
) -> None:
    """
    Print startup banner with configuration and task list.

    Args:
        app: The Horsies app instance
        role: The role (worker, scheduler, producer)
        show_tasks: Whether to list discovered tasks
        file: Output file (default: sys.stdout)
    """
    if file is None:
        file = sys.stdout

    version = get_version()

    # Build the banner as a list of lines, printed in one call at the end.
    lines: list[str] = []

    # ASCII art header
    lines.append(format_banner(version))

    # Configuration section
    lines.append("[config]")
    lines.append(f" .> app: {app.__class__.__name__}")
    lines.append(f" .> role: {role}")
    lines.append(f" .> queue_mode: {app.config.queue_mode.name}")

    # Queue info: list custom queues only in CUSTOM mode, otherwise "default".
    if app.config.queue_mode.name == "CUSTOM" and app.config.custom_queues:
        queues_str = ", ".join(q.name for q in app.config.custom_queues)
        lines.append(f" .> queues: {queues_str}")
    else:
        lines.append(" .> queues: default")

    # Broker info — password masked so credentials never reach logs/console.
    lines.append(f" .> broker: {mask_password(app.config.broker.database_url)}")

    # Concurrency cap (only shown when configured and truthy)
    if hasattr(app.config, "cluster_wide_cap") and app.config.cluster_wide_cap:
        lines.append(f" .> cap: {app.config.cluster_wide_cap} (cluster-wide)")

    lines.append("")

    # Tasks section: sorted names of every registered task.
    if show_tasks:
        task_names = app.list_tasks()
        lines.append(f"[tasks] ({len(task_names)} registered)")
        for task_name in sorted(task_names):
            lines.append(f" . {task_name}")
        lines.append("")

    # Print everything in one write to keep output atomic-ish.
    print("\n".join(lines), file=file)
138
+
139
+
140
def print_simple_banner(file: TextIO | None = None) -> None:
    """Write the bare ASCII-art banner (no config section) to ``file``.

    Defaults to stdout when no file object is given.
    """
    target = sys.stdout if file is None else file
    print(format_banner(), file=target)
@@ -0,0 +1,5 @@
1
"""Broker implementations exposed by ``horsies.core.brokers``."""

from horsies.core.brokers.postgres import PostgresBroker

# Public API of this subpackage.
__all__ = [
    'PostgresBroker',
]
@@ -0,0 +1,444 @@
1
+ # app/core/brokers/listener.py
2
+ """
3
+ PostgreSQL LISTEN/NOTIFY-based notification system for task management:
4
+
5
+ Architecture:
6
+ TaskApp -> PostgresBroker -> PostgresListener
7
+
8
+ Flow:
9
+ 1. Task submission: INSERT into tasks table -> PostgreSQL trigger -> NOTIFY task_new/task_queue_*
10
+ 2. Worker notification: Workers listen to task_new + queue-specific channels
11
+ 3. Task processing: Worker picks up task, processes it, updates status
12
+ 4. Completion notification: UPDATE tasks status -> PostgreSQL trigger -> NOTIFY task_done
13
+ 5. Result retrieval: Clients listen to task_done channel, filter by task_id
14
+
15
+ Key channels:
16
+ - task_new: Global notification for any new task (workers)
17
+ - task_queue_*: Queue-specific notifications (workers)
18
+ - task_done: Task completion notifications (result waiters)
19
+ """
20
+
21
+ # app/core/brokers/listener.py
22
+ from __future__ import annotations
23
+ import asyncio
24
+ import contextlib
25
+ from collections import defaultdict
26
+ from typing import DefaultDict, Optional, Set
27
+ from asyncio import Task, Queue # precise type for Pylance/mypy
28
+
29
+ import psycopg
30
+ from psycopg import AsyncConnection, OperationalError, Notify
31
+ from psycopg import sql
32
+ from horsies.core.logging import get_logger
33
+
34
+ logger = get_logger('listener')
35
+
36
+
37
class PostgresListener:
    """
    PostgreSQL LISTEN/NOTIFY wrapper with async queue distribution.

    Features:
    - Single dispatcher consuming conn.notifies() to avoid competing readers
    - Multiple subscribers per channel via independent asyncio queues
    - Safe SQL with identifier quoting and parameter binding
    - Auto-reconnect with exponential backoff and re-LISTEN
    - Proactive connection health monitoring

    Architecture:
    - dispatcher_conn: Exclusively consumes notifications from conn.notifies()
    - command_conn: Handles LISTEN/UNLISTEN commands separately
    - Per-channel queues: Each subscriber gets independent notification delivery

    Usage:
    ------
    listener = PostgresListener(database_url)
    await listener.start()

    # Subscribe: returns a queue that receives notifications for this channel
    queue = await listener.listen("task_done")

    # Wait for notifications (blocks until one arrives)
    notification = await queue.get()
    if notification.payload == "task_123":  # Filter by payload if needed
        # Process notification
        pass

    # Cleanup: remove local subscription (keeps server-side LISTEN active)
    await listener.unsubscribe("task_done", queue)
    await listener.close()

    Notes:
    ------
    * Uses autocommit=True: NOTIFYs are sent immediately, not held in transactions
    * Dispatcher distributes each notification to ALL subscriber queues for that channel
    * put_nowait() prevents slow consumers from blocking others (drops on queue full)
    * Channel names are PostgreSQL identifiers, automatically quoted for safety
    """

    def __init__(self, database_url: str) -> None:
        self.database_url = database_url

        # Two separate connections to avoid blocking issues:
        # 1. Dispatcher connection: exclusively for conn.notifies() consumption
        self._dispatcher_conn: Optional[AsyncConnection] = None
        # 2. Command connection: for LISTEN/UNLISTEN SQL commands
        self._command_conn: Optional[AsyncConnection] = None

        # Set of channels we have LISTENed to on the server.
        self._listen_channels: Set[str] = set()

        # Per-channel subscribers: each subscriber is an asyncio.Queue[Notify].
        # Multiple subscribers can wait on the same channel independently.
        self._subs: DefaultDict[str, Set[Queue[Notify]]] = defaultdict(set)

        # Background dispatcher task consuming conn.notifies() and distributing to subscriber queues.
        self._dispatcher_task: Optional[Task[None]] = None

        # Health check task for proactive disconnection detection
        self._health_check_task: Optional[Task[None]] = None

        # Event for file descriptor activity detection
        self._fd_activity: asyncio.Event = asyncio.Event()
        # Whether the dispatcher connection's fd is registered with the loop.
        self._fd_registered: bool = False

        # A lock used to serialize LISTEN/UNLISTEN and subscription book-keeping.
        self._lock = asyncio.Lock()

    async def start(self) -> None:
        """
        Establish the notification connections (autocommit) and start the
        dispatcher task. Safe to call multiple times.
        """
        await self._ensure_connections()
        # Defer starting the dispatcher until at least one channel is LISTENed.
        if self._health_check_task is None:
            # Start proactive health monitoring
            self._health_check_task = asyncio.create_task(
                self._health_monitor(), name='pg-listener-health'
            )

    async def _start_listening(self, conn: AsyncConnection) -> None:
        """
        Start listening to all _listen_channels for the given connection.
        """
        # sql.Identifier quotes the channel name, preventing SQL injection.
        for channel in self._listen_channels:
            await conn.execute(sql.SQL('LISTEN {}').format(sql.Identifier(channel)))

    async def _ensure_connections(self) -> None:
        """
        Ensure we have live AsyncConnections in autocommit mode.
        On reconnection, re-issue LISTEN for any previously tracked channels.
        """
        # Ensure dispatcher connection (for conn.notifies() consumption)
        if self._dispatcher_conn is None or self._dispatcher_conn.closed:
            self._dispatcher_conn = await psycopg.AsyncConnection.connect(
                self.database_url,
                autocommit=True,
            )
            await self._start_listening(self._dispatcher_conn)
            # Register file descriptor for activity monitoring
            self._register_fd_monitoring()

        # Ensure command connection (for LISTEN/UNLISTEN commands)
        if self._command_conn is None or self._command_conn.closed:
            self._command_conn = await psycopg.AsyncConnection.connect(
                self.database_url,
                autocommit=True,
            )
            await self._start_listening(self._command_conn)

    async def _ensure_dispatcher_connection(self) -> AsyncConnection:
        """Ensure dispatcher connection is available."""
        if self._dispatcher_conn is None or self._dispatcher_conn.closed:
            await self._ensure_connections()
        assert self._dispatcher_conn is not None
        return self._dispatcher_conn

    async def _ensure_command_connection(self) -> AsyncConnection:
        """Ensure command connection is available."""
        if self._command_conn is None or self._command_conn.closed:
            await self._ensure_connections()
        assert self._command_conn is not None
        return self._command_conn

    def _register_fd_monitoring(self) -> None:
        """
        Register file descriptor monitoring for proactive disconnection detection.

        Adds the dispatcher connection's socket fd to the running event loop's
        readers; any readability sets self._fd_activity.
        """
        if (
            self._dispatcher_conn
            and not self._dispatcher_conn.closed
            and not self._fd_registered
        ):
            try:
                loop = asyncio.get_running_loop()
                loop.add_reader(self._dispatcher_conn.fileno(), self._fd_activity.set)
                self._fd_registered = True
            except (OSError, AttributeError):
                # fileno() might not be available or connection might be closed
                pass

    def _unregister_fd_monitoring(self) -> None:
        """
        Unregister file descriptor monitoring.

        Always clears the _fd_registered flag, even if removal fails.
        """
        if self._fd_registered and self._dispatcher_conn:
            try:
                loop = asyncio.get_running_loop()
                loop.remove_reader(self._dispatcher_conn.fileno())
            except (OSError, AttributeError, ValueError):
                # Connection might be closed or already removed
                pass
            finally:
                self._fd_registered = False

    async def _health_monitor(self) -> None:
        """
        Proactive health monitoring using psycopg3 recommended pattern:
        - Monitor file descriptor activity with timeout
        - Perform health checks during idle periods
        - Detect disconnections faster than waiting for operations to fail
        """
        while True:
            try:
                # Wait up to 60 seconds for file descriptor activity
                try:
                    await asyncio.wait_for(self._fd_activity.wait(), timeout=60.0)
                    self._fd_activity.clear()

                    # Activity detected - verify connection health
                    if self._command_conn and not self._command_conn.closed:
                        try:
                            await self._command_conn.execute('SELECT 1')
                        except OperationalError:
                            # Connection is dead, trigger reconnection
                            # NOTE(review): the dead connection objects are
                            # dropped without close(); presumably reclaimed by
                            # GC / psycopg finalizers — confirm.
                            self._unregister_fd_monitoring()
                            self._command_conn = None
                            self._dispatcher_conn = None
                            continue

                except asyncio.TimeoutError:
                    # No activity for 60 seconds - perform health check
                    if self._command_conn and not self._command_conn.closed:
                        try:
                            await self._command_conn.execute('SELECT 1')
                        except OperationalError:
                            # Connection is dead, trigger reconnection
                            self._unregister_fd_monitoring()
                            self._command_conn = None
                            self._dispatcher_conn = None
                            continue

            except asyncio.CancelledError:
                raise
            except Exception:
                # Unexpected error in health monitoring - brief pause
                await asyncio.sleep(1.0)
                continue

    async def _dispatcher(self) -> None:
        """
        Core notification dispatcher:
        - Efficiently waits on PostgreSQL socket (no polling)
        - Distributes each notification to all subscriber queues for that channel
        - Handles disconnections with exponential backoff and automatic re-LISTEN
        - Uses put_nowait() to prevent slow consumers from blocking others

        The dispatcher is the single consumer of conn.notifies() to avoid race conditions
        when multiple coroutines try to read from the same notification stream.
        """
        backoff = 0.2  # start small; increase on failures up to a cap
        while True:
            try:
                conn = await self._ensure_dispatcher_connection()
                # This async iterator blocks efficiently on the socket.
                async for notification in conn.notifies():
                    # Activity means the connection is healthy; reset backoff.
                    backoff = 0.2

                    # Signal file descriptor activity
                    self._fd_activity.set()

                    # Distribute to all subscriber queues for this channel
                    # Use list() snapshot to avoid mutation during iteration
                    for q in list(self._subs.get(notification.channel, ())):
                        try:
                            q.put_nowait(notification)
                        except asyncio.QueueFull:
                            # Drop notification if queue is full (prevents blocking other subscribers)
                            # Consider increasing queue size if this happens frequently
                            pass

            except OperationalError:
                # Likely a disconnect. Back off a bit, then force reconnect and re-LISTEN.
                self._unregister_fd_monitoring()
                await asyncio.sleep(backoff)
                backoff = min(backoff * 2, 5.0)  # cap backoff
                self._dispatcher_conn = None  # force reconnection
                self._command_conn = None
                continue
            except asyncio.CancelledError:
                # Graceful shutdown: stop dispatching and exit the task.
                self._unregister_fd_monitoring()
                raise
            except Exception:
                # Unexpected issue: brief pause to avoid a hot loop, then try again.
                self._unregister_fd_monitoring()
                await asyncio.sleep(0.5)
                self._dispatcher_conn = None
                self._command_conn = None
                continue

    async def listen(self, channel_name: str) -> Queue[Notify]:
        """
        Subscribe to a PostgreSQL notification channel.

        Creates a new asyncio queue for this subscriber and issues LISTEN command
        to PostgreSQL (only once per channel, regardless of subscriber count).

        Parameters
        ----------
        channel_name : str
            PostgreSQL channel name (automatically quoted for SQL safety)

        Returns
        -------
        asyncio.Queue[Notify]
            Queue that will receive Notify objects for this channel.
            Each subscriber gets their own independent queue.

        Example
        -------
        queue = await listener.listen("task_done")
        notification = await queue.get()  # Blocks until notification arrives
        task_id = notification.payload  # Extract payload data

        Notes
        -----
        - Multiple subscribers to same channel each get independent queues
        - Server-side LISTEN is issued only once per channel
        - Dispatcher automatically starts after first subscription
        """
        await self._ensure_connections()

        async with self._lock:
            # LISTEN on the server if this is the first subscriber for the channel.
            if channel_name not in self._listen_channels:
                # Ensure connections exist (re-checked under the lock; a
                # reconnect may have happened since the call above).
                await self._ensure_connections()

                # Stop dispatcher temporarily to safely modify dispatcher connection state
                if self._dispatcher_task is not None:
                    self._dispatcher_task.cancel()
                    with contextlib.suppress(asyncio.CancelledError):
                        await self._dispatcher_task
                    self._dispatcher_task = None

                # Issue LISTEN on the dispatcher connection (the one consuming notifies)
                disp_conn = await self._ensure_dispatcher_connection()
                await disp_conn.execute(
                    sql.SQL('LISTEN {}').format(sql.Identifier(channel_name))
                )
                self._listen_channels.add(channel_name)

            # Restart dispatcher after updating the LISTEN set
            if self._dispatcher_task is None:
                self._dispatcher_task = asyncio.create_task(
                    self._dispatcher(), name='pg-listener-dispatcher'
                )

            # Create a fresh queue for this subscriber.
            q: Queue[Notify] = asyncio.Queue()
            self._subs[channel_name].add(q)
            return q

    async def unlisten(
        self, channel_name: str, q: Optional[Queue[Notify]] = None
    ) -> None:
        """
        Remove subscription and issue UNLISTEN if no subscribers remain.

        This completely removes the server-side LISTEN when the last subscriber
        unsubscribes. Use unsubscribe() instead if you want to keep the server-side
        LISTEN active to avoid LISTEN/UNLISTEN races.

        Parameters
        ----------
        channel_name : str
            Channel name used in listen()
        q : Optional[Queue[Notify]]
            Specific queue to remove. If None, only checks for server-side cleanup.

        Warning
        -------
        Can cause LISTEN races if other subscribers are still using the channel.
        Consider using unsubscribe() instead for better performance.
        """
        async with self._lock:
            if q is not None:
                self._subs[channel_name].discard(q)
            # If nobody is listening locally, drop the server-side LISTEN.
            no_local_subs = not self._subs[channel_name]
            if no_local_subs and channel_name in self._listen_channels:
                # Only UNLISTEN via the command connection to avoid racing dispatcher notifies iterator
                if self._command_conn and not self._command_conn.closed:
                    await self._command_conn.execute(
                        sql.SQL('UNLISTEN {}').format(sql.Identifier(channel_name))
                    )

                self._listen_channels.discard(channel_name)

    async def unsubscribe(
        self, channel_name: str, q: Optional[Queue[Notify]] = None
    ) -> None:
        """
        Remove local queue subscription while keeping server-side LISTEN active.

        This is the preferred way to clean up subscriptions as it avoids expensive
        LISTEN/UNLISTEN operations when multiple subscribers use the same channel.
        The server-side LISTEN remains active for other current/future subscribers.

        Parameters
        ----------
        channel_name : str
            Channel name used in listen()
        q : Optional[Queue[Notify]]
            Specific queue to remove from local subscriptions

        Notes
        -----
        - More efficient than unlisten() for temporary subscriptions
        - Prevents LISTEN/UNLISTEN races in high-traffic scenarios
        - Server-side LISTEN remains active until listener.close()
        """
        async with self._lock:
            if q is not None:
                self._subs[channel_name].discard(q)

    async def close(self) -> None:
        """
        Stop the dispatcher, health monitor and close the notification connection.
        Safe to call more than once.
        """
        if self._health_check_task:
            self._health_check_task.cancel()
            with contextlib.suppress(asyncio.CancelledError):
                await self._health_check_task
            self._health_check_task = None

        if self._dispatcher_task:
            self._dispatcher_task.cancel()
            with contextlib.suppress(asyncio.CancelledError):
                await self._dispatcher_task
            self._dispatcher_task = None

        self._unregister_fd_monitoring()

        if self._dispatcher_conn and not self._dispatcher_conn.closed:
            await self._dispatcher_conn.close()
        self._dispatcher_conn = None

        if self._command_conn and not self._command_conn.closed:
            await self._command_conn.close()
        self._command_conn = None