pgque-py 0.2.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pgque/__init__.py +44 -0
- pgque/client.py +461 -0
- pgque/consumer.py +333 -0
- pgque/errors.py +25 -0
- pgque/types.py +49 -0
- pgque_py-0.2.0rc1.dist-info/METADATA +246 -0
- pgque_py-0.2.0rc1.dist-info/RECORD +10 -0
- pgque_py-0.2.0rc1.dist-info/WHEEL +5 -0
- pgque_py-0.2.0rc1.dist-info/licenses/LICENSE +191 -0
- pgque_py-0.2.0rc1.dist-info/top_level.txt +1 -0
pgque/__init__.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
# Copyright 2026 Nikolay Samokhvalov. Apache-2.0 license.
|
|
2
|
+
# PgQue includes code derived from PgQ (ISC license,
|
|
3
|
+
# Marko Kreen / Skype Technologies OU).
|
|
4
|
+
|
|
5
|
+
"""pgque -- Python client for PgQue (PgQ Universal Edition).
|
|
6
|
+
|
|
7
|
+
Quickstart::
|
|
8
|
+
|
|
9
|
+
import pgque
|
|
10
|
+
|
|
11
|
+
with pgque.connect("postgresql://localhost/mydb") as client:
|
|
12
|
+
client.send("orders", {"order_id": 42}, type="order.created")
|
|
13
|
+
client.conn.commit()
|
|
14
|
+
|
|
15
|
+
See https://github.com/NikolayS/pgque for the SQL schema install and
|
|
16
|
+
full documentation.
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from .client import PgqueClient, connect
|
|
20
|
+
from .consumer import Consumer
|
|
21
|
+
from .errors import (
|
|
22
|
+
PgqueBatchNotFound,
|
|
23
|
+
PgqueConnectionError,
|
|
24
|
+
PgqueConsumerNotFound,
|
|
25
|
+
PgqueError,
|
|
26
|
+
PgqueQueueNotFound,
|
|
27
|
+
)
|
|
28
|
+
from .types import Event, Message
|
|
29
|
+
|
|
30
|
+
__version__ = "0.2.0rc1"
|
|
31
|
+
|
|
32
|
+
__all__ = [
|
|
33
|
+
"PgqueClient",
|
|
34
|
+
"Consumer",
|
|
35
|
+
"Message",
|
|
36
|
+
"Event",
|
|
37
|
+
"PgqueError",
|
|
38
|
+
"PgqueConnectionError",
|
|
39
|
+
"PgqueQueueNotFound",
|
|
40
|
+
"PgqueBatchNotFound",
|
|
41
|
+
"PgqueConsumerNotFound",
|
|
42
|
+
"connect",
|
|
43
|
+
"__version__",
|
|
44
|
+
]
|
pgque/client.py
ADDED
|
@@ -0,0 +1,461 @@
|
|
|
1
|
+
# Copyright 2026 Nikolay Samokhvalov. Apache-2.0 license.
|
|
2
|
+
# PgQue includes code derived from PgQ (ISC license,
|
|
3
|
+
# Marko Kreen / Skype Technologies OU).
|
|
4
|
+
|
|
5
|
+
"""PgqueClient -- thin Python wrapper over the pgque SQL API."""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
from typing import Any, Optional, Union
|
|
9
|
+
|
|
10
|
+
import psycopg
|
|
11
|
+
|
|
12
|
+
from .errors import (
|
|
13
|
+
PgqueBatchNotFound,
|
|
14
|
+
PgqueConnectionError,
|
|
15
|
+
PgqueError,
|
|
16
|
+
PgqueQueueNotFound,
|
|
17
|
+
)
|
|
18
|
+
from .types import Event, Message
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def connect(dsn: str, *, autocommit: bool = False) -> "PgqueClient":
    """Open a connection to PostgreSQL and return a ``PgqueClient``.

    The returned client owns the connection and must be closed via
    ``client.close()`` or used as a context manager.

    Args:
        dsn: libpq connection string (``postgresql://...``).
        autocommit: If True, the connection runs in autocommit mode.
            Useful for one-off scripts and consumers that prefer
            implicit transactions per statement.

    Raises:
        PgqueConnectionError: Connection could not be established.
    """
    try:
        connection = psycopg.connect(dsn, autocommit=autocommit)
    except psycopg.OperationalError as exc:
        # Surface connection failures through the pgque hierarchy so
        # callers need not depend on psycopg exception types.
        raise PgqueConnectionError(str(exc)) from exc
    return PgqueClient(connection, _owns_conn=True)
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def _wrap_sql_error(e: Exception) -> PgqueError:
    """Map a raw psycopg error to a pgque exception subclass."""
    message = str(e)
    lowered = message.lower()
    # Classification is by recognizable substrings emitted by the
    # pgque SQL functions; first match wins.
    for needle, exc_cls in (
        ("queue not found", PgqueQueueNotFound),
        ("batch not found", PgqueBatchNotFound),
    ):
        if needle in lowered:
            return exc_cls(message)
    return PgqueError(message)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class PgqueClient:
|
|
55
|
+
"""Thin wrapper around pgque SQL functions.
|
|
56
|
+
|
|
57
|
+
By default, methods execute SQL against the wrapped connection
|
|
58
|
+
without managing transactions; the caller decides when to
|
|
59
|
+
``commit()``/``rollback()``. If the connection is in autocommit
|
|
60
|
+
mode, each statement is its own transaction.
|
|
61
|
+
|
|
62
|
+
Use ``pgque.connect(dsn)`` to construct a client that owns its
|
|
63
|
+
connection. Pass an existing ``psycopg.Connection`` to share one
|
|
64
|
+
with application code.
|
|
65
|
+
"""
|
|
66
|
+
|
|
67
|
+
def __init__(
|
|
68
|
+
self,
|
|
69
|
+
conn: psycopg.Connection,
|
|
70
|
+
*,
|
|
71
|
+
_owns_conn: bool = False,
|
|
72
|
+
):
|
|
73
|
+
self.conn = conn
|
|
74
|
+
self._owns_conn = _owns_conn
|
|
75
|
+
|
|
76
|
+
# --- context manager / lifecycle ------------------------------------
|
|
77
|
+
|
|
78
|
+
def __enter__(self) -> "PgqueClient":
|
|
79
|
+
return self
|
|
80
|
+
|
|
81
|
+
def __exit__(self, exc_type, exc, tb) -> None:
|
|
82
|
+
self.close()
|
|
83
|
+
|
|
84
|
+
def close(self) -> None:
|
|
85
|
+
"""Close the underlying connection if owned by this client.
|
|
86
|
+
|
|
87
|
+
If the client was constructed with an externally-managed
|
|
88
|
+
connection, ``close()`` is a no-op.
|
|
89
|
+
"""
|
|
90
|
+
if self._owns_conn and not self.conn.closed:
|
|
91
|
+
self.conn.close()
|
|
92
|
+
|
|
93
|
+
# --- producer -------------------------------------------------------
|
|
94
|
+
|
|
95
|
+
def send(
|
|
96
|
+
self,
|
|
97
|
+
queue: str,
|
|
98
|
+
payload: Any = None,
|
|
99
|
+
*,
|
|
100
|
+
type: str = "default",
|
|
101
|
+
) -> int:
|
|
102
|
+
"""Send a single message to a queue.
|
|
103
|
+
|
|
104
|
+
Maps to ``pgque.send(queue, payload)`` or
|
|
105
|
+
``pgque.send(queue, type, payload)``.
|
|
106
|
+
|
|
107
|
+
Args:
|
|
108
|
+
queue: Target queue name.
|
|
109
|
+
payload: Message payload. Accepted forms:
|
|
110
|
+
|
|
111
|
+
- ``dict`` / ``list`` — JSON-serialised automatically.
|
|
112
|
+
- ``str`` — must be **valid JSON text** (e.g.
|
|
113
|
+
``'"hello"'``, ``'{"k": 1}'``, ``'42'``, ``'null'``).
|
|
114
|
+
The value is cast to ``jsonb`` by PostgreSQL. The
|
|
115
|
+
Python literal ``"hello"`` has content ``hello``,
|
|
116
|
+
which is not valid JSON; pass ``'"hello"'`` or
|
|
117
|
+
``json.dumps("hello")`` instead.
|
|
118
|
+
- ``None`` — stored as JSON ``null``.
|
|
119
|
+
- :class:`Event` — ``type`` and ``payload`` are unpacked.
|
|
120
|
+
|
|
121
|
+
type: Event type (default ``"default"``). Ignored if
|
|
122
|
+
``payload`` is an ``Event`` (its own ``type`` wins).
|
|
123
|
+
|
|
124
|
+
Returns:
|
|
125
|
+
The event ID assigned by pgque.
|
|
126
|
+
"""
|
|
127
|
+
if isinstance(payload, Event):
|
|
128
|
+
type = payload.type
|
|
129
|
+
payload = payload.payload
|
|
130
|
+
|
|
131
|
+
if isinstance(payload, (dict, list)):
|
|
132
|
+
payload = json.dumps(payload)
|
|
133
|
+
elif payload is None:
|
|
134
|
+
payload = "null"
|
|
135
|
+
|
|
136
|
+
try:
|
|
137
|
+
if type and type != "default":
|
|
138
|
+
row = self.conn.execute(
|
|
139
|
+
"select pgque.send(%s, %s, %s::jsonb)",
|
|
140
|
+
(queue, type, payload),
|
|
141
|
+
).fetchone()
|
|
142
|
+
else:
|
|
143
|
+
row = self.conn.execute(
|
|
144
|
+
"select pgque.send(%s, %s::jsonb)",
|
|
145
|
+
(queue, payload),
|
|
146
|
+
).fetchone()
|
|
147
|
+
except psycopg.Error as e:
|
|
148
|
+
raise _wrap_sql_error(e) from e
|
|
149
|
+
|
|
150
|
+
return row[0]
|
|
151
|
+
|
|
152
|
+
def send_batch(
|
|
153
|
+
self,
|
|
154
|
+
queue: str,
|
|
155
|
+
type: str,
|
|
156
|
+
payloads: list,
|
|
157
|
+
) -> list[int]:
|
|
158
|
+
"""Send multiple messages in one SQL call.
|
|
159
|
+
|
|
160
|
+
Maps to ``pgque.send_batch(queue, type, payloads[])`` and returns event
|
|
161
|
+
IDs in input order. The call is atomic inside the current transaction.
|
|
162
|
+
|
|
163
|
+
Payload encoding matches ``send``: ``dict``/``list`` values are JSON
|
|
164
|
+
encoded, ``str`` values must already be valid JSON text, and ``None`` is
|
|
165
|
+
stored as JSON ``null`` rather than SQL NULL.
|
|
166
|
+
"""
|
|
167
|
+
json_payloads = [
|
|
168
|
+
json.dumps(p) if isinstance(p, (dict, list))
|
|
169
|
+
else ("null" if p is None else p)
|
|
170
|
+
for p in payloads
|
|
171
|
+
]
|
|
172
|
+
try:
|
|
173
|
+
row = self.conn.execute(
|
|
174
|
+
"select pgque.send_batch(%s, %s, %s::jsonb[])",
|
|
175
|
+
(queue, type, json_payloads),
|
|
176
|
+
).fetchone()
|
|
177
|
+
except psycopg.Error as e:
|
|
178
|
+
raise _wrap_sql_error(e) from e
|
|
179
|
+
return list(row[0])
|
|
180
|
+
|
|
181
|
+
# --- consumer -------------------------------------------------------
|
|
182
|
+
|
|
183
|
+
def receive(
|
|
184
|
+
self,
|
|
185
|
+
queue: str,
|
|
186
|
+
consumer: str,
|
|
187
|
+
max_messages: int = 100,
|
|
188
|
+
) -> list[Message]:
|
|
189
|
+
"""Receive a batch of messages from a queue.
|
|
190
|
+
|
|
191
|
+
Maps to ``pgque.receive(queue, consumer, max_messages)``, which
|
|
192
|
+
opens a batch via ``next_batch`` internally. The caller must
|
|
193
|
+
``ack()`` the batch (with the ``batch_id`` from any returned
|
|
194
|
+
message) to advance the consumer past it. ``ack()`` finishes the
|
|
195
|
+
whole underlying PgQ batch, including rows beyond ``max_messages``;
|
|
196
|
+
direct callers should pass a value large enough for the queue's
|
|
197
|
+
possible batch size before acknowledging.
|
|
198
|
+
|
|
199
|
+
Args:
|
|
200
|
+
queue: Queue name.
|
|
201
|
+
consumer: Consumer name (must be registered on the queue).
|
|
202
|
+
max_messages: Maximum number of messages to return from the
|
|
203
|
+
current batch.
|
|
204
|
+
|
|
205
|
+
Returns:
|
|
206
|
+
List of ``Message`` objects, possibly empty if no batch is
|
|
207
|
+
currently available (e.g. the ticker has not run since the
|
|
208
|
+
last enqueue).
|
|
209
|
+
"""
|
|
210
|
+
try:
|
|
211
|
+
rows = self.conn.execute(
|
|
212
|
+
"select * from pgque.receive(%s, %s, %s)",
|
|
213
|
+
(queue, consumer, max_messages),
|
|
214
|
+
).fetchall()
|
|
215
|
+
except psycopg.Error as e:
|
|
216
|
+
raise _wrap_sql_error(e) from e
|
|
217
|
+
|
|
218
|
+
return [
|
|
219
|
+
Message(
|
|
220
|
+
msg_id=r[0],
|
|
221
|
+
batch_id=r[1],
|
|
222
|
+
type=r[2],
|
|
223
|
+
payload=r[3],
|
|
224
|
+
retry_count=r[4],
|
|
225
|
+
created_at=r[5],
|
|
226
|
+
extra1=r[6],
|
|
227
|
+
extra2=r[7],
|
|
228
|
+
extra3=r[8],
|
|
229
|
+
extra4=r[9],
|
|
230
|
+
)
|
|
231
|
+
for r in rows
|
|
232
|
+
]
|
|
233
|
+
|
|
234
|
+
def ack(self, batch_id: int) -> int:
|
|
235
|
+
"""Acknowledge (finish) a batch. Advances the consumer past it.
|
|
236
|
+
|
|
237
|
+
Args:
|
|
238
|
+
batch_id: Batch ID from any ``Message`` in the batch.
|
|
239
|
+
|
|
240
|
+
Returns:
|
|
241
|
+
Result returned by ``pgque.ack`` (1 on success).
|
|
242
|
+
"""
|
|
243
|
+
try:
|
|
244
|
+
row = self.conn.execute(
|
|
245
|
+
"select pgque.ack(%s)", (batch_id,)
|
|
246
|
+
).fetchone()
|
|
247
|
+
except psycopg.Error as e:
|
|
248
|
+
raise _wrap_sql_error(e) from e
|
|
249
|
+
return row[0]
|
|
250
|
+
|
|
251
|
+
def force_next_tick(self, queue: str) -> Optional[int]:
|
|
252
|
+
"""Force the next ``pgque.ticker(queue)`` call to insert a tick.
|
|
253
|
+
|
|
254
|
+
Maps to ``pgque.force_next_tick(queue)``. The SQL function bumps the
|
|
255
|
+
queue's event sequence so the next ticker pass skips the normal
|
|
256
|
+
``ticker_max_count`` / ``ticker_max_lag`` thresholds. It does **not**
|
|
257
|
+
insert the tick itself; call ``pgque.ticker`` afterwards (via raw SQL or
|
|
258
|
+
a scheduler).
|
|
259
|
+
|
|
260
|
+
Returns:
|
|
261
|
+
The current last tick ID, or ``None`` for a brand-new / skipped
|
|
262
|
+
queue, matching the SQL function.
|
|
263
|
+
"""
|
|
264
|
+
try:
|
|
265
|
+
row = self.conn.execute(
|
|
266
|
+
"select pgque.force_next_tick(%s)", (queue,)
|
|
267
|
+
).fetchone()
|
|
268
|
+
except psycopg.Error as e:
|
|
269
|
+
raise _wrap_sql_error(e) from e
|
|
270
|
+
return row[0]
|
|
271
|
+
|
|
272
|
+
def force_tick(self, queue: str) -> Optional[int]:
|
|
273
|
+
"""Deprecated compatibility alias for ``force_next_tick``."""
|
|
274
|
+
return self.force_next_tick(queue)
|
|
275
|
+
|
|
276
|
+
# --- experimental cooperative consumers -----------------------------
|
|
277
|
+
#
|
|
278
|
+
# Function names, edge-case behavior, and signatures for these methods
|
|
279
|
+
# may change before the cooperative API is marked stable. See the
|
|
280
|
+
# client README ("Experimental: cooperative consumers") and
|
|
281
|
+
# ``docs/reference.md`` for context.
|
|
282
|
+
|
|
283
|
+
def subscribe_subconsumer(
|
|
284
|
+
self,
|
|
285
|
+
queue: str,
|
|
286
|
+
consumer: str,
|
|
287
|
+
subconsumer: str,
|
|
288
|
+
) -> int:
|
|
289
|
+
"""Register ``subconsumer`` under logical ``consumer`` for ``queue``.
|
|
290
|
+
|
|
291
|
+
Maps to ``pgque.subscribe_subconsumer(queue, consumer, subconsumer)``.
|
|
292
|
+
Returns ``1`` for a new registration and ``0`` if the row already
|
|
293
|
+
existed.
|
|
294
|
+
"""
|
|
295
|
+
try:
|
|
296
|
+
row = self.conn.execute(
|
|
297
|
+
"select pgque.subscribe_subconsumer(%s, %s, %s)",
|
|
298
|
+
(queue, consumer, subconsumer),
|
|
299
|
+
).fetchone()
|
|
300
|
+
except psycopg.Error as e:
|
|
301
|
+
raise _wrap_sql_error(e) from e
|
|
302
|
+
return row[0]
|
|
303
|
+
|
|
304
|
+
def unsubscribe_subconsumer(
|
|
305
|
+
self,
|
|
306
|
+
queue: str,
|
|
307
|
+
consumer: str,
|
|
308
|
+
subconsumer: str,
|
|
309
|
+
*,
|
|
310
|
+
batch_handling: int = 0,
|
|
311
|
+
) -> int:
|
|
312
|
+
"""Unregister one subconsumer.
|
|
313
|
+
|
|
314
|
+
Maps to ``pgque.unsubscribe_subconsumer(queue, consumer,
|
|
315
|
+
subconsumer, batch_handling)``. The default ``batch_handling=0``
|
|
316
|
+
raises if the subconsumer holds an active batch; pass ``1`` to
|
|
317
|
+
route active messages through the same retry/DLQ policy as
|
|
318
|
+
``nack`` before the row is removed.
|
|
319
|
+
"""
|
|
320
|
+
try:
|
|
321
|
+
row = self.conn.execute(
|
|
322
|
+
"select pgque.unsubscribe_subconsumer(%s, %s, %s, %s)",
|
|
323
|
+
(queue, consumer, subconsumer, batch_handling),
|
|
324
|
+
).fetchone()
|
|
325
|
+
except psycopg.Error as e:
|
|
326
|
+
raise _wrap_sql_error(e) from e
|
|
327
|
+
return row[0]
|
|
328
|
+
|
|
329
|
+
def receive_coop(
|
|
330
|
+
self,
|
|
331
|
+
queue: str,
|
|
332
|
+
consumer: str,
|
|
333
|
+
subconsumer: str,
|
|
334
|
+
*,
|
|
335
|
+
max_messages: int = 100,
|
|
336
|
+
dead_interval: Optional[str] = None,
|
|
337
|
+
) -> list[Message]:
|
|
338
|
+
"""Receive a batch of messages for one cooperative subconsumer.
|
|
339
|
+
|
|
340
|
+
Maps to ``pgque.receive_coop(queue, consumer, subconsumer,
|
|
341
|
+
max_return, dead_interval)``. The function auto-registers the
|
|
342
|
+
``coop_main`` and ``coop_member`` rows on first call, so callers
|
|
343
|
+
do not need to ``subscribe_subconsumer`` ahead of time unless
|
|
344
|
+
they want to convert an existing normal consumer.
|
|
345
|
+
|
|
346
|
+
Args:
|
|
347
|
+
queue: Queue name.
|
|
348
|
+
consumer: Logical consumer (the ``coop_main`` row).
|
|
349
|
+
subconsumer: Per-worker member name.
|
|
350
|
+
max_messages: Maximum rows to return from the current batch.
|
|
351
|
+
``ack(batch_id)`` advances the cooperative cursor past
|
|
352
|
+
the entire underlying batch, so set this >= the queue's
|
|
353
|
+
worst-case batch size or consume the full batch before
|
|
354
|
+
acking.
|
|
355
|
+
dead_interval: Optional PostgreSQL interval syntax (e.g.
|
|
356
|
+
``"5 minutes"``). When set, allows takeover of a stale
|
|
357
|
+
sibling's batch under a fresh ``batch_id``; the old
|
|
358
|
+
token is invalidated.
|
|
359
|
+
|
|
360
|
+
Returns:
|
|
361
|
+
Possibly-empty list of ``Message`` objects.
|
|
362
|
+
"""
|
|
363
|
+
try:
|
|
364
|
+
rows = self.conn.execute(
|
|
365
|
+
"select * from pgque.receive_coop(%s, %s, %s, %s, %s::interval)",
|
|
366
|
+
(queue, consumer, subconsumer, max_messages, dead_interval),
|
|
367
|
+
).fetchall()
|
|
368
|
+
except psycopg.Error as e:
|
|
369
|
+
raise _wrap_sql_error(e) from e
|
|
370
|
+
|
|
371
|
+
return [
|
|
372
|
+
Message(
|
|
373
|
+
msg_id=r[0],
|
|
374
|
+
batch_id=r[1],
|
|
375
|
+
type=r[2],
|
|
376
|
+
payload=r[3],
|
|
377
|
+
retry_count=r[4],
|
|
378
|
+
created_at=r[5],
|
|
379
|
+
extra1=r[6],
|
|
380
|
+
extra2=r[7],
|
|
381
|
+
extra3=r[8],
|
|
382
|
+
extra4=r[9],
|
|
383
|
+
)
|
|
384
|
+
for r in rows
|
|
385
|
+
]
|
|
386
|
+
|
|
387
|
+
def touch_subconsumer(
|
|
388
|
+
self,
|
|
389
|
+
queue: str,
|
|
390
|
+
consumer: str,
|
|
391
|
+
subconsumer: str,
|
|
392
|
+
) -> int:
|
|
393
|
+
"""Refresh the heartbeat for a registered subconsumer row.
|
|
394
|
+
|
|
395
|
+
Maps to ``pgque.touch_subconsumer(queue, consumer, subconsumer)``.
|
|
396
|
+
Does not create a row if one does not already exist; returns the
|
|
397
|
+
number of rows touched (``1`` when the subconsumer is registered,
|
|
398
|
+
``0`` otherwise).
|
|
399
|
+
"""
|
|
400
|
+
try:
|
|
401
|
+
row = self.conn.execute(
|
|
402
|
+
"select pgque.touch_subconsumer(%s, %s, %s)",
|
|
403
|
+
(queue, consumer, subconsumer),
|
|
404
|
+
).fetchone()
|
|
405
|
+
except psycopg.Error as e:
|
|
406
|
+
raise _wrap_sql_error(e) from e
|
|
407
|
+
return row[0]
|
|
408
|
+
|
|
409
|
+
def nack(
|
|
410
|
+
self,
|
|
411
|
+
batch_id: int,
|
|
412
|
+
msg: Message,
|
|
413
|
+
retry_after: Union[int, float] = 60,
|
|
414
|
+
reason: Optional[str] = None,
|
|
415
|
+
) -> None:
|
|
416
|
+
"""Negatively acknowledge a single message.
|
|
417
|
+
|
|
418
|
+
Routes the message to the retry queue with a ``retry_after``
|
|
419
|
+
delay. If the message's ``retry_count`` is at or above the
|
|
420
|
+
queue's ``queue_max_retries``, it is moved to the dead-letter
|
|
421
|
+
queue instead.
|
|
422
|
+
|
|
423
|
+
After nacking individual messages, the caller should still
|
|
424
|
+
``ack()`` the batch to finish it.
|
|
425
|
+
|
|
426
|
+
Args:
|
|
427
|
+
batch_id: Batch ID.
|
|
428
|
+
msg: The ``Message`` to retry.
|
|
429
|
+
retry_after: Seconds before the message becomes available
|
|
430
|
+
again (default 60).
|
|
431
|
+
reason: Optional reason text (stored on the DLQ row when
|
|
432
|
+
max retries is exceeded).
|
|
433
|
+
"""
|
|
434
|
+
try:
|
|
435
|
+
self.conn.execute(
|
|
436
|
+
"select pgque.nack("
|
|
437
|
+
" %s,"
|
|
438
|
+
" ROW(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)::pgque.message,"
|
|
439
|
+
" %s::interval,"
|
|
440
|
+
" %s"
|
|
441
|
+
")",
|
|
442
|
+
(
|
|
443
|
+
batch_id,
|
|
444
|
+
msg.msg_id,
|
|
445
|
+
msg.batch_id,
|
|
446
|
+
msg.type,
|
|
447
|
+
json.dumps(msg.payload)
|
|
448
|
+
if isinstance(msg.payload, (dict, list))
|
|
449
|
+
else msg.payload,
|
|
450
|
+
msg.retry_count,
|
|
451
|
+
msg.created_at,
|
|
452
|
+
msg.extra1,
|
|
453
|
+
msg.extra2,
|
|
454
|
+
msg.extra3,
|
|
455
|
+
msg.extra4,
|
|
456
|
+
f"{retry_after} seconds",
|
|
457
|
+
reason,
|
|
458
|
+
),
|
|
459
|
+
)
|
|
460
|
+
except psycopg.Error as e:
|
|
461
|
+
raise _wrap_sql_error(e) from e
|
pgque/consumer.py
ADDED
|
@@ -0,0 +1,333 @@
|
|
|
1
|
+
# Copyright 2026 Nikolay Samokhvalov. Apache-2.0 license.
|
|
2
|
+
# PgQue includes code derived from PgQ (ISC license,
|
|
3
|
+
# Marko Kreen / Skype Technologies OU).
|
|
4
|
+
|
|
5
|
+
"""Consumer -- event-driven message consumer with LISTEN/NOTIFY support."""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
import select
|
|
9
|
+
import signal
|
|
10
|
+
import threading
|
|
11
|
+
import time
|
|
12
|
+
from typing import Callable, Literal, Optional
|
|
13
|
+
|
|
14
|
+
import psycopg
|
|
15
|
+
from psycopg import sql
|
|
16
|
+
|
|
17
|
+
from .client import PgqueClient
|
|
18
|
+
from .types import Message
|
|
19
|
+
|
|
20
|
+
logger = logging.getLogger("pgque")
|
|
21
|
+
|
|
22
|
+
# Maximum time the LISTEN wait blocks before re-checking the stop flag.
|
|
23
|
+
# Bounds shutdown latency to roughly this many seconds.
|
|
24
|
+
_WAIT_SLICE_SECONDS = 0.5
|
|
25
|
+
# PostgreSQL int4 max; request the whole batch by default.
|
|
26
|
+
_DEFAULT_MAX_MESSAGES = 2_147_483_647
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class Consumer:
    """Synchronous polling consumer with LISTEN/NOTIFY wakeup.

    Usage::

        consumer = Consumer(
            dsn="postgresql://localhost/mydb",
            queue="orders",
            name="order_processor",
        )

        @consumer.on("order.created")
        def handle_order(msg: Message):
            process_order(msg.payload)

        consumer.start()  # blocks until SIGTERM/SIGINT

    Handler return semantics:
      - If the handler returns without exception, the message is
        considered processed.
      - If the handler raises an exception, the message is nacked
        with the default retry_after.
      - If no handler is registered for a message type (and no
        default ``"*"`` handler exists), the message is nacked
        (sent to retry_queue, or to the dead-letter queue once
        ``queue_max_retries`` is exhausted). To ack unknown types
        instead, pass ``unknown_handler_policy="ack"``.

    After all messages in a batch have been dispatched, the batch is
    acked automatically.
    """

    def __init__(
        self,
        dsn: str,
        *,
        queue: str,
        name: str,
        poll_interval: int = 30,
        max_messages: int = _DEFAULT_MAX_MESSAGES,
        retry_after: int = 60,
        unknown_handler_policy: Literal["nack", "ack"] = "nack",
        subconsumer: Optional[str] = None,
        dead_interval: Optional[str] = None,
    ):
        self.dsn = dsn
        self.queue = queue
        self.name = name
        self.poll_interval = poll_interval
        self.max_messages = max_messages
        self.retry_after = retry_after
        if unknown_handler_policy not in ("nack", "ack"):
            raise ValueError(
                "unknown_handler_policy must be 'nack' or 'ack', "
                f"got {unknown_handler_policy!r}"
            )
        self._unknown_handler_policy = unknown_handler_policy

        # Experimental cooperative-consumers mode. When ``subconsumer`` is
        # set, the poll loop calls ``client.receive_coop(...)`` instead of
        # the normal ``receive(...)``. ``dead_interval`` is meaningless
        # outside coop mode and signals a programming error if provided.
        if dead_interval is not None and subconsumer is None:
            raise ValueError(
                "dead_interval is only valid in cooperative mode "
                "(set subconsumer=...)"
            )
        self.subconsumer = subconsumer
        self.dead_interval = dead_interval

        self._handlers: dict[str, Callable] = {}
        self._default_handler: Optional[Callable] = None
        self._running = False
        self._log = logging.getLogger(f"pgque.consumer.{name}")

    def on(self, event_type: str):
        """Decorator to register a handler for a given event type.

        Args:
            event_type: The ``pgque.message.type`` value to match.
                Use ``"*"`` to register a default/catch-all handler.
        """

        def decorator(func: Callable):
            if event_type == "*":
                self._default_handler = func
            else:
                self._handlers[event_type] = func
            return func

        return decorator

    def start(self) -> None:
        """Run the consume loop (blocks until SIGTERM/SIGINT).

        Opens its own connection, subscribes to LISTEN, and polls for
        batches. Each batch is processed and acked in a single
        transaction.
        """
        self._running = True

        # Graceful shutdown on signals; only main-thread invocations can
        # install signal handlers. When the consumer is run from a worker
        # thread (tests, embedded use), skip registration -- callers stop
        # via Consumer.stop().
        in_main_thread = threading.current_thread() is threading.main_thread()
        original_sigterm = None
        original_sigint = None

        def _stop(signum, frame):
            logger.info("received signal %s, shutting down", signum)
            self._running = False

        if in_main_thread:
            original_sigterm = signal.getsignal(signal.SIGTERM)
            original_sigint = signal.getsignal(signal.SIGINT)
            signal.signal(signal.SIGTERM, _stop)
            signal.signal(signal.SIGINT, _stop)

        try:
            with psycopg.connect(self.dsn, autocommit=True) as conn:
                # Subscribe for wakeup notifications
                channel = f"pgque_{self.queue}"
                conn.execute(sql.SQL("LISTEN {}").format(sql.Identifier(channel)))
                logger.info(
                    "consumer %s listening on %s (poll=%ds)",
                    self.name,
                    self.queue,
                    self.poll_interval,
                )

                while self._running:
                    self._poll_once(conn)

                    if not self._running:
                        break

                    # Wait for NOTIFY or poll_interval timeout in short
                    # bounded slices. psycopg's conn.notifies() can
                    # block uninterruptibly for the full timeout, which
                    # makes stop() slow and can miss prompt wakeups.
                    # Polling the underlying socket with
                    # select() lets us re-check _running every SLICE
                    # seconds and drain any pending NOTIFY immediately.
                    self._wait_for_notify_or_stop(conn)

        finally:
            if in_main_thread:
                signal.signal(signal.SIGTERM, original_sigterm)
                signal.signal(signal.SIGINT, original_sigint)

            logger.info("consumer %s stopped", self.name)

    def stop(self) -> None:
        """Request graceful shutdown (safe to call from another thread)."""
        self._running = False

    def _wait_for_notify_or_stop(self, conn: psycopg.Connection) -> None:
        """Wait up to ``poll_interval`` for a NOTIFY, in short slices.

        Returns early on any of:
          * a NOTIFY arrives (drained from the connection),
          * ``stop()`` flips ``_running`` to False,
          * ``poll_interval`` elapses cumulatively.

        Each slice is at most ``_WAIT_SLICE_SECONDS`` so ``stop()`` is
        observed within ~SLICE seconds of the call.
        """
        # Drain any NOTIFY already buffered in libpq from the prior
        # _poll_once (e.g. delivered alongside query results). Without
        # this, a buffered notify sits in libpq until the socket
        # becomes readable for some other reason -- select() won't see
        # it, and wakeup latency stretches out. Restores the implicit
        # entry-drain semantics of the old conn.notifies(timeout=...).
        drained = False
        for _notify in conn.notifies(timeout=0):
            drained = True
        if drained:
            return

        deadline = time.monotonic() + self.poll_interval
        fd = conn.fileno()
        while self._running:
            remaining = deadline - time.monotonic()
            if remaining <= 0:
                return
            slice_timeout = min(_WAIT_SLICE_SECONDS, remaining)
            # select() returns when the socket is readable (notify
            # delivered by the server) or when slice_timeout expires.
            # It is a thin wrapper around the OS poll, so it is cheap
            # and interruptible.
            r, _w, _x = select.select([fd], [], [], slice_timeout)
            if not self._running:
                return
            if r:
                # Drain pending notifications without blocking. A
                # zero timeout makes notifies() return immediately
                # after consuming whatever is buffered.
                for _notify in conn.notifies(timeout=0):
                    pass
                return

    def _poll_once(self, conn: psycopg.Connection) -> None:
        """Receive one batch and dispatch messages.

        If any per-message ``nack()`` raises, all remaining messages in
        the batch are still dispatched (their handlers run), but the
        batch is NOT acked at the end -- the receive transaction commits
        without finishing the batch, so PgQ redelivers the whole batch
        on the next poll. Without this guard, swallowing a nack failure
        and then acking would advance past the batch and silently drop
        the failed message.
        """
        # Use a transaction block for receive + ack
        with conn.transaction():
            client = PgqueClient(conn)
            if self.subconsumer is not None:
                msgs = client.receive_coop(
                    self.queue,
                    self.name,
                    self.subconsumer,
                    max_messages=self.max_messages,
                    dead_interval=self.dead_interval,
                )
            else:
                msgs = client.receive(
                    self.queue, self.name, self.max_messages
                )

            if not msgs:
                return

            batch_id = msgs[0].batch_id
            logger.debug(
                "batch %d: %d message(s)", batch_id, len(msgs)
            )

            nack_failed = False

            for msg in msgs:
                handler = self._handlers.get(msg.type, self._default_handler)
                if handler is None:
                    if self._unknown_handler_policy == "ack":
                        self._log.warning(
                            "no handler for event type=%s ev_id=%s; acking",
                            msg.type,
                            msg.msg_id,
                        )
                        continue
                    self._log.warning(
                        "no handler for event type=%s ev_id=%s; nacking",
                        msg.type,
                        msg.msg_id,
                    )
                    try:
                        client.nack(
                            batch_id,
                            msg,
                            retry_after=self.retry_after,
                            reason=f"no handler for type={msg.type}",
                        )
                    except Exception:
                        nack_failed = True
                        self._log.exception(
                            "nack failed for unhandled msg_id=%d; "
                            "skipping batch ack so PgQ redelivers",
                            msg.msg_id,
                        )
                    # Whether the nack succeeded or not, move on to the
                    # next message. (A `continue` inside the except
                    # block would be redundant with this one.)
                    continue

                try:
                    handler(msg)
                except Exception:
                    self._log.exception(
                        "handler failed for msg_id=%d, nacking",
                        msg.msg_id,
                    )
                    try:
                        client.nack(
                            batch_id, msg, retry_after=self.retry_after
                        )
                    except Exception:
                        nack_failed = True
                        self._log.exception(
                            "nack failed for msg_id=%d; "
                            "skipping batch ack so PgQ redelivers",
                            msg.msg_id,
                        )
                    continue

            if nack_failed:
                # Do NOT ack -- redeliver on next poll.
                return

            # pgque.ack returns 1 on success, 0 if the batch was already
            # finished or not found (stale/double ack, cross-consumer
            # race). Mirror the TS+Go consumers and log warn on 0; do
            # not treat it as an error.
            if client.ack(batch_id) == 0:
                logger.warning(
                    "pgque: ack batch %d returned 0 -- stale or double ack "
                    "(batch already finished or not found)",
                    batch_id,
                )
|
pgque/errors.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
# Copyright 2026 Nikolay Samokhvalov. Apache-2.0 license.
|
|
2
|
+
# PgQue includes code derived from PgQ (ISC license,
|
|
3
|
+
# Marko Kreen / Skype Technologies OU).
|
|
4
|
+
|
|
5
|
+
"""Exception hierarchy for pgque."""
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class PgqueError(Exception):
    """Root of the pgque exception hierarchy.

    Catching this single type catches every error the library raises.
    """
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class PgqueConnectionError(PgqueError):
    """Could not connect to PostgreSQL, or an established connection dropped."""
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class PgqueQueueNotFound(PgqueError):
    """The named queue does not exist (pgque SQL signals this with a recognizable message)."""
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class PgqueBatchNotFound(PgqueError):
    """No such batch ID, or the batch has already been finished."""
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class PgqueConsumerNotFound(PgqueError):
    """The consumer has not been registered on the queue."""
|
pgque/types.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
# Copyright 2026 Nikolay Samokhvalov. Apache-2.0 license.
|
|
2
|
+
# PgQue includes code derived from PgQ (ISC license,
|
|
3
|
+
# Marko Kreen / Skype Technologies OU).
|
|
4
|
+
|
|
5
|
+
"""Message and Event types for pgque."""
|
|
6
|
+
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
from typing import Any, Optional
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
@dataclass
class Message:
    """One message delivered from a pgque queue.

    Mirrors the ``pgque.message`` composite type, field by field:

    msg_id -- ev_id
    batch_id -- batch containing this message
    type -- ev_type
    payload -- ev_data (jsonb auto-decoded by psycopg, otherwise text)
    retry_count -- ev_retry (None for first delivery)
    created_at -- ev_time
    extra1..4 -- ev_extra1..ev_extra4
    """

    # Identity and routing.
    msg_id: int
    batch_id: int
    type: str
    # Body plus delivery metadata.
    payload: Any
    retry_count: Optional[int]
    created_at: datetime
    # Optional PgQ extra columns; absent columns arrive as None.
    extra1: Optional[str] = None
    extra2: Optional[str] = None
    extra3: Optional[str] = None
    extra4: Optional[str] = None
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
@dataclass
class Event:
    """An event to publish to a queue; convenience wrapper for ``Client.send``.

    Passing ``payload`` and ``type`` straight to ``send`` is usually simpler.
    Reach for ``Event`` when events are built programmatically or when the
    payload and its metadata need to travel together.
    """

    payload: Any
    type: str = "default"
    # default_factory gives every instance its own dict -- no shared state.
    extra: dict[str, str] = field(default_factory=dict)
|
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: pgque-py
|
|
3
|
+
Version: 0.2.0rc1
|
|
4
|
+
Summary: Python client for PgQue -- PgQ Universal Edition
|
|
5
|
+
Author-email: Nikolay Samokhvalov <nik@postgres.ai>
|
|
6
|
+
License-Expression: Apache-2.0
|
|
7
|
+
Project-URL: Homepage, https://github.com/NikolayS/pgque
|
|
8
|
+
Project-URL: Repository, https://github.com/NikolayS/pgque
|
|
9
|
+
Project-URL: Issues, https://github.com/NikolayS/pgque/issues
|
|
10
|
+
Project-URL: Documentation, https://github.com/NikolayS/pgque/blob/main/docs/reference.md
|
|
11
|
+
Keywords: postgres,postgresql,queue,pgq,pgque,background-jobs
|
|
12
|
+
Classifier: Development Status :: 4 - Beta
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: Programming Language :: Python
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
20
|
+
Classifier: Operating System :: OS Independent
|
|
21
|
+
Classifier: Topic :: Database
|
|
22
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
23
|
+
Requires-Python: >=3.10
|
|
24
|
+
Description-Content-Type: text/markdown
|
|
25
|
+
License-File: LICENSE
|
|
26
|
+
Requires-Dist: psycopg[binary]<4,>=3.1
|
|
27
|
+
Provides-Extra: dev
|
|
28
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
29
|
+
Dynamic: license-file
|
|
30
|
+
|
|
31
|
+
# pgque-py
|
|
32
|
+
|
|
33
|
+
Python client for [PgQue](https://github.com/NikolayS/pgque) — the PgQ-based
|
|
34
|
+
universal PostgreSQL queue. Thin wrapper over `pgque-api` SQL functions:
|
|
35
|
+
`send`, `receive`, `ack`, `nack`, `force_next_tick`, plus a polling
|
|
36
|
+
`Consumer` with `LISTEN`/`NOTIFY` wakeup.
|
|
37
|
+
|
|
38
|
+
## Install
|
|
39
|
+
|
|
40
|
+
After the first Python client release:
|
|
41
|
+
|
|
42
|
+
```bash
|
|
43
|
+
pip install pgque-py
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
Requires Python 3.10+ and PostgreSQL 14+ with the PgQue schema installed
|
|
47
|
+
(`\i pgque.sql` — no extension required).
|
|
48
|
+
|
|
49
|
+
## Database permissions
|
|
50
|
+
|
|
51
|
+
The connecting database role needs `pgque_reader` to consume (`receive`, `ack`, `nack`, `subscribe`, `unsubscribe`) and `pgque_writer` to produce (`send`, `send_batch`). The two are **siblings** — neither inherits the other. An app that both produces and consumes (the typical case for code using this client) must be granted **both** roles:
|
|
52
|
+
|
|
53
|
+
```sql
|
|
54
|
+
grant pgque_reader to your_app_user;
|
|
55
|
+
grant pgque_writer to your_app_user;
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
See [`docs/reference.md` — Roles and grants](../../docs/reference.md#roles-and-grants) for the full role table.
|
|
59
|
+
|
|
60
|
+
## Quickstart
|
|
61
|
+
|
|
62
|
+
```python
|
|
63
|
+
import pgque
|
|
64
|
+
|
|
65
|
+
with pgque.connect("postgresql://localhost/mydb") as client:
|
|
66
|
+
# one-time setup (typically in a migration)
|
|
67
|
+
client.conn.execute("select pgque.subscribe('orders', 'order_worker')")
|
|
68
|
+
client.conn.commit()
|
|
69
|
+
|
|
70
|
+
# producer: commit once to publish both calls atomically
|
|
71
|
+
event_id = client.send("orders", {"order_id": 42}, type="order.created")
|
|
72
|
+
batch_ids = client.send_batch("orders", "order.created", [
|
|
73
|
+
{"order_id": 43},
|
|
74
|
+
{"order_id": 44},
|
|
75
|
+
])
|
|
76
|
+
client.conn.commit()
|
|
77
|
+
print(event_id, batch_ids)
|
|
78
|
+
|
|
79
|
+
# consumer (separate process / thread)
|
|
80
|
+
consumer = pgque.Consumer(
|
|
81
|
+
dsn="postgresql://localhost/mydb",
|
|
82
|
+
queue="orders",
|
|
83
|
+
name="order_worker",
|
|
84
|
+
)
|
|
85
|
+
|
|
86
|
+
@consumer.on("order.created")
|
|
87
|
+
def handle_order(msg: pgque.Message) -> None:
|
|
88
|
+
print(f"got {msg.type}: {msg.payload}")
|
|
89
|
+
|
|
90
|
+
# Optional: catch-all handler for types with no specific handler.
|
|
91
|
+
# Without it, messages with unhandled types are nacked by default
|
|
92
|
+
# (sent to retry_queue, or to the dead-letter queue once
|
|
93
|
+
# queue_max_retries is exhausted). Register a "*" handler to take
|
|
94
|
+
# explicit control.
|
|
95
|
+
@consumer.on("*")
|
|
96
|
+
def handle_unknown(msg: pgque.Message) -> None:
|
|
97
|
+
print(f"unhandled type {msg.type!r}: {msg.payload}")
|
|
98
|
+
|
|
99
|
+
consumer.start() # blocks until SIGTERM / SIGINT
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
### Consumer options
|
|
103
|
+
|
|
104
|
+
`Consumer(..., max_messages=...)` controls the per-`receive` limit.
|
|
105
|
+
The default is PostgreSQL's `int` maximum, so the consumer requests
|
|
106
|
+
the whole PgQ batch before acknowledging it. `ack()` finishes the
|
|
107
|
+
entire underlying PgQ batch, including rows beyond `max_messages`;
|
|
108
|
+
lower this value only if the lowered limit still covers the queue's
worst-case batch size; otherwise rows past the limit are silently
skipped by the batch ack.
|
|
111
|
+
|
|
112
|
+
### Handling unknown event types
|
|
113
|
+
|
|
114
|
+
By default the consumer **nacks** any message whose type has no
|
|
115
|
+
registered handler and no `"*"` catch-all. The message is retried (or
|
|
116
|
+
dead-lettered once `queue_max_retries` is exhausted) so unknown types
|
|
117
|
+
are never silently dropped.
|
|
118
|
+
|
|
119
|
+
To ack unknown types instead, pass `unknown_handler_policy="ack"`:
|
|
120
|
+
|
|
121
|
+
```python
|
|
122
|
+
consumer = pgque.Consumer(
|
|
123
|
+
dsn="postgresql://localhost/mydb",
|
|
124
|
+
queue="orders",
|
|
125
|
+
name="order_worker",
|
|
126
|
+
unknown_handler_policy="ack", # log WARNING and ack; do not nack
|
|
127
|
+
)
|
|
128
|
+
```
|
|
129
|
+
|
|
130
|
+
## Experimental: cooperative consumers
|
|
131
|
+
|
|
132
|
+
> **Experimental in PgQue 0.2.** Function names, edge-case behavior, and
|
|
133
|
+
> client API shape may change before this feature is marked stable. Do
|
|
134
|
+
> not use this as the only processing path for critical workloads
|
|
135
|
+
> without idempotent handlers and stale-worker takeover tests.
|
|
136
|
+
|
|
137
|
+
Cooperative consumers let several worker processes share **one logical
|
|
138
|
+
consumer**. Each batch is handed to exactly one subconsumer; the main
|
|
139
|
+
row owns the group cursor, member rows own active batches. See
|
|
140
|
+
[`docs/reference.md` — Cooperative consumers / subconsumers](../../docs/reference.md#cooperative-consumers--subconsumers)
|
|
141
|
+
for the SQL surface.
|
|
142
|
+
|
|
143
|
+
Two-worker example (each worker holds its own connection / process):
|
|
144
|
+
|
|
145
|
+
```python
|
|
146
|
+
import pgque
|
|
147
|
+
|
|
148
|
+
# worker-1
|
|
149
|
+
c1 = pgque.Consumer(
|
|
150
|
+
dsn="postgresql://localhost/mydb",
|
|
151
|
+
queue="orders",
|
|
152
|
+
name="order_worker",
|
|
153
|
+
subconsumer="worker-1",
|
|
154
|
+
dead_interval="5 minutes", # optional: take over a stale sibling
|
|
155
|
+
)
|
|
156
|
+
|
|
157
|
+
@c1.on("order.created")
|
|
158
|
+
def handle(msg):
|
|
159
|
+
process(msg)
|
|
160
|
+
|
|
161
|
+
c1.start() # in a second process: subconsumer="worker-2"
|
|
162
|
+
```
|
|
163
|
+
|
|
164
|
+
`Consumer(subconsumer=...)` switches the poll loop to
|
|
165
|
+
`receive_coop` and auto-registers the `coop_main` + `coop_member` rows
|
|
166
|
+
on the first call. `dead_interval` is only valid in cooperative mode;
|
|
167
|
+
passing it without `subconsumer` raises `ValueError`.
|
|
168
|
+
|
|
169
|
+
The low-level methods on `PgqueClient` are also available for direct
|
|
170
|
+
use:
|
|
171
|
+
|
|
172
|
+
```python
|
|
173
|
+
client.subscribe_subconsumer("orders", "order_worker", "worker-1")
|
|
174
|
+
msgs = client.receive_coop(
|
|
175
|
+
"orders", "order_worker", "worker-1",
|
|
176
|
+
max_messages=100, dead_interval="5 minutes",
|
|
177
|
+
)
|
|
178
|
+
client.ack(msgs[0].batch_id)
|
|
179
|
+
client.touch_subconsumer("orders", "order_worker", "worker-1")
|
|
180
|
+
client.unsubscribe_subconsumer(
|
|
181
|
+
"orders", "order_worker", "worker-1", batch_handling=1,
|
|
182
|
+
)
|
|
183
|
+
```
|
|
184
|
+
|
|
185
|
+
`unsubscribe_subconsumer(..., batch_handling=0)` (the default) raises if
|
|
186
|
+
the subconsumer holds an active batch; pass `batch_handling=1` to route
|
|
187
|
+
active messages through retry/DLQ before removal.
|
|
188
|
+
|
|
189
|
+
A runnable two-worker demo lives at
|
|
190
|
+
[`bench/coop_demo.py`](bench/coop_demo.py); run it against any pgque
|
|
191
|
+
database with `PGQUE_TEST_DSN` set.
|
|
192
|
+
|
|
193
|
+
## Manual ticking
|
|
194
|
+
|
|
195
|
+
For tests, demos, or manual operation without `pg_cron`, use
|
|
196
|
+
`client.force_next_tick(queue)` to force the **next** `pgque.ticker()` call to
|
|
197
|
+
materialize a tick. It does not insert the tick itself:
|
|
198
|
+
|
|
199
|
+
```python
|
|
200
|
+
client.force_next_tick("orders")
|
|
201
|
+
client.conn.execute("select pgque.ticker()")
|
|
202
|
+
client.conn.commit()
|
|
203
|
+
```
|
|
204
|
+
|
|
205
|
+
`client.force_tick(queue)` remains as a deprecated compatibility alias.
|
|
206
|
+
|
|
207
|
+
## Transactions
|
|
208
|
+
|
|
209
|
+
`send` → ticker → `receive` must each run in its own committed transaction (PgQue is snapshot-based). `pgque.connect(dsn)` is non-autocommit by default — commit between produce and consumer. The `Consumer` is autocommit + explicit `conn.transaction()` around `receive + dispatch + ack`.
|
|
210
|
+
|
|
211
|
+
Don't wrap `send` and `receive` in one explicit tx; same for `maint_retry_events` + `ticker`. See [snapshot rule](https://github.com/NikolayS/pgque/blob/main/docs/pgq-concepts.md#snapshot-rule).
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
## Tests
|
|
215
|
+
|
|
216
|
+
Integration tests require a running PostgreSQL with the PgQue schema
|
|
217
|
+
installed. Set `PGQUE_TEST_DSN` and run pytest:
|
|
218
|
+
|
|
219
|
+
```bash
|
|
220
|
+
PGQUE_TEST_DSN=postgresql://postgres:pgque_test@localhost/pgque_test \
|
|
221
|
+
pytest clients/python/tests
|
|
222
|
+
```
|
|
223
|
+
|
|
224
|
+
Without `PGQUE_TEST_DSN`, the tests skip.
|
|
225
|
+
|
|
226
|
+
## Distribution
|
|
227
|
+
|
|
228
|
+
The PyPI distribution is `pgque-py`; the import package is `pgque`:
|
|
229
|
+
|
|
230
|
+
```python
|
|
231
|
+
import pgque
|
|
232
|
+
```
|
|
233
|
+
|
|
234
|
+
See [RELEASE.md](RELEASE.md) for publishing steps.
|
|
235
|
+
|
|
236
|
+
## More
|
|
237
|
+
|
|
238
|
+
- Schema install, full reference, tutorial:
|
|
239
|
+
<https://github.com/NikolayS/pgque>
|
|
240
|
+
- Per-function SQL reference:
|
|
241
|
+
<https://github.com/NikolayS/pgque/blob/main/docs/reference.md>
|
|
242
|
+
- Issues: <https://github.com/NikolayS/pgque/issues>
|
|
243
|
+
|
|
244
|
+
## License
|
|
245
|
+
|
|
246
|
+
Apache-2.0. Copyright 2026 Nikolay Samokhvalov.
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
pgque/__init__.py,sha256=6Au5wTrYBL9r0S1hLjHbI4_51KtS5qcMkyhkNV5sGEU,1017
|
|
2
|
+
pgque/client.py,sha256=mR5fpe-MCtzd79RgbnmRD6hUJYE7CFlGRBMZFMj8p2g,15720
|
|
3
|
+
pgque/consumer.py,sha256=Gi6Ty7k7fusN6qXckd_CGZLHGm2M-0ido9WC9rE7x1c,12683
|
|
4
|
+
pgque/errors.py,sha256=ORjo02BxygfR-VlpVnoEBxc8NYHWf59GO3DzNY6pzAY,697
|
|
5
|
+
pgque/types.py,sha256=repuehmcjPXJjmTPXUNwPhcrLurJPr6wDnT0JRVZlp8,1430
|
|
6
|
+
pgque_py-0.2.0rc1.dist-info/licenses/LICENSE,sha256=4O_EJRa1pubA2kMIOnlg4m1ZeYcS6p2zAHlgQLzAzGk,10771
|
|
7
|
+
pgque_py-0.2.0rc1.dist-info/METADATA,sha256=Dy1RGs_I3TkYZ8L4U1_Dl9fvQ_iGOp1Yz9BINDtmYnk,8411
|
|
8
|
+
pgque_py-0.2.0rc1.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
|
|
9
|
+
pgque_py-0.2.0rc1.dist-info/top_level.txt,sha256=ZvNzuJtA1VF1VoGEKutME5Ai2FSDjO8PEIe8RN3DlgQ,6
|
|
10
|
+
pgque_py-0.2.0rc1.dist-info/RECORD,,
|
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
|
|
2
|
+
Apache License
|
|
3
|
+
Version 2.0, January 2004
|
|
4
|
+
http://www.apache.org/licenses/
|
|
5
|
+
|
|
6
|
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
7
|
+
|
|
8
|
+
1. Definitions.
|
|
9
|
+
|
|
10
|
+
"License" shall mean the terms and conditions for use, reproduction,
|
|
11
|
+
and distribution as defined by Sections 1 through 9 of this document.
|
|
12
|
+
|
|
13
|
+
"Licensor" shall mean the copyright owner or entity authorized by
|
|
14
|
+
the copyright owner that is granting the License.
|
|
15
|
+
|
|
16
|
+
"Legal Entity" shall mean the union of the acting entity and all
|
|
17
|
+
other entities that control, are controlled by, or are under common
|
|
18
|
+
control with that entity. For the purposes of this definition,
|
|
19
|
+
"control" means (i) the power, direct or indirect, to cause the
|
|
20
|
+
direction or management of such entity, whether by contract or
|
|
21
|
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
22
|
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
23
|
+
|
|
24
|
+
"You" (or "Your") shall mean an individual or Legal Entity
|
|
25
|
+
exercising permissions granted by this License.
|
|
26
|
+
|
|
27
|
+
"Source" form shall mean the preferred form for making modifications,
|
|
28
|
+
including but not limited to software source code, documentation
|
|
29
|
+
source, and configuration files.
|
|
30
|
+
|
|
31
|
+
"Object" form shall mean any form resulting from mechanical
|
|
32
|
+
transformation or translation of a Source form, including but
|
|
33
|
+
not limited to compiled object code, generated documentation,
|
|
34
|
+
and conversions to other media types.
|
|
35
|
+
|
|
36
|
+
"Work" shall mean the work of authorship, whether in Source or
|
|
37
|
+
Object form, made available under the License, as indicated by a
|
|
38
|
+
copyright notice that is included in or attached to the work
|
|
39
|
+
(an example is provided in the Appendix below).
|
|
40
|
+
|
|
41
|
+
"Derivative Works" shall mean any work, whether in Source or Object
|
|
42
|
+
form, that is based on (or derived from) the Work and for which the
|
|
43
|
+
editorial revisions, annotations, elaborations, or other modifications
|
|
44
|
+
represent, as a whole, an original work of authorship. For the purposes
|
|
45
|
+
of this License, Derivative Works shall not include works that remain
|
|
46
|
+
separable from, or merely link (or bind by name) to the interfaces of,
|
|
47
|
+
the Work and Derivative Works thereof.
|
|
48
|
+
|
|
49
|
+
"Contribution" shall mean any work of authorship, including
|
|
50
|
+
the original version of the Work and any modifications or additions
|
|
51
|
+
to that Work or Derivative Works thereof, that is intentionally
|
|
52
|
+
submitted to the Licensor for inclusion in the Work by the copyright owner
|
|
53
|
+
or by an individual or Legal Entity authorized to submit on behalf of
|
|
54
|
+
the copyright owner. For the purposes of this definition, "submitted"
|
|
55
|
+
means any form of electronic, verbal, or written communication sent
|
|
56
|
+
to the Licensor or its representatives, including but not limited to
|
|
57
|
+
communication on electronic mailing lists, source code control systems,
|
|
58
|
+
and issue tracking systems that are managed by, or on behalf of, the
|
|
59
|
+
Licensor for the purpose of discussing and improving the Work, but
|
|
60
|
+
excluding communication that is conspicuously marked or otherwise
|
|
61
|
+
designated in writing by the copyright owner as "Not a Contribution."
|
|
62
|
+
|
|
63
|
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
64
|
+
on behalf of whom a Contribution has been received by the Licensor and
|
|
65
|
+
subsequently incorporated within the Work.
|
|
66
|
+
|
|
67
|
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
68
|
+
this License, each Contributor hereby grants to You a perpetual,
|
|
69
|
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
70
|
+
copyright license to reproduce, prepare Derivative Works of,
|
|
71
|
+
publicly display, publicly perform, sublicense, and distribute the
|
|
72
|
+
Work and such Derivative Works in Source or Object form.
|
|
73
|
+
|
|
74
|
+
3. Grant of Patent License. Subject to the terms and conditions of
|
|
75
|
+
this License, each Contributor hereby grants to You a perpetual,
|
|
76
|
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
77
|
+
(except as stated in this section) patent license to make, have made,
|
|
78
|
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
79
|
+
where such license applies only to those patent claims licensable
|
|
80
|
+
by such Contributor that are necessarily infringed by their
|
|
81
|
+
Contribution(s) alone or by combination of their Contribution(s)
|
|
82
|
+
with the Work to which such Contribution(s) was submitted. If You
|
|
83
|
+
institute patent litigation against any entity (including a
|
|
84
|
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
85
|
+
or a Contribution incorporated within the Work constitutes direct
|
|
86
|
+
or contributory patent infringement, then any patent licenses
|
|
87
|
+
granted to You under this License for that Work shall terminate
|
|
88
|
+
as of the date such litigation is filed.
|
|
89
|
+
|
|
90
|
+
4. Redistribution. You may reproduce and distribute copies of the
|
|
91
|
+
Work or Derivative Works thereof in any medium, with or without
|
|
92
|
+
modifications, and in Source or Object form, provided that You
|
|
93
|
+
meet the following conditions:
|
|
94
|
+
|
|
95
|
+
(a) You must give any other recipients of the Work or
|
|
96
|
+
Derivative Works a copy of this License; and
|
|
97
|
+
|
|
98
|
+
(b) You must cause any modified files to carry prominent notices
|
|
99
|
+
stating that You changed the files; and
|
|
100
|
+
|
|
101
|
+
(c) You must retain, in the Source form of any Derivative Works
|
|
102
|
+
that You distribute, all copyright, patent, trademark, and
|
|
103
|
+
attribution notices from the Source form of the Work,
|
|
104
|
+
excluding those notices that do not pertain to any part of
|
|
105
|
+
the Derivative Works; and
|
|
106
|
+
|
|
107
|
+
(d) If the Work includes a "NOTICE" text file as part of its
|
|
108
|
+
distribution, then any Derivative Works that You distribute must
|
|
109
|
+
include a readable copy of the attribution notices contained
|
|
110
|
+
within such NOTICE file, excluding any notices that do not
|
|
111
|
+
pertain to any part of the Derivative Works, in at least one
|
|
112
|
+
of the following places: within a NOTICE text file distributed
|
|
113
|
+
as part of the Derivative Works; within the Source form or
|
|
114
|
+
documentation, if provided along with the Derivative Works; or,
|
|
115
|
+
within a display generated by the Derivative Works, if and
|
|
116
|
+
wherever such third-party notices normally appear. The contents
|
|
117
|
+
of the NOTICE file are for informational purposes only and
|
|
118
|
+
do not modify the License. You may add Your own attribution
|
|
119
|
+
notices within Derivative Works that You distribute, alongside
|
|
120
|
+
or as an addendum to the NOTICE text from the Work, provided
|
|
121
|
+
that such additional attribution notices cannot be construed
|
|
122
|
+
as modifying the License.
|
|
123
|
+
|
|
124
|
+
You may add Your own copyright statement to Your modifications and
|
|
125
|
+
may provide additional or different license terms and conditions
|
|
126
|
+
for use, reproduction, or distribution of Your modifications, or
|
|
127
|
+
for any such Derivative Works as a whole, provided Your use,
|
|
128
|
+
reproduction, and distribution of the Work otherwise complies with
|
|
129
|
+
the conditions stated in this License.
|
|
130
|
+
|
|
131
|
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
132
|
+
any Contribution intentionally submitted for inclusion in the Work
|
|
133
|
+
by You to the Licensor shall be under the terms and conditions of
|
|
134
|
+
this License, without any additional terms or conditions.
|
|
135
|
+
Notwithstanding the above, nothing herein shall supersede or modify
|
|
136
|
+
the terms of any separate license agreement you may have executed
|
|
137
|
+
with Licensor regarding such Contributions.
|
|
138
|
+
|
|
139
|
+
6. Trademarks. This License does not grant permission to use the trade
|
|
140
|
+
names, trademarks, service marks, or product names of the Licensor,
|
|
141
|
+
except as required for reasonable and customary use in describing the
|
|
142
|
+
origin of the Work and reproducing the content of the NOTICE file.
|
|
143
|
+
|
|
144
|
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
145
|
+
agreed to in writing, Licensor provides the Work (and each
|
|
146
|
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
147
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
148
|
+
implied, including, without limitation, any warranties or conditions
|
|
149
|
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
150
|
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
151
|
+
appropriateness of using or redistributing the Work and assume any
|
|
152
|
+
risks associated with Your exercise of permissions under this License.
|
|
153
|
+
|
|
154
|
+
8. Limitation of Liability. In no event and under no legal theory,
|
|
155
|
+
whether in tort (including negligence), contract, or otherwise,
|
|
156
|
+
unless required by applicable law (such as deliberate and grossly
|
|
157
|
+
negligent acts) or agreed to in writing, shall any Contributor be
|
|
158
|
+
liable to You for damages, including any direct, indirect, special,
|
|
159
|
+
incidental, or consequential damages of any character arising as a
|
|
160
|
+
result of this License or out of the use or inability to use the
|
|
161
|
+
Work (including but not limited to damages for loss of goodwill,
|
|
162
|
+
work stoppage, computer failure or malfunction, or any and all
|
|
163
|
+
other commercial damages or losses), even if such Contributor
|
|
164
|
+
has been advised of the possibility of such damages.
|
|
165
|
+
|
|
166
|
+
9. Accepting Warranty or Additional Liability. While redistributing
|
|
167
|
+
the Work or Derivative Works thereof, You may choose to offer,
|
|
168
|
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
169
|
+
or other liability obligations and/or rights consistent with this
|
|
170
|
+
License. However, in accepting such obligations, You may act only
|
|
171
|
+
on Your own behalf and on Your sole responsibility, not on behalf
|
|
172
|
+
of any other Contributor, and only if You agree to indemnify,
|
|
173
|
+
defend, and hold each Contributor harmless for any liability
|
|
174
|
+
incurred by, or claims asserted against, such Contributor by reason
|
|
175
|
+
of your accepting any such warranty or additional liability.
|
|
176
|
+
|
|
177
|
+
END OF TERMS AND CONDITIONS
|
|
178
|
+
|
|
179
|
+
Copyright 2026 Nikolay Samokhvalov
|
|
180
|
+
|
|
181
|
+
Licensed under the Apache License, Version 2.0 (the "License");
|
|
182
|
+
you may not use this file except in compliance with the License.
|
|
183
|
+
You may obtain a copy of the License at
|
|
184
|
+
|
|
185
|
+
http://www.apache.org/licenses/LICENSE-2.0
|
|
186
|
+
|
|
187
|
+
Unless required by applicable law or agreed to in writing, software
|
|
188
|
+
distributed under the License is distributed on an "AS IS" BASIS,
|
|
189
|
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
190
|
+
See the License for the specific language governing permissions and
|
|
191
|
+
limitations under the License.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
pgque
|