pgqueuer 0.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pgqueuer-0.0.0/LICENSE +21 -0
- pgqueuer-0.0.0/PKG-INFO +156 -0
- pgqueuer-0.0.0/README.md +102 -0
- pgqueuer-0.0.0/pgqueuer/__init__.py +0 -0
- pgqueuer-0.0.0/pgqueuer/__main__.py +8 -0
- pgqueuer-0.0.0/pgqueuer/__version__.py +16 -0
- pgqueuer-0.0.0/pgqueuer/_version.py +16 -0
- pgqueuer-0.0.0/pgqueuer/buffers.py +90 -0
- pgqueuer-0.0.0/pgqueuer/cli.py +322 -0
- pgqueuer-0.0.0/pgqueuer/db.py +232 -0
- pgqueuer-0.0.0/pgqueuer/helpers.py +15 -0
- pgqueuer-0.0.0/pgqueuer/listeners.py +57 -0
- pgqueuer-0.0.0/pgqueuer/logconfig.py +9 -0
- pgqueuer-0.0.0/pgqueuer/models.py +144 -0
- pgqueuer-0.0.0/pgqueuer/py.typed +0 -0
- pgqueuer-0.0.0/pgqueuer/qm.py +250 -0
- pgqueuer-0.0.0/pgqueuer/queries.py +627 -0
- pgqueuer-0.0.0/pgqueuer/supervisor.py +39 -0
- pgqueuer-0.0.0/pgqueuer/tm.py +31 -0
- pgqueuer-0.0.0/pgqueuer.egg-info/PKG-INFO +156 -0
- pgqueuer-0.0.0/pgqueuer.egg-info/SOURCES.txt +30 -0
- pgqueuer-0.0.0/pgqueuer.egg-info/dependency_links.txt +1 -0
- pgqueuer-0.0.0/pgqueuer.egg-info/requires.txt +29 -0
- pgqueuer-0.0.0/pgqueuer.egg-info/top_level.txt +1 -0
- pgqueuer-0.0.0/pyproject.toml +107 -0
- pgqueuer-0.0.0/setup.cfg +4 -0
- pgqueuer-0.0.0/test/test_buffers.py +72 -0
- pgqueuer-0.0.0/test/test_drivers.py +158 -0
- pgqueuer-0.0.0/test/test_qm.py +154 -0
- pgqueuer-0.0.0/test/test_queries.py +232 -0
- pgqueuer-0.0.0/test/test_rps.py +71 -0
- pgqueuer-0.0.0/test/test_tm.py +45 -0
pgqueuer-0.0.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2024 janbjorge
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
pgqueuer-0.0.0/PKG-INFO
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
1
|
+
Metadata-Version: 2.1
|
|
2
|
+
Name: pgqueuer
|
|
3
|
+
Version: 0.0.0
|
|
4
|
+
Summary: Pgqueuer is a Python library leveraging PostgreSQL for efficient job queuing.
|
|
5
|
+
Author: janbjorge
|
|
6
|
+
License: MIT License
|
|
7
|
+
Project-URL: Documentation, https://github.com/janbjorge/pgqueuer/wiki
|
|
8
|
+
Project-URL: Homepage, https://github.com/janbjorge/pgqueuer/
|
|
9
|
+
Project-URL: Issues, https://github.com/janbjorge/pgqueuer/issues
|
|
10
|
+
Project-URL: Repository, https://github.com/janbjorge/pgqueuer/
|
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
|
12
|
+
Classifier: Environment :: Web Environment
|
|
13
|
+
Classifier: Framework :: AsyncIO
|
|
14
|
+
Classifier: Intended Audience :: Developers
|
|
15
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
16
|
+
Classifier: Natural Language :: English
|
|
17
|
+
Classifier: Operating System :: OS Independent
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
21
|
+
Classifier: Programming Language :: Python
|
|
22
|
+
Classifier: Topic :: Database
|
|
23
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
24
|
+
Classifier: Topic :: Utilities
|
|
25
|
+
Classifier: Topic :: System :: Distributed Computing
|
|
26
|
+
Requires-Python: >=3.10
|
|
27
|
+
Description-Content-Type: text/markdown
|
|
28
|
+
License-File: LICENSE
|
|
29
|
+
Requires-Dist: anyio>=4.0
|
|
30
|
+
Requires-Dist: pydantic>=2.0.0
|
|
31
|
+
Requires-Dist: tabulate>=0.9.0
|
|
32
|
+
Provides-Extra: asyncpg
|
|
33
|
+
Requires-Dist: asyncpg; extra == "asyncpg"
|
|
34
|
+
Provides-Extra: psycopg
|
|
35
|
+
Requires-Dist: psycopg>=3.2.0; extra == "psycopg"
|
|
36
|
+
Provides-Extra: dev
|
|
37
|
+
Requires-Dist: asyncpg; extra == "dev"
|
|
38
|
+
Requires-Dist: asyncpg-stubs; extra == "dev"
|
|
39
|
+
Requires-Dist: fastapi; extra == "dev"
|
|
40
|
+
Requires-Dist: httpx; extra == "dev"
|
|
41
|
+
Requires-Dist: mypy; extra == "dev"
|
|
42
|
+
Requires-Dist: mypy-extensions; extra == "dev"
|
|
43
|
+
Requires-Dist: psycopg>=3.2.0; extra == "dev"
|
|
44
|
+
Requires-Dist: pytest; extra == "dev"
|
|
45
|
+
Requires-Dist: pytest-asyncio; extra == "dev"
|
|
46
|
+
Requires-Dist: ruff; extra == "dev"
|
|
47
|
+
Requires-Dist: tqdm; extra == "dev"
|
|
48
|
+
Requires-Dist: types-tabulate; extra == "dev"
|
|
49
|
+
Requires-Dist: uvicorn; extra == "dev"
|
|
50
|
+
Provides-Extra: docs
|
|
51
|
+
Requires-Dist: myst-parser; extra == "docs"
|
|
52
|
+
Requires-Dist: sphinx; extra == "docs"
|
|
53
|
+
Requires-Dist: sphinx-rtd-theme; extra == "docs"
|
|
54
|
+
|
|
55
|
+
### Readme
|
|
56
|
+
## 🚀 pgqueuer - Building Smoother Workflows One Queue at a Time 🚀
|
|
57
|
+
[](https://github.com/janbjorge/pgqueuer/actions/workflows/ci.yml?query=branch%3Amain)
|
|
58
|
+
[](https://pypi.python.org/pypi/pgqueuer)
|
|
59
|
+
[](https://pepy.tech/project/pgqueuer)
|
|
60
|
+
[](https://github.com/janbjorge/pgqueuer)
|
|
61
|
+
|
|
62
|
+
---
|
|
63
|
+
|
|
64
|
+
📚 **Documentation**: [Explore the Docs 📖](https://pgqueuer.readthedocs.io/en/latest/)
|
|
65
|
+
|
|
66
|
+
🔍 **Source Code**: [View on GitHub 💾](https://github.com/janbjorge/pgqueuer/)
|
|
67
|
+
|
|
68
|
+
💬 **Join the Discussion**: [Discord Community](https://discord.gg/C7YMBzcRMQ)
|
|
69
|
+
|
|
70
|
+
---
|
|
71
|
+
|
|
72
|
+
## pgqueuer
|
|
73
|
+
|
|
74
|
+
pgqueuer is a minimalist, high-performance job queue library for Python, leveraging the robustness of PostgreSQL. Designed for simplicity and efficiency, pgqueuer uses PostgreSQL's LISTEN/NOTIFY to manage job queues effortlessly.
|
|
75
|
+
|
|
76
|
+
### Features
|
|
77
|
+
|
|
78
|
+
- **Simple Integration**: Easy to integrate with existing Python applications using PostgreSQL.
|
|
79
|
+
- **Efficient Concurrency Handling**: Utilizes PostgreSQL's `FOR UPDATE SKIP LOCKED` for reliable and concurrent job processing.
|
|
80
|
+
- **Real-time Notifications**: Leverages `LISTEN` and `NOTIFY` for real-time updates on job status changes.
|
|
81
|
+
|
|
82
|
+
### Installation
|
|
83
|
+
|
|
84
|
+
To install pgqueuer, simply run the following pip command:
|
|
85
|
+
|
|
86
|
+
```bash
|
|
87
|
+
pip install pgqueuer
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
### Example Usage
|
|
91
|
+
|
|
92
|
+
Here's how you can use pgqueuer in a typical scenario processing incoming data messages:
|
|
93
|
+
|
|
94
|
+
#### Write and run a consumer
|
|
95
|
+
Start a long-lived consumer that will begin processing jobs as soon as they are enqueued by another process. In this case we want to be a bit more careful, as we want graceful shutdowns; `pgqueuer run` will set up signals to
|
|
96
|
+
ensure this.
|
|
97
|
+
|
|
98
|
+
```python
|
|
99
|
+
from __future__ import annotations
|
|
100
|
+
|
|
101
|
+
import asyncpg
|
|
102
|
+
from pgqueuer.db import AsyncpgDriver, dsn
|
|
103
|
+
from pgqueuer.models import Job
|
|
104
|
+
from pgqueuer.qm import QueueManager
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
async def main() -> QueueManager:
|
|
108
|
+
connection = await asyncpg.connect(dsn())
|
|
109
|
+
driver = AsyncpgDriver(connection)
|
|
110
|
+
qm = QueueManager(driver)
|
|
111
|
+
|
|
112
|
+
# Setup the 'fetch' entrypoint
|
|
113
|
+
@qm.entrypoint("fetch")
|
|
114
|
+
async def process_message(job: Job) -> None:
|
|
115
|
+
print(f"Processed message: {job}")
|
|
116
|
+
|
|
117
|
+
return qm
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
```bash
|
|
121
|
+
python3 -m pgqueuer run tools.consumer.main
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
#### Write and run a producer
|
|
125
|
+
Start a short-lived producer that will enqueue 10,000 jobs.
|
|
126
|
+
```python
|
|
127
|
+
from __future__ import annotations
|
|
128
|
+
|
|
129
|
+
import asyncio
|
|
130
|
+
import sys
|
|
131
|
+
|
|
132
|
+
import asyncpg
|
|
133
|
+
from pgqueuer.db import AsyncpgDriver
|
|
134
|
+
from pgqueuer.queries import Queries
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
async def main(N: int) -> None:
|
|
138
|
+
connection = await asyncpg.connect()
|
|
139
|
+
driver = AsyncpgDriver(connection)
|
|
140
|
+
queries = Queries(driver)
|
|
141
|
+
await queries.enqueue(
|
|
142
|
+
["fetch"] * N,
|
|
143
|
+
[f"this is from me: {n}".encode() for n in range(1, N+1)],
|
|
144
|
+
[0] * N,
|
|
145
|
+
)
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
if __name__ == "__main__":
|
|
149
|
+
print(sys.argv)
|
|
150
|
+
N = 1_000 if len(sys.argv) == 1 else int(sys.argv[1])
|
|
151
|
+
asyncio.run(main(N))
|
|
152
|
+
```
|
|
153
|
+
|
|
154
|
+
```bash
|
|
155
|
+
python3 tools/producer.py 10000
|
|
156
|
+
```
|
pgqueuer-0.0.0/README.md
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
### Readme
|
|
2
|
+
## 🚀 pgqueuer - Building Smoother Workflows One Queue at a Time 🚀
|
|
3
|
+
[](https://github.com/janbjorge/pgqueuer/actions/workflows/ci.yml?query=branch%3Amain)
|
|
4
|
+
[](https://pypi.python.org/pypi/pgqueuer)
|
|
5
|
+
[](https://pepy.tech/project/pgqueuer)
|
|
6
|
+
[](https://github.com/janbjorge/pgqueuer)
|
|
7
|
+
|
|
8
|
+
---
|
|
9
|
+
|
|
10
|
+
📚 **Documentation**: [Explore the Docs 📖](https://pgqueuer.readthedocs.io/en/latest/)
|
|
11
|
+
|
|
12
|
+
🔍 **Source Code**: [View on GitHub 💾](https://github.com/janbjorge/pgqueuer/)
|
|
13
|
+
|
|
14
|
+
💬 **Join the Discussion**: [Discord Community](https://discord.gg/C7YMBzcRMQ)
|
|
15
|
+
|
|
16
|
+
---
|
|
17
|
+
|
|
18
|
+
## pgqueuer
|
|
19
|
+
|
|
20
|
+
pgqueuer is a minimalist, high-performance job queue library for Python, leveraging the robustness of PostgreSQL. Designed for simplicity and efficiency, pgqueuer uses PostgreSQL's LISTEN/NOTIFY to manage job queues effortlessly.
|
|
21
|
+
|
|
22
|
+
### Features
|
|
23
|
+
|
|
24
|
+
- **Simple Integration**: Easy to integrate with existing Python applications using PostgreSQL.
|
|
25
|
+
- **Efficient Concurrency Handling**: Utilizes PostgreSQL's `FOR UPDATE SKIP LOCKED` for reliable and concurrent job processing.
|
|
26
|
+
- **Real-time Notifications**: Leverages `LISTEN` and `NOTIFY` for real-time updates on job status changes.
|
|
27
|
+
|
|
28
|
+
### Installation
|
|
29
|
+
|
|
30
|
+
To install pgqueuer, simply run the following pip command:
|
|
31
|
+
|
|
32
|
+
```bash
|
|
33
|
+
pip install pgqueuer
|
|
34
|
+
```
|
|
35
|
+
|
|
36
|
+
### Example Usage
|
|
37
|
+
|
|
38
|
+
Here's how you can use pgqueuer in a typical scenario processing incoming data messages:
|
|
39
|
+
|
|
40
|
+
#### Write and run a consumer
|
|
41
|
+
Start a long-lived consumer that will begin processing jobs as soon as they are enqueued by another process. In this case we want to be a bit more careful, as we want graceful shutdowns; `pgqueuer run` will set up signals to
|
|
42
|
+
ensure this.
|
|
43
|
+
|
|
44
|
+
```python
|
|
45
|
+
from __future__ import annotations
|
|
46
|
+
|
|
47
|
+
import asyncpg
|
|
48
|
+
from pgqueuer.db import AsyncpgDriver, dsn
|
|
49
|
+
from pgqueuer.models import Job
|
|
50
|
+
from pgqueuer.qm import QueueManager
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
async def main() -> QueueManager:
|
|
54
|
+
connection = await asyncpg.connect(dsn())
|
|
55
|
+
driver = AsyncpgDriver(connection)
|
|
56
|
+
qm = QueueManager(driver)
|
|
57
|
+
|
|
58
|
+
# Setup the 'fetch' entrypoint
|
|
59
|
+
@qm.entrypoint("fetch")
|
|
60
|
+
async def process_message(job: Job) -> None:
|
|
61
|
+
print(f"Processed message: {job}")
|
|
62
|
+
|
|
63
|
+
return qm
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
```bash
|
|
67
|
+
python3 -m pgqueuer run tools.consumer.main
|
|
68
|
+
```
|
|
69
|
+
|
|
70
|
+
#### Write and run a producer
|
|
71
|
+
Start a short-lived producer that will enqueue 10,000 jobs.
|
|
72
|
+
```python
|
|
73
|
+
from __future__ import annotations
|
|
74
|
+
|
|
75
|
+
import asyncio
|
|
76
|
+
import sys
|
|
77
|
+
|
|
78
|
+
import asyncpg
|
|
79
|
+
from pgqueuer.db import AsyncpgDriver
|
|
80
|
+
from pgqueuer.queries import Queries
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
async def main(N: int) -> None:
|
|
84
|
+
connection = await asyncpg.connect()
|
|
85
|
+
driver = AsyncpgDriver(connection)
|
|
86
|
+
queries = Queries(driver)
|
|
87
|
+
await queries.enqueue(
|
|
88
|
+
["fetch"] * N,
|
|
89
|
+
[f"this is from me: {n}".encode() for n in range(1, N+1)],
|
|
90
|
+
[0] * N,
|
|
91
|
+
)
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
if __name__ == "__main__":
|
|
95
|
+
print(sys.argv)
|
|
96
|
+
N = 1_000 if len(sys.argv) == 1 else int(sys.argv[1])
|
|
97
|
+
asyncio.run(main(N))
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
```bash
|
|
101
|
+
python3 tools/producer.py 10000
|
|
102
|
+
```
|
|
File without changes
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
# file generated by setuptools_scm
# don't change, don't track in version control

# TYPE_CHECKING is hard-coded to False so the `typing` import is only ever
# evaluated by static type checkers, never at runtime.
TYPE_CHECKING = False
if TYPE_CHECKING:
    from typing import Tuple, Union

    VERSION_TUPLE = Tuple[Union[int, str], ...]
else:
    # At runtime the alias degrades to a plain placeholder object.
    VERSION_TUPLE = object

version: str
__version__: str
__version_tuple__: VERSION_TUPLE
version_tuple: VERSION_TUPLE

__version__ = version = '0.7.1.dev1+g4c5074e.d20240824'
__version_tuple__ = version_tuple = (0, 7, 1, 'dev1', 'g4c5074e.d20240824')
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
# file generated by setuptools_scm
# don't change, don't track in version control

# TYPE_CHECKING is hard-coded to False so the `typing` import is only ever
# evaluated by static type checkers, never at runtime.
TYPE_CHECKING = False
if TYPE_CHECKING:
    from typing import Tuple, Union

    VERSION_TUPLE = Tuple[Union[int, str], ...]
else:
    # At runtime the alias degrades to a plain placeholder object.
    VERSION_TUPLE = object

version: str
__version__: str
__version_tuple__: VERSION_TUPLE
version_tuple: VERSION_TUPLE

__version__ = version = '0.7.1.dev1+g4c5074e.d20240824'
__version_tuple__ = version_tuple = (0, 7, 1, 'dev1', 'g4c5074e.d20240824')
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import dataclasses
|
|
5
|
+
from datetime import datetime, timedelta
|
|
6
|
+
from typing import Awaitable, Callable
|
|
7
|
+
|
|
8
|
+
from .helpers import perf_counter_dt
|
|
9
|
+
from .logconfig import logger
|
|
10
|
+
from .models import STATUS_LOG, Job
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@dataclasses.dataclass
class JobBuffer:
    """
    A buffer class that accumulates jobs and their statuses until a specified
    capacity or timeout is reached, at which point it flushes them using a
    provided callback function.

    Attributes:
        max_size (int): Maximum number of jobs the buffer can hold before
            triggering a flush.
        timeout (timedelta): Maximum time to wait before flushing the buffer,
            regardless of the buffer size.
        flush_callback (Callable[[list[tuple[Job, STATUS_LOG]]], Awaitable[None]]):
            Asynchronous callback function to process jobs when the buffer is flushed.
    """

    max_size: int
    timeout: timedelta
    flush_callback: Callable[
        [list[tuple[Job, STATUS_LOG]]],
        Awaitable[None],
    ]

    # Flag checked by `monitor`; set to False externally to stop the
    # periodic-flush loop.
    alive: bool = dataclasses.field(
        init=False,
        default=True,
    )
    # Pending (job, status) pairs awaiting a flush.
    events: list[tuple[Job, STATUS_LOG]] = dataclasses.field(
        init=False,
        default_factory=list,
    )
    # Timestamp of the most recent `add_job`; `monitor` compares against it
    # to decide whether the buffer has gone idle long enough to flush.
    last_event_time: datetime = dataclasses.field(
        init=False,
        default_factory=perf_counter_dt,
    )
    # Serializes `add_job` and `monitor` so only one of them mutates/flushes
    # `events` at a time. NOTE: `flush_jobs` itself does NOT acquire this
    # lock; both call sites invoke it while already holding the lock.
    lock: asyncio.Lock = dataclasses.field(
        init=False,
        default_factory=asyncio.Lock,
    )

    async def add_job(self, job: Job, status: STATUS_LOG) -> None:
        """
        Adds a job and its status to the buffer and flushes the buffer
        if it reaches maximum size.
        """
        async with self.lock:
            self.events.append((job, status))
            self.last_event_time = perf_counter_dt()
            # Size-based flush; the timeout-based flush lives in `monitor`.
            if len(self.events) >= self.max_size:
                await self.flush_jobs()

    async def flush_jobs(self) -> None:
        """
        Flushes the buffer by calling the flush callback with all accumulated jobs
        and statuses. Clears the buffer after flushing.

        Retries indefinitely on callback failure (sleeping `timeout` between
        attempts), so delivery is at-least-once: a callback that partially
        succeeded before raising will see the same events again on retry.
        Callers hold `self.lock` while invoking this method.
        """
        while self.events:
            try:
                # `self.events` is passed by reference; it is cleared below
                # only after the callback completes successfully.
                await self.flush_callback(self.events)
            except Exception:
                logger.exception(
                    "Exception during buffer flush, waiting: %s seconds before retry.",
                    self.timeout.total_seconds(),
                )
                # Reuse `timeout` as the retry back-off delay.
                await asyncio.sleep(self.timeout.total_seconds())
            else:
                self.events.clear()

    async def monitor(self) -> None:
        """
        Periodically checks if the buffer needs to be flushed based on the timeout.
        Runs until `alive` is set to False; intended to run as a long-lived
        background task alongside producers calling `add_job`.
        """
        while self.alive:
            await asyncio.sleep(self.timeout.total_seconds())
            async with self.lock:
                # Only flush if no event arrived for a full timeout window;
                # recent activity defers to the size-based flush in `add_job`.
                if perf_counter_dt() - self.last_event_time >= self.timeout:
                    await self.flush_jobs()