ltq 0.3.0__tar.gz → 0.3.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ltq-0.3.2/PKG-INFO +218 -0
- ltq-0.3.2/README.md +204 -0
- {ltq-0.3.0 → ltq-0.3.2}/pyproject.toml +2 -2
- ltq-0.3.2/src/ltq/__init__.py +24 -0
- ltq-0.3.2/src/ltq/app.py +40 -0
- ltq-0.3.2/src/ltq/broker.py +135 -0
- ltq-0.3.2/src/ltq/cli.py +180 -0
- {ltq-0.3.0 → ltq-0.3.2}/src/ltq/errors.py +2 -2
- {ltq-0.3.0 → ltq-0.3.2}/src/ltq/logger.py +13 -6
- {ltq-0.3.0 → ltq-0.3.2}/src/ltq/message.py +6 -6
- ltq-0.3.2/src/ltq/middleware.py +115 -0
- ltq-0.3.2/src/ltq/scheduler.py +101 -0
- {ltq-0.3.0 → ltq-0.3.2}/src/ltq/task.py +7 -10
- ltq-0.3.2/src/ltq/utils.py +1 -0
- ltq-0.3.2/src/ltq/worker.py +115 -0
- ltq-0.3.0/PKG-INFO +0 -137
- ltq-0.3.0/README.md +0 -123
- ltq-0.3.0/src/ltq/__init__.py +0 -18
- ltq-0.3.0/src/ltq/app.py +0 -14
- ltq-0.3.0/src/ltq/cli.py +0 -111
- ltq-0.3.0/src/ltq/middleware.py +0 -117
- ltq-0.3.0/src/ltq/q.py +0 -82
- ltq-0.3.0/src/ltq/scheduler.py +0 -82
- ltq-0.3.0/src/ltq/utils.py +0 -19
- ltq-0.3.0/src/ltq/worker.py +0 -108
ltq-0.3.2/PKG-INFO
ADDED
|
@@ -0,0 +1,218 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: ltq
|
|
3
|
+
Version: 0.3.2
|
|
4
|
+
Summary: Add your description here
|
|
5
|
+
Author: Tom Clesius
|
|
6
|
+
Author-email: Tom Clesius <tomclesius@gmail.com>
|
|
7
|
+
Requires-Dist: redis>=7.1.0
|
|
8
|
+
Requires-Dist: croniter>=6.0.0 ; extra == 'scheduler'
|
|
9
|
+
Requires-Dist: sentry-sdk>=2.0.0 ; extra == 'sentry'
|
|
10
|
+
Requires-Python: >=3.13
|
|
11
|
+
Provides-Extra: scheduler
|
|
12
|
+
Provides-Extra: sentry
|
|
13
|
+
Description-Content-Type: text/markdown
|
|
14
|
+
|
|
15
|
+
<p align="center">
|
|
16
|
+
<img src="https://raw.githubusercontent.com/tclesius/ltq/refs/heads/main/assets/logo.png" alt="LTQ" width="400">
|
|
17
|
+
</p>
|
|
18
|
+
|
|
19
|
+
<p align="center">
|
|
20
|
+
A lightweight, async-first task queue built on Redis.
|
|
21
|
+
</p>
|
|
22
|
+
|
|
23
|
+
## Installation
|
|
24
|
+
|
|
25
|
+
```bash
|
|
26
|
+
pip install ltq
|
|
27
|
+
# or
|
|
28
|
+
uv add ltq
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
## Broker Backends
|
|
32
|
+
|
|
33
|
+
LTQ supports multiple broker backends:
|
|
34
|
+
|
|
35
|
+
- **Redis** (default): `broker_url="redis://localhost:6379"`
|
|
36
|
+
- **Memory**: `broker_url="memory://"` (useful for testing)
|
|
37
|
+
|
|
38
|
+
All workers and schedulers accept a `broker_url` parameter.
|
|
39
|
+
|
|
40
|
+
## Quick Start
|
|
41
|
+
|
|
42
|
+
```python
|
|
43
|
+
import asyncio
|
|
44
|
+
import ltq
|
|
45
|
+
|
|
46
|
+
worker = ltq.Worker("emails", broker_url="redis://localhost:6379")
|
|
47
|
+
|
|
48
|
+
@worker.task()
|
|
49
|
+
async def send_email(to: str, subject: str, body: str) -> None:
|
|
50
|
+
# your async code here
|
|
51
|
+
pass
|
|
52
|
+
|
|
53
|
+
async def main():
|
|
54
|
+
# Enqueue a task
|
|
55
|
+
await send_email.send("user@example.com", "Hello", "World")
|
|
56
|
+
|
|
57
|
+
# Or enqueue multiple tasks
|
|
58
|
+
for email in ["a@example.com", "b@example.com"]:
|
|
59
|
+
await send_email.send(email, "Hi", "Message")
|
|
60
|
+
|
|
61
|
+
asyncio.run(main())
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
Each worker has a namespace (e.g., `"emails"`), and tasks are automatically namespaced as `{namespace}:{function_name}`.
|
|
65
|
+
|
|
66
|
+
## Running Workers
|
|
67
|
+
|
|
68
|
+
```bash
|
|
69
|
+
# Run a single worker
|
|
70
|
+
ltq run myapp:worker
|
|
71
|
+
|
|
72
|
+
# With options
|
|
73
|
+
ltq run myapp:worker --concurrency 100 --log-level DEBUG
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
## Running an App
|
|
77
|
+
|
|
78
|
+
Register multiple workers into an `App` to run them together:
|
|
79
|
+
|
|
80
|
+
```python
|
|
81
|
+
import ltq
|
|
82
|
+
|
|
83
|
+
app = ltq.App()
|
|
84
|
+
app.register_worker(emails_worker)
|
|
85
|
+
app.register_worker(notifications_worker)
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
```bash
|
|
89
|
+
ltq run --app myapp:app
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
### App Middleware
|
|
93
|
+
|
|
94
|
+
Apply middleware globally to all workers in an app:
|
|
95
|
+
|
|
96
|
+
```python
|
|
97
|
+
from ltq.middleware import Sentry
|
|
98
|
+
|
|
99
|
+
app = ltq.App(middlewares=[Sentry(dsn="https://...")])
|
|
100
|
+
|
|
101
|
+
# Or register after creation
|
|
102
|
+
app.register_middleware(Sentry(dsn="https://..."))
|
|
103
|
+
app.register_middleware(MyMiddleware(), pos=0)
|
|
104
|
+
|
|
105
|
+
# When workers are registered, app middlewares are prepended to each worker's stack
|
|
106
|
+
app.register_worker(emails_worker)
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
### Threading Model
|
|
110
|
+
|
|
111
|
+
By default, `App` runs each worker in its own thread with a separate event loop. This provides isolation between workers while keeping them in the same process. Workers won't block each other since each has its own async event loop.
|
|
112
|
+
|
|
113
|
+
**For maximum isolation** (separate memory, crash protection), run each worker in its own process:
|
|
114
|
+
|
|
115
|
+
```bash
|
|
116
|
+
# Terminal 1
|
|
117
|
+
ltq run myapp:emails_worker
|
|
118
|
+
|
|
119
|
+
# Terminal 2
|
|
120
|
+
ltq run myapp:notifications_worker
|
|
121
|
+
```
|
|
122
|
+
|
|
123
|
+
This gives you full process isolation at the cost of more overhead.
|
|
124
|
+
|
|
125
|
+
## Queue Management
|
|
126
|
+
|
|
127
|
+
Manage queues using the CLI:
|
|
128
|
+
|
|
129
|
+
```bash
|
|
130
|
+
# Clear a task queue
|
|
131
|
+
ltq clear emails:send_email
|
|
132
|
+
|
|
133
|
+
# Get queue size
|
|
134
|
+
ltq size emails:send_email
|
|
135
|
+
|
|
136
|
+
# With custom Redis URL
|
|
137
|
+
ltq clear emails:send_email --redis-url redis://localhost:6380
|
|
138
|
+
ltq size emails:send_email --redis-url redis://localhost:6380
|
|
139
|
+
```
|
|
140
|
+
|
|
141
|
+
Queue names are automatically namespaced as `{worker_name}:{function_name}`.
|
|
142
|
+
|
|
143
|
+
## Scheduler
|
|
144
|
+
|
|
145
|
+
Run tasks on a cron schedule (requires `ltq[scheduler]`):
|
|
146
|
+
|
|
147
|
+
```python
|
|
148
|
+
import ltq
|
|
149
|
+
|
|
150
|
+
scheduler = ltq.Scheduler()
|
|
151
|
+
scheduler.cron("*/5 * * * *", send_email.message("admin@example.com", "Report", "..."))
|
|
152
|
+
scheduler.start() # Runs scheduler in blocking mode with asyncio.run()
|
|
153
|
+
```
|
|
154
|
+
|
|
155
|
+
## Task Options
|
|
156
|
+
|
|
157
|
+
Configure task behavior with options:
|
|
158
|
+
|
|
159
|
+
```python
|
|
160
|
+
from datetime import timedelta
|
|
161
|
+
|
|
162
|
+
@worker.task(max_tries=3, max_age=timedelta(hours=1), max_rate="10/s")
|
|
163
|
+
async def send_email(to: str, subject: str, body: str) -> None:
|
|
164
|
+
# your async code here
|
|
165
|
+
pass
|
|
166
|
+
```
|
|
167
|
+
|
|
168
|
+
**Available options:**
|
|
169
|
+
|
|
170
|
+
- `max_tries` (int): Maximum retry attempts before rejecting the message
|
|
171
|
+
- `max_age` (timedelta): Maximum message age before rejection
|
|
172
|
+
- `max_rate` (str): Rate limit in format `"N/s"`, `"N/m"`, or `"N/h"` (requests per second/minute/hour)
|
|
173
|
+
|
|
174
|
+
## Middleware
|
|
175
|
+
|
|
176
|
+
Middleware are async context managers that wrap task execution. The default stack is `[MaxTries(), MaxAge(), MaxRate()]`, so you only need to specify middlewares if you want to customize or add additional ones:
|
|
177
|
+
|
|
178
|
+
```python
|
|
179
|
+
from ltq.middleware import MaxTries, MaxAge, MaxRate, Sentry
|
|
180
|
+
|
|
181
|
+
worker = ltq.Worker(
|
|
182
|
+
"emails",
|
|
183
|
+
broker_url="redis://localhost:6379",
|
|
184
|
+
middlewares=[
|
|
185
|
+
MaxTries(),
|
|
186
|
+
MaxAge(),
|
|
187
|
+
MaxRate(),
|
|
188
|
+
Sentry(dsn="https://..."),
|
|
189
|
+
],
|
|
190
|
+
)
|
|
191
|
+
```
|
|
192
|
+
|
|
193
|
+
**Built-in:** `MaxTries`, `MaxAge`, `MaxRate`, `Sentry` (requires `ltq[sentry]`)
|
|
194
|
+
|
|
195
|
+
You can also register middleware after creating the worker:
|
|
196
|
+
|
|
197
|
+
```python
|
|
198
|
+
worker.register_middleware(Sentry(dsn="https://..."))
|
|
199
|
+
|
|
200
|
+
# Insert at specific position (default is -1 for append)
|
|
201
|
+
worker.register_middleware(MyMiddleware(), pos=0)
|
|
202
|
+
```
|
|
203
|
+
|
|
204
|
+
**Custom middleware:**
|
|
205
|
+
|
|
206
|
+
```python
|
|
207
|
+
from contextlib import asynccontextmanager
|
|
208
|
+
from ltq.middleware import Middleware
|
|
209
|
+
from ltq.message import Message
|
|
210
|
+
from ltq.task import Task
|
|
211
|
+
|
|
212
|
+
class Logger(Middleware):
|
|
213
|
+
@asynccontextmanager
|
|
214
|
+
async def __call__(self, message: Message, task: Task):
|
|
215
|
+
print(f"Processing {message.task_name}")
|
|
216
|
+
yield
|
|
217
|
+
print(f"Completed {message.task_name}")
|
|
218
|
+
```
|
ltq-0.3.2/README.md
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
<p align="center">
|
|
2
|
+
<img src="https://raw.githubusercontent.com/tclesius/ltq/refs/heads/main/assets/logo.png" alt="LTQ" width="400">
|
|
3
|
+
</p>
|
|
4
|
+
|
|
5
|
+
<p align="center">
|
|
6
|
+
A lightweight, async-first task queue built on Redis.
|
|
7
|
+
</p>
|
|
8
|
+
|
|
9
|
+
## Installation
|
|
10
|
+
|
|
11
|
+
```bash
|
|
12
|
+
pip install ltq
|
|
13
|
+
# or
|
|
14
|
+
uv add ltq
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
## Broker Backends
|
|
18
|
+
|
|
19
|
+
LTQ supports multiple broker backends:
|
|
20
|
+
|
|
21
|
+
- **Redis** (default): `broker_url="redis://localhost:6379"`
|
|
22
|
+
- **Memory**: `broker_url="memory://"` (useful for testing)
|
|
23
|
+
|
|
24
|
+
All workers and schedulers accept a `broker_url` parameter.
|
|
25
|
+
|
|
26
|
+
## Quick Start
|
|
27
|
+
|
|
28
|
+
```python
|
|
29
|
+
import asyncio
|
|
30
|
+
import ltq
|
|
31
|
+
|
|
32
|
+
worker = ltq.Worker("emails", broker_url="redis://localhost:6379")
|
|
33
|
+
|
|
34
|
+
@worker.task()
|
|
35
|
+
async def send_email(to: str, subject: str, body: str) -> None:
|
|
36
|
+
# your async code here
|
|
37
|
+
pass
|
|
38
|
+
|
|
39
|
+
async def main():
|
|
40
|
+
# Enqueue a task
|
|
41
|
+
await send_email.send("user@example.com", "Hello", "World")
|
|
42
|
+
|
|
43
|
+
# Or enqueue multiple tasks
|
|
44
|
+
for email in ["a@example.com", "b@example.com"]:
|
|
45
|
+
await send_email.send(email, "Hi", "Message")
|
|
46
|
+
|
|
47
|
+
asyncio.run(main())
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
Each worker has a namespace (e.g., `"emails"`), and tasks are automatically namespaced as `{namespace}:{function_name}`.
|
|
51
|
+
|
|
52
|
+
## Running Workers
|
|
53
|
+
|
|
54
|
+
```bash
|
|
55
|
+
# Run a single worker
|
|
56
|
+
ltq run myapp:worker
|
|
57
|
+
|
|
58
|
+
# With options
|
|
59
|
+
ltq run myapp:worker --concurrency 100 --log-level DEBUG
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
## Running an App
|
|
63
|
+
|
|
64
|
+
Register multiple workers into an `App` to run them together:
|
|
65
|
+
|
|
66
|
+
```python
|
|
67
|
+
import ltq
|
|
68
|
+
|
|
69
|
+
app = ltq.App()
|
|
70
|
+
app.register_worker(emails_worker)
|
|
71
|
+
app.register_worker(notifications_worker)
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
```bash
|
|
75
|
+
ltq run --app myapp:app
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
### App Middleware
|
|
79
|
+
|
|
80
|
+
Apply middleware globally to all workers in an app:
|
|
81
|
+
|
|
82
|
+
```python
|
|
83
|
+
from ltq.middleware import Sentry
|
|
84
|
+
|
|
85
|
+
app = ltq.App(middlewares=[Sentry(dsn="https://...")])
|
|
86
|
+
|
|
87
|
+
# Or register after creation
|
|
88
|
+
app.register_middleware(Sentry(dsn="https://..."))
|
|
89
|
+
app.register_middleware(MyMiddleware(), pos=0)
|
|
90
|
+
|
|
91
|
+
# When workers are registered, app middlewares are prepended to each worker's stack
|
|
92
|
+
app.register_worker(emails_worker)
|
|
93
|
+
```
|
|
94
|
+
|
|
95
|
+
### Threading Model
|
|
96
|
+
|
|
97
|
+
By default, `App` runs each worker in its own thread with a separate event loop. This provides isolation between workers while keeping them in the same process. Workers won't block each other since each has its own async event loop.
|
|
98
|
+
|
|
99
|
+
**For maximum isolation** (separate memory, crash protection), run each worker in its own process:
|
|
100
|
+
|
|
101
|
+
```bash
|
|
102
|
+
# Terminal 1
|
|
103
|
+
ltq run myapp:emails_worker
|
|
104
|
+
|
|
105
|
+
# Terminal 2
|
|
106
|
+
ltq run myapp:notifications_worker
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
This gives you full process isolation at the cost of more overhead.
|
|
110
|
+
|
|
111
|
+
## Queue Management
|
|
112
|
+
|
|
113
|
+
Manage queues using the CLI:
|
|
114
|
+
|
|
115
|
+
```bash
|
|
116
|
+
# Clear a task queue
|
|
117
|
+
ltq clear emails:send_email
|
|
118
|
+
|
|
119
|
+
# Get queue size
|
|
120
|
+
ltq size emails:send_email
|
|
121
|
+
|
|
122
|
+
# With custom Redis URL
|
|
123
|
+
ltq clear emails:send_email --redis-url redis://localhost:6380
|
|
124
|
+
ltq size emails:send_email --redis-url redis://localhost:6380
|
|
125
|
+
```
|
|
126
|
+
|
|
127
|
+
Queue names are automatically namespaced as `{worker_name}:{function_name}`.
|
|
128
|
+
|
|
129
|
+
## Scheduler
|
|
130
|
+
|
|
131
|
+
Run tasks on a cron schedule (requires `ltq[scheduler]`):
|
|
132
|
+
|
|
133
|
+
```python
|
|
134
|
+
import ltq
|
|
135
|
+
|
|
136
|
+
scheduler = ltq.Scheduler()
|
|
137
|
+
scheduler.cron("*/5 * * * *", send_email.message("admin@example.com", "Report", "..."))
|
|
138
|
+
scheduler.start() # Runs scheduler in blocking mode with asyncio.run()
|
|
139
|
+
```
|
|
140
|
+
|
|
141
|
+
## Task Options
|
|
142
|
+
|
|
143
|
+
Configure task behavior with options:
|
|
144
|
+
|
|
145
|
+
```python
|
|
146
|
+
from datetime import timedelta
|
|
147
|
+
|
|
148
|
+
@worker.task(max_tries=3, max_age=timedelta(hours=1), max_rate="10/s")
|
|
149
|
+
async def send_email(to: str, subject: str, body: str) -> None:
|
|
150
|
+
# your async code here
|
|
151
|
+
pass
|
|
152
|
+
```
|
|
153
|
+
|
|
154
|
+
**Available options:**
|
|
155
|
+
|
|
156
|
+
- `max_tries` (int): Maximum retry attempts before rejecting the message
|
|
157
|
+
- `max_age` (timedelta): Maximum message age before rejection
|
|
158
|
+
- `max_rate` (str): Rate limit in format `"N/s"`, `"N/m"`, or `"N/h"` (requests per second/minute/hour)
|
|
159
|
+
|
|
160
|
+
## Middleware
|
|
161
|
+
|
|
162
|
+
Middleware are async context managers that wrap task execution. The default stack is `[MaxTries(), MaxAge(), MaxRate()]`, so you only need to specify middlewares if you want to customize or add additional ones:
|
|
163
|
+
|
|
164
|
+
```python
|
|
165
|
+
from ltq.middleware import MaxTries, MaxAge, MaxRate, Sentry
|
|
166
|
+
|
|
167
|
+
worker = ltq.Worker(
|
|
168
|
+
"emails",
|
|
169
|
+
broker_url="redis://localhost:6379",
|
|
170
|
+
middlewares=[
|
|
171
|
+
MaxTries(),
|
|
172
|
+
MaxAge(),
|
|
173
|
+
MaxRate(),
|
|
174
|
+
Sentry(dsn="https://..."),
|
|
175
|
+
],
|
|
176
|
+
)
|
|
177
|
+
```
|
|
178
|
+
|
|
179
|
+
**Built-in:** `MaxTries`, `MaxAge`, `MaxRate`, `Sentry` (requires `ltq[sentry]`)
|
|
180
|
+
|
|
181
|
+
You can also register middleware after creating the worker:
|
|
182
|
+
|
|
183
|
+
```python
|
|
184
|
+
worker.register_middleware(Sentry(dsn="https://..."))
|
|
185
|
+
|
|
186
|
+
# Insert at specific position (default is -1 for append)
|
|
187
|
+
worker.register_middleware(MyMiddleware(), pos=0)
|
|
188
|
+
```
|
|
189
|
+
|
|
190
|
+
**Custom middleware:**
|
|
191
|
+
|
|
192
|
+
```python
|
|
193
|
+
from contextlib import asynccontextmanager
|
|
194
|
+
from ltq.middleware import Middleware
|
|
195
|
+
from ltq.message import Message
|
|
196
|
+
from ltq.task import Task
|
|
197
|
+
|
|
198
|
+
class Logger(Middleware):
|
|
199
|
+
@asynccontextmanager
|
|
200
|
+
async def __call__(self, message: Message, task: Task):
|
|
201
|
+
print(f"Processing {message.task_name}")
|
|
202
|
+
yield
|
|
203
|
+
print(f"Completed {message.task_name}")
|
|
204
|
+
```
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
[project]
|
|
2
2
|
name = "ltq"
|
|
3
|
-
version = "0.3.0"
|
|
3
|
+
version = "0.3.2"
|
|
4
4
|
description = "Add your description here"
|
|
5
5
|
readme = "README.md"
|
|
6
6
|
authors = [{ name = "Tom Clesius", email = "tomclesius@gmail.com" }]
|
|
@@ -19,7 +19,7 @@ requires = ["uv_build>=0.9.26,<0.10.0"]
|
|
|
19
19
|
build-backend = "uv_build"
|
|
20
20
|
|
|
21
21
|
[tool.bumpversion]
|
|
22
|
-
current_version = "0.3.0"
|
|
22
|
+
current_version = "0.3.2"
|
|
23
23
|
commit = true
|
|
24
24
|
tag = true
|
|
25
25
|
message = "v{new_version}"
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
from .app import App
|
|
2
|
+
from .broker import Broker
|
|
3
|
+
from .task import Task
|
|
4
|
+
from .worker import Worker
|
|
5
|
+
from .scheduler import Scheduler
|
|
6
|
+
from .logger import get_logger
|
|
7
|
+
from .errors import RejectError, RetryError
|
|
8
|
+
from .middleware import Middleware, MaxTries, MaxAge, MaxRate, Sentry
|
|
9
|
+
|
|
10
|
+
__all__ = [
|
|
11
|
+
"App",
|
|
12
|
+
"Broker",
|
|
13
|
+
"Worker",
|
|
14
|
+
"Scheduler",
|
|
15
|
+
"Task",
|
|
16
|
+
"get_logger",
|
|
17
|
+
"RejectError",
|
|
18
|
+
"RetryError",
|
|
19
|
+
"Middleware",
|
|
20
|
+
"MaxTries",
|
|
21
|
+
"MaxAge",
|
|
22
|
+
"MaxRate",
|
|
23
|
+
"Sentry",
|
|
24
|
+
]
|
ltq-0.3.2/src/ltq/app.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import threading
|
|
3
|
+
|
|
4
|
+
from .middleware import Middleware
|
|
5
|
+
from .worker import Worker
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class App:
    """Container that groups several workers and runs them side by side.

    Each registered worker gets its own daemon thread with a fresh event
    loop; app-level middlewares are prepended to every worker's middleware
    stack at registration time.
    """

    def __init__(self, middlewares: list[Middleware] | None = None) -> None:
        # Workers keyed by their name; middlewares apply app-wide.
        self.workers: dict[str, Worker] = {}
        self.middlewares: list[Middleware] = middlewares or []

    def register_middleware(self, middleware: Middleware, pos: int = -1) -> None:
        """Add *middleware* to the app-wide stack (append when pos == -1)."""
        if pos != -1:
            self.middlewares.insert(pos, middleware)
        else:
            self.middlewares.append(middleware)

    def register_worker(self, worker: Worker) -> None:
        """Attach *worker*, prepending the app-wide middlewares to its stack.

        Raises:
            RuntimeError: if a worker with the same name is already registered.
        """
        if worker.name in self.workers:
            raise RuntimeError(f"Worker '{worker.name}' is already registered")
        worker.middlewares = [*self.middlewares, *worker.middlewares]
        self.workers[worker.name] = worker

    @staticmethod
    def _run_worker(worker: Worker) -> None:
        # Thread entry point: each worker drives its own event loop.
        asyncio.run(worker.run())

    async def run(self) -> None:
        """Start one daemon thread per worker and wait until all have exited."""
        threads = [
            threading.Thread(target=self._run_worker, args=(w,), daemon=True)
            for w in self.workers.values()
        ]
        for thread in threads:
            thread.start()

        try:
            # Poll instead of join() so this coroutine stays cancellable.
            while any(thread.is_alive() for thread in threads):
                await asyncio.sleep(0.2)
        except asyncio.CancelledError:
            # Daemon threads die with the process; nothing to clean up here.
            pass
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import time
|
|
5
|
+
from urllib.parse import urlparse
|
|
6
|
+
import uuid
|
|
7
|
+
from collections import defaultdict
|
|
8
|
+
|
|
9
|
+
import redis.asyncio as aioredis
|
|
10
|
+
|
|
11
|
+
from .message import Message
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class Broker:
    """Abstract message-broker interface.

    Concrete backends (``RedisBroker``, ``MemoryBroker``) implement the
    async methods below.  Use :meth:`from_url` to pick a backend from a
    URL scheme.

    The base methods raise ``NotImplementedError`` instead of silently
    returning ``None`` (the previous ``...`` stubs), so a backend that
    forgets an override fails loudly rather than e.g. ``consume`` handing
    ``None`` to callers expecting a ``Message``.
    """

    @staticmethod
    def from_url(url: str) -> Broker:
        """Build a broker from *url* (``memory://`` or ``redis://...``).

        Raises:
            RuntimeError: if the URL scheme is not a known backend.
        """
        scheme = urlparse(url).scheme
        if scheme == "memory":
            return MemoryBroker()
        if scheme == "redis":
            return RedisBroker(url)
        raise RuntimeError(f"Unknown scheme: {scheme}")

    async def close(self) -> None:
        """Release any backend resources."""
        raise NotImplementedError

    async def publish(self, message: Message, delay: float = 0) -> None:
        """Enqueue *message*, optionally deferred by *delay* seconds."""
        raise NotImplementedError

    async def consume(self, queue: str) -> Message:
        """Block until a message on *queue* is ready, claim and return it."""
        raise NotImplementedError

    async def ack(self, message: Message) -> None:
        """Mark *message* as successfully processed."""
        raise NotImplementedError

    async def nack(
        self,
        message: Message,
        delay: float = 0,
        drop: bool = False,
    ) -> None:
        """Return *message* to its queue after *delay*, or discard if *drop*."""
        raise NotImplementedError

    async def len(self, queue: str) -> int:
        """Number of messages currently waiting on *queue*."""
        raise NotImplementedError

    async def clear(self, queue: str) -> None:
        """Remove all pending messages from *queue*."""
        raise NotImplementedError
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class RedisBroker(Broker):
    """Broker backed by Redis sorted sets.

    Each queue is a sorted set ``queue:{task_name}`` whose members are
    JSON-serialized messages and whose scores are the earliest unix time
    the message may be delivered.  Claimed messages are parked in a
    per-consumer set ``processing:{queue}:{consumer_id}`` until acked.
    """

    def __init__(self, url: str) -> None:
        self.url = url
        self._client = aioredis.from_url(url)
        # Random consumer id: gives every broker instance its own
        # processing set, so claims by one consumer are invisible to others.
        self._id = uuid.uuid4().hex[:8]

    async def close(self) -> None:
        # Close the underlying connection (pool).
        await self._client.aclose()

    async def publish(
        self,
        message: Message,
        delay: float = 0,
    ) -> None:
        """Enqueue *message*; a positive *delay* defers delivery (seconds)."""
        # Score is the absolute unix time at which the message becomes ready.
        score = time.time() + delay
        await self._client.zadd(
            f"queue:{message.task_name}",
            {
                message.to_json(): score,
            },
        )  # type: ignore

    async def consume(self, queue: str) -> Message:
        """Poll until a due message exists on *queue*, claim and return it.

        NOTE(review): the claim is not atomic -- zrangebyscore, zadd and
        zrem are three separate round trips, so two consumers polling the
        same queue can both read the same member before either removes it,
        delivering the message twice.  A Lua script or a ZPOPMIN-style
        operation would make the claim atomic; confirm whether at-least-once
        double delivery is acceptable here.
        """
        while True:
            now = time.time()
            # Fetch at most one member whose score (due time) has passed.
            ready = await self._client.zrangebyscore(
                f"queue:{queue}", 0, now, start=0, num=1
            )  # type: ignore
            if ready:
                msg = ready[0]
                # Park in this consumer's processing set, then drop from the queue.
                await self._client.zadd(f"processing:{queue}:{self._id}", {msg: now,})  # type: ignore
                await self._client.zrem(f"queue:{queue}", msg)  # type: ignore
                return Message.from_json(msg)
            await asyncio.sleep(0.1)

    async def ack(self, message: Message) -> None:
        """Drop *message* from this consumer's processing set."""
        key = f"processing:{message.task_name}:{self._id}"
        # NOTE(review): removal matches on the serialized form, so this
        # relies on to_json() being deterministic (same bytes as consumed) --
        # verify Message serialization has stable key order.
        await self._client.zrem(key, message.to_json())  # type: ignore

    async def nack(
        self,
        message: Message,
        delay: float = 0,
        drop: bool = False,
    ) -> None:
        """Un-claim *message*: requeue it after *delay*, or discard if *drop*."""
        key = f"processing:{message.task_name}:{self._id}"
        await self._client.zrem(key, message.to_json())  # type: ignore
        if not drop:
            await self.publish(message, delay=delay)

    async def len(self, queue: str) -> int:
        """Number of messages (ready or delayed) waiting on *queue*."""
        return await self._client.zcard(f"queue:{queue}") or 0  # type: ignore

    async def clear(self, queue: str) -> None:
        """Delete the queue and this consumer's processing set."""
        await self._client.delete(f"queue:{queue}", f"processing:{queue}:{self._id}")  # type: ignore
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
class MemoryBroker(Broker):
    """In-process broker for tests: queues live in plain dicts.

    Maps queue name -> {serialized message: due timestamp}.  There is no
    processing set, so ``ack`` is a no-op and messages are removed at
    consume time.
    """

    def __init__(self) -> None:
        self._queues: defaultdict[str, dict[str, float]] = defaultdict(dict)

    async def close(self) -> None:
        # Nothing to release for in-memory storage.
        pass

    async def publish(
        self,
        message: Message,
        delay: float = 0,
    ) -> None:
        """Store *message* under its queue, due *delay* seconds from now."""
        due = time.time() + delay
        self._queues[message.task_name][message.to_json()] = due

    async def consume(self, queue: str) -> Message:
        """Poll until some message on *queue* is due, remove and return it."""
        while True:
            cutoff = time.time()
            pending = self._queues[queue]
            # First due entry in insertion order, or None if nothing is ready.
            ready = next(
                (raw for raw, due in tuple(pending.items()) if due <= cutoff),
                None,
            )
            if ready is not None:
                del pending[ready]
                return Message.from_json(ready)
            await asyncio.sleep(0.1)

    async def ack(self, message: Message) -> None:
        # Messages are already removed at consume time; nothing to do.
        pass

    async def nack(
        self,
        message: Message,
        delay: float = 0,
        drop: bool = False,
    ) -> None:
        """Requeue *message* after *delay* unless *drop* is set."""
        if drop:
            return
        await self.publish(message, delay=delay)

    async def len(self, queue: str) -> int:
        """Count of messages (ready or delayed) on *queue*."""
        return len(self._queues[queue])

    async def clear(self, queue: str) -> None:
        """Forget *queue* entirely; missing queues are ignored."""
        self._queues.pop(queue, None)
|