ltq-0.1.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ltq-0.1.0/PKG-INFO +109 -0
- ltq-0.1.0/README.md +97 -0
- ltq-0.1.0/pyproject.toml +18 -0
- ltq-0.1.0/src/ltq/__init__.py +12 -0
- ltq-0.1.0/src/ltq/cli.py +85 -0
- ltq-0.1.0/src/ltq/errors.py +12 -0
- ltq-0.1.0/src/ltq/logger.py +63 -0
- ltq-0.1.0/src/ltq/message.py +30 -0
- ltq-0.1.0/src/ltq/middleware.py +117 -0
- ltq-0.1.0/src/ltq/q.py +82 -0
- ltq-0.1.0/src/ltq/task.py +38 -0
- ltq-0.1.0/src/ltq/worker.py +91 -0
ltq-0.1.0/PKG-INFO
ADDED
@@ -0,0 +1,109 @@

```
Metadata-Version: 2.3
Name: ltq
Version: 0.1.0
Summary: Add your description here
Author: Tom Clesius
Author-email: Tom Clesius <tomclesius@gmail.com>
Requires-Dist: redis>=7.1.0
Requires-Dist: sentry-sdk>=2.0.0 ; extra == 'sentry'
Requires-Python: >=3.13
Provides-Extra: sentry
Description-Content-Type: text/markdown
```
(Lines 13-109 are the package long description, identical to README.md below.)

ltq-0.1.0/README.md
ADDED
@@ -0,0 +1,97 @@

````markdown
<p align="center">
  <img src="assets/logo.png" alt="LTQ" width="400">
</p>

<p align="center">
  A lightweight, async-first task queue built on Redis.
</p>

## Installation

```bash
pip install ltq
# or
uv add ltq
```

## Quick Start

```python
import asyncio
import redis.asyncio as redis
import ltq

client = redis.from_url("redis://localhost:6379")
worker = ltq.Worker(client=client)

@worker.task()
async def send_email(to: str, subject: str, body: str) -> None:
    # your async code here
    pass

async def main():
    # Enqueue a task
    await send_email.send("user@example.com", "Hello", "World")

    # Or enqueue in bulk
    messages = [
        send_email.message("a@example.com", "Hi", "A"),
        send_email.message("b@example.com", "Hi", "B"),
    ]
    await send_email.send_bulk(messages)

asyncio.run(main())
```

Each task gets its own queue by default. To share a queue between tasks, pass `queue_name`:

```python
@worker.task(queue_name="emails")
async def send_email(...): ...

@worker.task(queue_name="emails")
async def send_newsletter(...): ...
```

## Running Workers

```bash
# Run a worker
ltq myapp:worker

# With options
ltq myapp:worker --concurrency 100 --log-level DEBUG
```

## Middleware

Add middleware to handle cross-cutting concerns:

```python
from ltq.middleware import Retry, RateLimit, Timeout

worker = ltq.Worker(
    client=client,
    middlewares=[
        Retry(max_retries=3, min_delay=1.0),
        RateLimit(requests_per_second=10),
        Timeout(timeout=30.0),
    ],
)
```

**Built-in:** `Retry`, `RateLimit`, `Timeout`, `Sentry` (requires `ltq[sentry]`)

**Custom middleware:**

```python
from ltq.middleware import Middleware, Handler
from ltq.message import Message

class Logger(Middleware):
    async def handle(self, message: Message, next_handler: Handler):
        print(f"Processing {message.task}")
        result = await next_handler(message)
        print(f"Completed {message.task}")
        return result
```
````
ltq-0.1.0/pyproject.toml
ADDED
@@ -0,0 +1,18 @@

```toml
[project]
name = "ltq"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
authors = [{ name = "Tom Clesius", email = "tomclesius@gmail.com" }]
requires-python = ">=3.13"
dependencies = ["redis>=7.1.0"]

[project.optional-dependencies]
sentry = ["sentry-sdk>=2.0.0"]

[project.scripts]
ltq = "ltq.cli:main"

[build-system]
requires = ["uv_build>=0.9.26,<0.10.0"]
build-backend = "uv_build"
```
ltq-0.1.0/src/ltq/cli.py
ADDED
@@ -0,0 +1,85 @@

```python
"""CLI for running ltq workers."""

import argparse
import asyncio
import importlib
import sys
from pathlib import Path

from .logger import setup_logging, get_logger
from .worker import Worker

logger = get_logger()


def import_from_string(import_str: str):
    """Import a Worker from 'module.path:worker_name'."""
    if ":" not in import_str:
        logger.error("Invalid format: %s", import_str)
        logger.error("Use: module:attribute")
        sys.exit(1)

    module_str, attr_str = import_str.split(":", 1)

    # Add cwd to path for local imports
    sys.path.insert(0, str(Path.cwd()))

    try:
        module = importlib.import_module(module_str)
        return getattr(module, attr_str)
    except ImportError as e:
        logger.error("Cannot import module '%s'", module_str)
        logger.error("%s", e)
        sys.exit(1)
    except AttributeError:
        logger.error("Module '%s' has no attribute '%s'", module_str, attr_str)
        sys.exit(1)


def main():
    """Run an ltq worker."""

    parser = argparse.ArgumentParser(
        prog="ltq",
        description="Run an ltq worker",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="Example:\n ltq example:worker --concurrency 100",
    )

    parser.add_argument("worker", help="Worker import string (module:attribute)")
    parser.add_argument("--concurrency", type=int, help="Override worker concurrency")
    parser.add_argument("--poll-sleep", type=float, help="Override worker poll sleep")
    parser.add_argument(
        "--log-level",
        type=str,
        default="INFO",
        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        help="Set logging level (default: INFO)",
    )
    args = parser.parse_args()

    # Setup colored logging for CLI
    setup_logging(level=args.log_level)

    worker: Worker = import_from_string(args.worker)

    # Apply overrides; compare against None so explicit zero values are honored
    if args.concurrency is not None:
        worker.concurrency = args.concurrency
    if args.poll_sleep is not None:
        worker.poll_sleep = args.poll_sleep
    logger.setLevel(args.log_level)

    # Print startup info
    logger.info("Starting ltq worker")
    logger.info("Worker: %s", args.worker)

    try:
        asyncio.run(worker.run())
    except KeyboardInterrupt:
        logger.info("Shutting down...")


if __name__ == "__main__":
    main()
```
ltq-0.1.0/src/ltq/errors.py
ADDED
@@ -0,0 +1,12 @@

```python
class RejectMessage(Exception):
    """Signal that a message should be dropped."""

    pass


class RetryMessage(Exception):
    """Signal that a message should be retried after a delay."""

    def __init__(self, delay: float = 0.0, message: str = "") -> None:
        self.delay = delay
        super().__init__(message)
```
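
These exceptions are control-flow signals for the worker loop in `worker.py`: a `RetryMessage` is caught there and the message is re-queued after `delay` seconds, while any other exception is logged and the message is dropped. A minimal sketch of raising the signal from inside a task; the `check_payment` task and the `fetch_settlement_status` helper are hypothetical:

```python
import redis.asyncio as redis
import ltq
from ltq.errors import RetryMessage

client = redis.from_url("redis://localhost:6379")  # assumed local Redis
worker = ltq.Worker(client=client)


async def fetch_settlement_status(order_id: str) -> bool:
    # Hypothetical stand-in for a real payment-provider call
    return False


@worker.task()
async def check_payment(order_id: str) -> None:
    settled = await fetch_settlement_status(order_id)
    if not settled:
        # Ask the worker loop to re-queue this message and retry in 5 seconds
        raise RetryMessage(delay=5.0, message=f"order {order_id} not settled yet")
```
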
ltq-0.1.0/src/ltq/logger.py
ADDED
@@ -0,0 +1,63 @@

```python
import logging
import sys


class ColoredFormatter(logging.Formatter):
    """Custom formatter with colored severity levels."""

    # ANSI color codes
    RESET = "\033[0m"
    GRAY = "\033[90m"
    GREEN = "\033[32m"
    BRIGHT_GREEN = "\033[92m"
    YELLOW = "\033[33m"
    RED = "\033[91m"
    CYAN = "\033[36m"

    COLORS = {
        logging.DEBUG: GRAY,
        logging.INFO: BRIGHT_GREEN,
        logging.WARNING: YELLOW,
        logging.ERROR: RED,
        logging.CRITICAL: RED,
    }

    def format(self, record: logging.LogRecord) -> str:
        """Format log record with colored severity level."""
        color = self.COLORS.get(record.levelno, self.RESET)
        log_time = self.formatTime(record, "%H:%M:%S")
        timestamp = f"{self.GRAY}{log_time}{self.RESET}"

        levelname = f"{color}{record.levelname:<8}{self.RESET}"
        workername = f"{self.CYAN}{record.name:<8}{self.RESET}"

        message = record.getMessage()
        log_line = f"{timestamp} {levelname} {workername} {message}"

        if record.exc_info and not record.exc_text:
            record.exc_text = self.formatException(record.exc_info)

        if record.exc_text:
            lines = record.exc_text.split("\n")
            log_line += "\n" + "\n".join(
                f" {self.GRAY}{line}{self.RESET}" for line in lines
            )

        return log_line


def setup_logging(level: int | str = logging.INFO) -> None:
    logger = logging.getLogger("ltq")

    if not logger.handlers:
        logger.setLevel(level)

        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(ColoredFormatter())
        logger.addHandler(handler)

        logger.propagate = False


def get_logger(name: str = "ltq") -> logging.Logger:
    return logging.getLogger(name)
```
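
`setup_logging` only attaches the colored handler the first time it sees a bare `ltq` logger, so repeat calls are harmless. A minimal usage sketch:

```python
import logging
from ltq.logger import setup_logging, get_logger

setup_logging(level=logging.DEBUG)  # attach the colored stdout handler once
log = get_logger()                  # the shared "ltq" logger

log.info("worker online")           # e.g. 12:00:00 INFO     ltq      worker online
try:
    1 / 0
except ZeroDivisionError:
    log.error("task failed", exc_info=True)  # traceback rendered in gray
```
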
ltq-0.1.0/src/ltq/message.py
ADDED
@@ -0,0 +1,30 @@

```python
from __future__ import annotations

import json
import uuid
from dataclasses import dataclass, field
from typing import Any


@dataclass
class Message:
    args: tuple[Any, ...]
    kwargs: dict[str, Any]
    task: str
    ctx: dict[str, Any] = field(default_factory=dict)
    id: str = field(default_factory=lambda: uuid.uuid4().hex)

    def to_json(self) -> str:
        return json.dumps(
            {
                "task": self.task,
                "id": self.id,
                "args": self.args,
                "kwargs": self.kwargs,
                "ctx": self.ctx,
            }
        )

    @classmethod
    def from_json(cls, data: str | bytes) -> Message:
        return cls(**json.loads(data))
```
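
Serialization is plain JSON, so a message round-trips through Redis unchanged as long as its fields are JSON-serializable. A quick sketch (note that `args` comes back as a list, since JSON has no tuple type, but re-serializing still reproduces the same payload):

```python
from ltq.message import Message

msg = Message(args=("user@example.com",), kwargs={"attempts": 3}, task="emails:send_email")
raw = msg.to_json()
# e.g. {"task": "emails:send_email", "id": "3f2c...", "args": ["user@example.com"], ...}

restored = Message.from_json(raw)
assert restored.id == msg.id and restored.task == msg.task
assert restored.to_json() == raw  # identical JSON, even though args is now a list
```
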
ltq-0.1.0/src/ltq/middleware.py
ADDED
@@ -0,0 +1,117 @@

```python
from __future__ import annotations

import asyncio
import time
from abc import ABC, abstractmethod
from typing import Any, Awaitable, Callable

from .errors import RetryMessage
from .message import Message
from .logger import get_logger

logger = get_logger()
Handler = Callable[[Message], Awaitable[Any]]


class Middleware(ABC):
    @abstractmethod
    async def handle(self, message: Message, next_handler: Handler) -> Any: ...


class Retry(Middleware):
    def __init__(
        self,
        max_retries: int = 3,
        min_delay: float = 1.0,
        max_delay: float = 60.0,
        backoff: float = 2.0,
    ):
        self.max_retries = max_retries
        self.min_delay = min_delay
        self.max_delay = max_delay
        self.backoff = backoff

    async def handle(self, message: Message, next_handler: Handler) -> Any:
        # The retry count travels with the message in its ctx dict
        retries = message.ctx.get("retries", 0)

        try:
            return await next_handler(message)
        except Exception as e:
            retries += 1
            message.ctx["retries"] = retries
            max_retries = max(self.max_retries - 1, 0)

            if retries > max_retries:
                raise

            # Exponential backoff, capped at max_delay
            delay = min(
                self.min_delay * (self.backoff ** (retries - 1)),
                self.max_delay,
            )
            logger.warning(
                f"Retry attempt {retries}/{max_retries} ({type(e).__name__})",
                exc_info=True,
            )
            raise RetryMessage(delay, str(e))


class RateLimit(Middleware):
    def __init__(self, requests_per_second: float):
        self.min_interval = 1.0 / requests_per_second
        self._last_request: float = 0
        self._lock = asyncio.Lock()

    async def handle(self, message: Message, next_handler: Handler) -> Any:
        # Serialize starts so messages begin at most once per min_interval
        async with self._lock:
            now = time.monotonic()
            elapsed = now - self._last_request
            if elapsed < self.min_interval:
                await asyncio.sleep(self.min_interval - elapsed)
            self._last_request = time.monotonic()

        return await next_handler(message)


class Timeout(Middleware):
    def __init__(self, timeout: float):
        self.timeout = timeout

    async def handle(self, message: Message, next_handler: Handler) -> Any:
        return await asyncio.wait_for(next_handler(message), timeout=self.timeout)


class Sentry(Middleware):
    def __init__(self, dsn: str, **kwargs: Any) -> None:
        try:
            import sentry_sdk  # type: ignore[import-not-found]
        except ModuleNotFoundError as exc:
            raise ModuleNotFoundError(
                "Sentry middleware requires optional dependency 'sentry-sdk'. "
                "Install with 'ltq[sentry]'."
            ) from exc

        self.sentry = sentry_sdk
        self.sentry.init(dsn=dsn, send_default_pii=True, **kwargs)

    async def handle(self, message: Message, next_handler: Handler) -> Any:
        with self.sentry.push_scope() as scope:
            scope.set_tag("task", message.task)
            scope.set_tag("message_id", message.id)
            scope.set_context(
                "message",
                {
                    "id": message.id,
                    "task": message.task,
                    "args": message.args,
                    "kwargs": message.kwargs,
                    "ctx": message.ctx,
                },
            )

            try:
                return await next_handler(message)
            except RetryMessage:
                raise
            except Exception as e:
                self.sentry.capture_exception(e)
                raise
```
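
With the defaults (`max_retries=3`, `min_delay=1.0`, `backoff=2.0`, `max_delay=60.0`), `Retry` allows `max_retries - 1` retries beyond the first attempt, and the delay before retry n is `min_delay * backoff ** (n - 1)` capped at `max_delay`: 1 s, then 2 s, then the exception is re-raised. A small illustrative helper mirroring that arithmetic (`retry_delays` is not part of the package):

```python
def retry_delays(max_retries: int = 3, min_delay: float = 1.0,
                 backoff: float = 2.0, max_delay: float = 60.0) -> list[float]:
    """Delays Retry would request before giving up, mirroring Retry.handle."""
    budget = max(max_retries - 1, 0)  # retries allowed beyond the first attempt
    return [min(min_delay * backoff ** (n - 1), max_delay) for n in range(1, budget + 1)]

print(retry_delays())                               # [1.0, 2.0] -> 3 attempts total
print(retry_delays(max_retries=6))                  # [1.0, 2.0, 4.0, 8.0, 16.0]
print(retry_delays(max_retries=9, max_delay=30.0))  # later delays cap at 30.0
```
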
ltq-0.1.0/src/ltq/q.py
ADDED
@@ -0,0 +1,82 @@

```python
from __future__ import annotations

import asyncio
from typing import TYPE_CHECKING

from .message import Message

if TYPE_CHECKING:
    from redis.asyncio import Redis


class Queue:
    # Atomically pop up to ARGV[1] items off the queue list and park
    # them in the processing set until they are acked or nacked
    _GET_SCRIPT = """
    local items = {}
    for i = 1, ARGV[1] do
        local item = redis.call("RPOP", KEYS[1])
        if item then
            table.insert(items, item)
        end
    end
    if #items > 0 then
        redis.call("SADD", KEYS[2], unpack(items))
    end
    return items
    """

    def __init__(self, client: Redis, name: str) -> None:
        self.client = client
        self.name = name
        self.queue_key = f"queue:{name}"
        self.processing_key = f"queue:{name}:processing"
        self._get = client.register_script(self._GET_SCRIPT)

    @staticmethod
    def _serialize(messages: list[Message]) -> list[str]:
        return [msg.to_json() for msg in messages]

    async def put(
        self,
        messages: list[Message],
        delay: float = 0.0,
        ttl: int | None = None,
    ) -> None:
        if not messages:
            return
        if delay > 0:
            await asyncio.sleep(delay)
        pipe = self.client.pipeline()
        for item in self._serialize(messages):
            pipe.lpush(self.queue_key, item)
        if ttl:
            pipe.expire(self.queue_key, ttl)
        await pipe.execute()  # type: ignore

    async def get(self, count: int) -> list[Message]:
        results = await self._get(
            keys=[self.queue_key, self.processing_key],
            args=[count],
        )  # type: ignore
        return [Message.from_json(r) for r in results]

    async def ack(self, messages: list[Message]) -> None:
        # Removal matches on the serialized JSON payload
        if not messages:
            return
        items = self._serialize(messages)
        await self.client.srem(self.processing_key, *items)  # type: ignore

    async def nack(self, messages: list[Message]) -> None:
        # Put messages back on the queue and drop them from processing
        if not messages:
            return
        items = self._serialize(messages)
        pipe = self.client.pipeline()
        pipe.srem(self.processing_key, *items)
        for item in items:
            pipe.lpush(self.queue_key, item)
        await pipe.execute()  # type: ignore

    async def len(self) -> int:
        return await self.client.llen(self.queue_key)  # type: ignore

    async def clear(self) -> None:
        await self.client.delete(self.queue_key, self.processing_key)  # type: ignore
```
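
The list/set pair gives a simple at-least-once handoff: `put` LPUSHes onto the left of `queue:<name>`, the Lua script RPOPs from the right (FIFO) and parks each payload in `queue:<name>:processing`, and `ack`/`nack` clears or requeues it. A minimal end-to-end sketch against a local Redis (connection URL assumed):

```python
import asyncio
import redis.asyncio as redis
from ltq.message import Message
from ltq.q import Queue

async def main() -> None:
    client = redis.from_url("redis://localhost:6379")  # assumed local instance
    queue = Queue(client, "demo")

    msg = Message(args=(), kwargs={}, task="demo:noop")
    await queue.put([msg])
    assert await queue.len() == 1

    batch = await queue.get(count=10)  # popped and parked in the processing set
    assert await queue.len() == 0

    await queue.nack(batch)            # changed our mind: back on the queue
    batch = await queue.get(count=10)
    await queue.ack(batch)             # done: removed from the processing set

asyncio.run(main())
```
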
ltq-0.1.0/src/ltq/task.py
ADDED
@@ -0,0 +1,38 @@

```python
from __future__ import annotations

from typing import Any, Awaitable, Callable, Generic, ParamSpec

from .message import Message
from .q import Queue

P = ParamSpec("P")


class Task(Generic[P]):
    def __init__(
        self,
        name: str,
        fn: Callable[P, Awaitable[Any]],
        queue: Queue,
        ttl: int | None = None,
    ) -> None:
        self.name = name
        self.fn = fn
        self.queue = queue
        self.ttl = ttl

    def message(self, *args: P.args, **kwargs: P.kwargs) -> Message:
        return Message(
            args=args,
            kwargs=kwargs,
            task=self.name,
        )

    async def send(self, *args: P.args, **kwargs: P.kwargs) -> str:
        message = self.message(*args, **kwargs)
        await self.queue.put([message], ttl=self.ttl)
        return message.id

    async def send_bulk(self, messages: list[Message]) -> list[str]:
        await self.queue.put(messages, ttl=self.ttl)
        return [message.id for message in messages]
```
ltq-0.1.0/src/ltq/worker.py
ADDED
@@ -0,0 +1,91 @@

```python
from __future__ import annotations

import asyncio
from functools import partial
from pathlib import Path
from typing import TYPE_CHECKING, Any, Awaitable, Callable, ParamSpec

from .errors import RetryMessage
from .task import Task
from .message import Message
from .middleware import Handler, Middleware
from .q import Queue
from .logger import get_logger

if TYPE_CHECKING:
    from redis.asyncio import Redis as AsyncRedis

logger = get_logger()


P = ParamSpec("P")


class Worker:
    def __init__(
        self,
        client: AsyncRedis,
        middlewares: list[Middleware] | None = None,
        concurrency: int = 250,
        poll_sleep: float = 0.1,
    ) -> None:
        self.client: AsyncRedis = client
        self.tasks: list[Task] = []
        self.middlewares: list[Middleware] = middlewares or []
        self.concurrency: int = concurrency
        self.poll_sleep: float = poll_sleep

    def task(
        self,
        queue_name: str | None = None,
        ttl: int | None = None,
    ) -> Callable[[Callable[P, Awaitable[Any]]], Task[P]]:
        def decorator(fn: Callable[P, Awaitable[Any]]) -> Task[P]:
            # Default task name: "<module file stem>:<qualified function name>"
            filename = Path(fn.__code__.co_filename).stem
            task_name = f"{filename}:{fn.__qualname__}"
            queue = Queue(self.client, queue_name or task_name)
            task = Task(
                name=task_name,
                fn=fn,
                queue=queue,
                ttl=ttl,
            )
            self.tasks.append(task)
            return task

        return decorator

    async def worker(self, task: Task):
        async def base(message: Message) -> Any:
            return await task.fn(*message.args, **message.kwargs)

        # Wrap the base handler so the first middleware in the list
        # becomes the outermost layer of the chain
        handler: Handler = base
        for middleware in reversed(self.middlewares):
            handler = partial(middleware.handle, next_handler=handler)

        while True:
            messages = await task.queue.get(self.concurrency)
            if not messages:
                await asyncio.sleep(self.poll_sleep)
                continue

            logger.debug(f"Processing {len(messages)} messages for {task.name}")

            async def process(msg: Message) -> None:
                try:
                    await handler(msg)
                except RetryMessage as e:
                    logger.warning(f"Retrying in {e.delay}s: {e}")
                    await task.queue.put([msg], delay=e.delay)
                except Exception as e:
                    logger.error(
                        f"Rejected after error in {task.name}: {e}",
                        exc_info=True,
                    )

            await asyncio.gather(*(process(m) for m in messages))
            await task.queue.ack(messages)

    async def run(self) -> None:
        workers = (self.worker(task) for task in self.tasks)
        await asyncio.gather(*workers)
```
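
Because `Worker.worker` folds the middleware list in `reversed` order around the base handler, the first middleware in `middlewares=[...]` is the outermost wrapper: it sees the message first on the way in and last on the way out. A standalone sketch of the same composition (`Trace` is a throwaway middleware for illustration):

```python
import asyncio
from functools import partial
from ltq.message import Message
from ltq.middleware import Handler, Middleware

class Trace(Middleware):
    def __init__(self, label: str) -> None:
        self.label = label

    async def handle(self, message: Message, next_handler: Handler):
        print(f"enter {self.label}")
        result = await next_handler(message)
        print(f"exit  {self.label}")
        return result

async def base(message: Message) -> str:
    return "done"

async def main() -> None:
    # Same folding as Worker.worker: reversed list, partial application
    handler: Handler = base
    for middleware in reversed([Trace("outer"), Trace("inner")]):
        handler = partial(middleware.handle, next_handler=handler)

    await handler(Message(args=(), kwargs={}, task="demo:task"))
    # Prints: enter outer, enter inner, exit inner, exit outer

asyncio.run(main())
```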