ltq-0.3.0-py3-none-any.whl → ltq-0.3.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ltq/__init__.py +11 -5
- ltq/app.py +30 -4
- ltq/broker.py +135 -0
- ltq/cli.py +112 -43
- ltq/errors.py +2 -2
- ltq/logger.py +13 -6
- ltq/message.py +6 -6
- ltq/middleware.py +95 -97
- ltq/scheduler.py +56 -37
- ltq/task.py +7 -10
- ltq/utils.py +0 -18
- ltq/worker.py +74 -67
- ltq-0.3.2.dist-info/METADATA +218 -0
- ltq-0.3.2.dist-info/RECORD +16 -0
- {ltq-0.3.0.dist-info → ltq-0.3.2.dist-info}/WHEEL +1 -1
- ltq/q.py +0 -82
- ltq-0.3.0.dist-info/METADATA +0 -137
- ltq-0.3.0.dist-info/RECORD +0 -16
- {ltq-0.3.0.dist-info → ltq-0.3.2.dist-info}/entry_points.txt +0 -0
ltq/__init__.py
CHANGED
@@ -1,18 +1,24 @@
 from .app import App
-from .
+from .broker import Broker
 from .task import Task
 from .worker import Worker
 from .scheduler import Scheduler
 from .logger import get_logger
-from .errors import
+from .errors import RejectError, RetryError
+from .middleware import Middleware, MaxTries, MaxAge, MaxRate, Sentry

 __all__ = [
     "App",
+    "Broker",
     "Worker",
     "Scheduler",
     "Task",
-    "dispatch",
     "get_logger",
-    "
-    "
+    "RejectError",
+    "RetryError",
+    "Middleware",
+    "MaxTries",
+    "MaxAge",
+    "MaxRate",
+    "Sentry",
 ]
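
Note that dispatch is gone from the public surface in 0.3.2 (and ltq/q.py is deleted outright per the summary above), so from ltq import dispatch now fails. The surviving exports can be sanity-checked from the package root; a trivial sketch, not part of the diff itself:

    import ltq

    # Broker, the middleware classes, and the renamed error types are now
    # top-level exports; "dispatch" is no longer listed.
    assert "Broker" in ltq.__all__
    assert "dispatch" not in ltq.__all__
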
ltq/app.py
CHANGED
@@ -1,14 +1,40 @@
 import asyncio
+import threading

+from .middleware import Middleware
 from .worker import Worker


 class App:
-    def __init__(self) -> None:
-        self.workers:
+    def __init__(self, middlewares: list[Middleware] | None = None) -> None:
+        self.workers: dict[str, Worker] = dict()
+        self.middlewares: list[Middleware] = middlewares or []
+
+    def register_middleware(self, middleware: Middleware, pos: int = -1) -> None:
+        if pos == -1:
+            self.middlewares.append(middleware)
+        else:
+            self.middlewares.insert(pos, middleware)

     def register_worker(self, worker: Worker) -> None:
-        self.workers
+        if worker.name in self.workers:
+            raise RuntimeError(f"Worker '{worker.name}' is already registered")
+        worker.middlewares = list(self.middlewares) + worker.middlewares
+        self.workers[worker.name] = worker
+
+    @staticmethod
+    def _run_worker(worker: Worker) -> None:
+        asyncio.run(worker.run())

     async def run(self) -> None:
-
+        threads: list[threading.Thread] = []
+        for worker in self.workers.values():
+            t = threading.Thread(target=self._run_worker, args=(worker,), daemon=True)
+            t.start()
+            threads.append(t)
+
+        try:
+            while any(t.is_alive() for t in threads):
+                await asyncio.sleep(0.2)
+        except asyncio.CancelledError:
+            pass
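
Taken together, App in 0.3.2 fans each registered worker out to its own daemon thread and event loop via asyncio.run, prepends app-level middlewares to each worker's own list, and rejects duplicate worker names. A minimal usage sketch; the stand-in worker class below is hypothetical and only models the attributes this diff actually touches (name, middlewares, and an async run method), since ltq's real Worker is not shown in full here:

    import asyncio

    from ltq import App

    class EchoWorker:
        # Hypothetical stand-in for ltq.Worker, duck-typed to what
        # App.register_worker and App.run rely on in this diff.
        def __init__(self, name: str) -> None:
            self.name = name
            self.middlewares: list = []

        async def run(self) -> None:
            print(f"{self.name}: running in its own thread and event loop")

    app = App()
    app.register_worker(EchoWorker("emails"))
    # Registering a second EchoWorker("emails") would raise RuntimeError.
    asyncio.run(app.run())  # polls every 0.2s; returns once all worker threads exit
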
ltq/broker.py
ADDED
@@ -0,0 +1,135 @@
+from __future__ import annotations
+
+import asyncio
+import time
+from urllib.parse import urlparse
+import uuid
+from collections import defaultdict
+
+import redis.asyncio as aioredis
+
+from .message import Message
+
+
+class Broker:
+    @staticmethod
+    def from_url(url: str) -> Broker:
+        urlp = urlparse(url)
+        if urlp.scheme == "memory":
+            return MemoryBroker()
+        elif urlp.scheme == "redis":
+            return RedisBroker(url)
+        else:
+            raise RuntimeError(f"Unknown scheme: {urlp.scheme}")
+
+    async def close(self) -> None: ...
+    async def publish(self, message: Message, delay: float = 0) -> None: ...
+    async def consume(self, queue: str) -> Message: ...
+    async def ack(self, message: Message) -> None: ...
+    async def nack(
+        self,
+        message: Message,
+        delay: float = 0,
+        drop: bool = False,
+    ) -> None: ...
+    async def len(self, queue: str) -> int: ...
+    async def clear(self, queue: str) -> None: ...
+
+
+class RedisBroker(Broker):
+    def __init__(self, url: str) -> None:
+        self.url = url
+        self._client = aioredis.from_url(url)
+        self._id = uuid.uuid4().hex[:8]
+
+    async def close(self) -> None:
+        await self._client.aclose()
+
+    async def publish(
+        self,
+        message: Message,
+        delay: float = 0,
+    ) -> None:
+        score = time.time() + delay
+        await self._client.zadd(
+            f"queue:{message.task_name}",
+            {
+                message.to_json(): score,
+            },
+        )  # type: ignore
+
+    async def consume(self, queue: str) -> Message:
+        while True:
+            now = time.time()
+            ready = await self._client.zrangebyscore(
+                f"queue:{queue}", 0, now, start=0, num=1
+            )  # type: ignore
+            if ready:
+                msg = ready[0]
+                await self._client.zadd(f"processing:{queue}:{self._id}", {msg: now,})  # type: ignore
+                await self._client.zrem(f"queue:{queue}", msg)  # type: ignore
+                return Message.from_json(msg)
+            await asyncio.sleep(0.1)
+
+    async def ack(self, message: Message) -> None:
+        key = f"processing:{message.task_name}:{self._id}"
+        await self._client.zrem(key, message.to_json())  # type: ignore
+
+    async def nack(
+        self,
+        message: Message,
+        delay: float = 0,
+        drop: bool = False,
+    ) -> None:
+        key = f"processing:{message.task_name}:{self._id}"
+        await self._client.zrem(key, message.to_json())  # type: ignore
+        if not drop:
+            await self.publish(message, delay=delay)
+
+    async def len(self, queue: str) -> int:
+        return await self._client.zcard(f"queue:{queue}") or 0  # type: ignore
+
+    async def clear(self, queue: str) -> None:
+        await self._client.delete(f"queue:{queue}", f"processing:{queue}:{self._id}")  # type: ignore
+
+
+class MemoryBroker(Broker):
+    def __init__(self) -> None:
+        self._queues: defaultdict[str, dict[str, float]] = defaultdict(dict)
+
+    async def close(self) -> None:
+        pass
+
+    async def publish(
+        self,
+        message: Message,
+        delay: float = 0,
+    ) -> None:
+        self._queues[message.task_name][message.to_json()] = time.time() + delay
+
+    async def consume(self, queue: str) -> Message:
+        while True:
+            now = time.time()
+            for msg, score in list(self._queues[queue].items()):
+                if score <= now:
+                    del self._queues[queue][msg]
+                    return Message.from_json(msg)
+            await asyncio.sleep(0.1)
+
+    async def ack(self, message: Message) -> None:
+        pass
+
+    async def nack(
+        self,
+        message: Message,
+        delay: float = 0,
+        drop: bool = False,
+    ) -> None:
+        if not drop:
+            await self.publish(message, delay=delay)
+
+    async def len(self, queue: str) -> int:
+        return len(self._queues[queue])
+
+    async def clear(self, queue: str) -> None:
+        self._queues.pop(queue, None)
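
Both backends share one delay model: publish scores each serialized message with time.time() + delay, and consume only hands out messages whose score is due, so delayed retries and immediate publishes live in the same structure (a Redis sorted set per queue, or a plain dict for MemoryBroker). A round-trip sketch against the in-memory backend, assuming Message.to_json and Message.from_json round-trip cleanly (message.py is only partially shown below):

    import asyncio

    from ltq.broker import Broker
    from ltq.message import Message

    async def demo() -> None:
        broker = Broker.from_url("memory://")  # "redis://host:6379" selects RedisBroker
        await broker.publish(Message(args=(1, 2), kwargs={}, task_name="add"))
        print(await broker.len("add"))     # 1
        msg = await broker.consume("add")  # polls every 0.1s until a score is due
        await broker.ack(msg)              # no-op in memory; zrem on the processing set in Redis
        print(await broker.len("add"))     # 0
        await broker.close()

    asyncio.run(demo())
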
ltq/cli.py
CHANGED
@@ -8,6 +8,7 @@ import argparse

 from .logger import setup_logging, get_logger
 from .app import App
+from .broker import Broker
 from .worker import Worker

 logger = get_logger()

@@ -37,74 +38,142 @@ def import_from_string(import_str: str):
     sys.exit(1)


+async def clear_queue(
+    task_name: str,
+    url: str = "redis://localhost:6379",
+) -> None:
+    """Clear a queue for a specific task."""
+    broker = Broker.from_url(url)
+    try:
+        await broker.clear(task_name)
+        logger.info(f"Cleared queue for task: {task_name}")
+    finally:
+        await broker.close()
+
+
+async def get_queue_size(
+    task_name: str,
+    url: str = "redis://localhost:6379",
+) -> int:
+    """Get the size of a queue for a specific task."""
+    broker = Broker.from_url(url)
+    try:
+        return await broker.len(task_name)
+    finally:
+        await broker.close()
+
+
 def main():
     """Run a ltq worker."""

     parser = argparse.ArgumentParser(
         prog="ltq",
-        description="Run a ltq worker",
+        description="Run a ltq worker or manage queues",
         formatter_class=argparse.RawDescriptionHelpFormatter,
-        epilog="
+        epilog="Examples:\n  ltq run examples:worker --concurrency 100\n  ltq clear emails:send_email",
     )

-    parser.
+    subparsers = parser.add_subparsers(dest="command", help="Command to run")
+
+    # Run command
+    run_parser = subparsers.add_parser("run", help="Run a worker or app")
+    run_parser.add_argument(
         "worker", nargs="?", help="Worker import string (module:attribute)"
     )
-
+    run_parser.add_argument(
         "--app", dest="app", help="App import string (module:attribute)"
     )
-
-
-
+    run_parser.add_argument(
+        "--concurrency", type=int, help="Override worker concurrency"
+    )
+    run_parser.add_argument(
         "--log-level",
         type=str,
         default="INFO",
         choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
         help="Set logging level (default: INFO)",
     )
-    args = parser.parse_args()

-
-
-
-
+    # Clear command
+    clear_parser = subparsers.add_parser("clear", help="Clear a task queue")
+    clear_parser.add_argument("task_name", help="Task name (namespace:function)")
+    clear_parser.add_argument(
+        "--redis-url",
+        default="redis://localhost:6379",
+        help="Redis URL (default: redis://localhost:6379)",
+    )
+    clear_parser.add_argument(
+        "--log-level",
+        type=str,
+        default="INFO",
+        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
+        help="Set logging level (default: INFO)",
+    )
+
+    # Size command
+    size_parser = subparsers.add_parser("size", help="Get queue size for a task")
+    size_parser.add_argument("task_name", help="Task name (namespace:function)")
+    size_parser.add_argument(
+        "--redis-url",
+        default="redis://localhost:6379",
+        help="Redis URL (default: redis://localhost:6379)",
+    )
+
+    args = parser.parse_args()

     # Setup colored logging for CLI
-    setup_logging(level=args
-    if args.log_level:
+    setup_logging(level=getattr(args, "log_level", "INFO"))
+    if hasattr(args, "log_level") and args.log_level:
         logger.setLevel(args.log_level)

-
-
+    # Handle clear command
+    if args.command == "clear":
+        asyncio.run(clear_queue(args.task_name, args.redis_url))
+        return
+
+    # Handle size command
+    if args.command == "size":
+        size = asyncio.run(get_queue_size(args.task_name, args.redis_url))
+        print(f"{args.task_name}: {size}")
+        return
+
+    # Handle run command
+    if args.command == "run":
+        if not args.worker and not args.app:
+            run_parser.error("either worker or --app is required")
+        if args.worker and args.app:
+            run_parser.error("cannot specify both worker and --app")
+
+        if args.app:
+            app: App = import_from_string(args.app)
+
+            for w in app.workers.values():
+                if args.concurrency:
+                    w.concurrency = args.concurrency
+
+            logger.info("Starting ltq app")
+
+            try:
+                asyncio.run(app.run())
+            except KeyboardInterrupt:
+                logger.info("Shutting down...")
+        else:
+            worker: Worker = import_from_string(args.worker)

-    for w in app.workers:
         if args.concurrency:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    if args.concurrency:
-        worker.concurrency = args.concurrency
-    if args.poll_sleep:
-        worker.poll_sleep = args.poll_sleep
-
-    logger.info("Starting ltq worker")
-    logger.info("Worker: %s", args.worker)
-
-    try:
-        asyncio.run(worker.run())
-    except KeyboardInterrupt:
-        logger.info("Shutting down...")
+            worker.concurrency = args.concurrency
+
+        logger.info("Starting ltq worker")
+
+        try:
+            asyncio.run(worker.run())
+        except KeyboardInterrupt:
+            logger.info("Shutting down...")
+        return
+
+    # No command specified
+    parser.print_help()
+    sys.exit(1)


 if __name__ == "__main__":
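
Because clear_queue and get_queue_size are plain coroutines, the new clear and size subcommands can also be driven programmatically; a sketch inferred from the signatures above (needs a reachable Redis at the given URL):

    import asyncio

    from ltq.cli import clear_queue, get_queue_size

    url = "redis://localhost:6379"
    print(asyncio.run(get_queue_size("emails:send_email", url)))  # current queue depth
    asyncio.run(clear_queue("emails:send_email", url))            # deletes the queue's sorted set
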
ltq/errors.py
CHANGED
@@ -1,10 +1,10 @@
-class
+class RejectError(Exception):
     """Signal that a message should be dropped."""

     pass


-class
+class RetryError(Exception):
     """Signal that a message should be retried after a delay."""

     def __init__(self, delay: float = 0.0, message: str = "") -> None:
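
With concrete names, the two control-flow exceptions read naturally at task call sites. A sketch of intended use; the worker-side handling (drop vs. requeue) is taken from the docstrings, not shown in this diff:

    from ltq import RejectError, RetryError

    async def send_email(address: str) -> None:
        if "@" not in address:
            raise RejectError("malformed address")  # message should be dropped
        try:
            ...  # hand off to SMTP
        except TimeoutError:
            # message should be requeued and retried after 30 seconds
            raise RetryError(delay=30.0, message="SMTP timed out")
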
ltq/logger.py
CHANGED
@@ -1,5 +1,6 @@
 import logging
 import sys
+import threading


 class ColoredFormatter(logging.Formatter):

@@ -25,14 +26,18 @@ class ColoredFormatter(logging.Formatter):
     def format(self, record: logging.LogRecord) -> str:
         """Format log record with colored severity level."""
         color = self.COLORS.get(record.levelno, self.RESET)
-        log_time = self.formatTime(record, "%H:%M:%S")
-
+        log_time = self.formatTime(record, "%Y-%m-%d %H:%M:%S")
+        log_time_ms = f"{log_time}.{int(record.msecs):03d}"
+        timestamp = f"{self.GRAY}{log_time_ms}{self.RESET}"

-        levelname = f"{color}{record.levelname:<
-
+        levelname = f"{color}{record.levelname:<4}{self.RESET}"
+
+        name = record.name.removeprefix("ltq.")
+        workername = f"{self.CYAN}{name:<6}{self.RESET}"
+        thread_id = f"{self.GRAY}[{threading.current_thread().ident}]{self.RESET}"

         message = record.getMessage()
-        log_line = f"{timestamp} {levelname} {workername} {message}"
+        log_line = f"{timestamp} {thread_id} {levelname} {workername} {message}"

         if record.exc_info and not record.exc_text:
             record.exc_text = self.formatException(record.exc_info)

@@ -60,4 +65,6 @@ def setup_logging(level: int | str = logging.INFO) -> None:


 def get_logger(name: str = "ltq") -> logging.Logger:
-
+    if name == "ltq":
+        return logging.getLogger("ltq")
+    return logging.getLogger(f"ltq.{name}")
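
get_logger now namespaces every logger under the ltq root, so handlers installed by setup_logging propagate to per-worker children, while ColoredFormatter strips the ltq. prefix back off for display. Behavior read directly off the diff:

    from ltq.logger import setup_logging, get_logger

    setup_logging(level="DEBUG")
    print(get_logger().name)          # "ltq"
    print(get_logger("emails").name)  # "ltq.emails", rendered as "emails" in log lines
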
ltq/message.py
CHANGED
@@ -1,12 +1,13 @@
 from __future__ import annotations

 import json
+import time
 import uuid
 from dataclasses import dataclass, field
-from typing import Any
+from typing import Any

-
-
+def _default_ctx() -> dict[str, Any]:
+    return {"created_at": time.time()}


 @dataclass

@@ -14,9 +15,8 @@ class Message:
     args: tuple[Any, ...]
     kwargs: dict[str, Any]
     task_name: str
-
-
-    id: str = field(default_factory=lambda: uuid.uuid4().hex)
+    ctx: dict[str, Any] = field(default_factory=_default_ctx)
+    id: str = field(default_factory=lambda: str(uuid.uuid4()))

     def to_json(self) -> str:
         return json.dumps(