ltq 0.3.1__tar.gz → 0.4.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ltq-0.4.0/PKG-INFO ADDED
@@ -0,0 +1,218 @@
1
+ Metadata-Version: 2.3
2
+ Name: ltq
3
+ Version: 0.4.0
4
+ Summary: Add your description here
5
+ Author: Tom Clesius
6
+ Author-email: Tom Clesius <tomclesius@gmail.com>
7
+ Requires-Dist: redis>=7.1.0
8
+ Requires-Dist: croniter>=6.0.0 ; extra == 'scheduler'
9
+ Requires-Dist: sentry-sdk>=2.0.0 ; extra == 'sentry'
10
+ Requires-Python: >=3.13
11
+ Provides-Extra: scheduler
12
+ Provides-Extra: sentry
13
+ Description-Content-Type: text/markdown
14
+
15
+ <p align="center">
16
+ <img src="https://raw.githubusercontent.com/tclesius/ltq/refs/heads/main/assets/logo.png" alt="LTQ" width="400">
17
+ </p>
18
+
19
+ <p align="center">
20
+ A lightweight, async-first task queue built on Redis.
21
+ </p>
22
+
23
+ ## Installation
24
+
25
+ ```bash
26
+ pip install ltq
27
+ # or
28
+ uv add ltq
29
+ ```
30
+
31
+ ## Broker Backends
32
+
33
+ LTQ supports multiple broker backends:
34
+
35
+ - **Redis** (default): `broker_url="redis://localhost:6379"`
36
+ - **Memory**: `broker_url="memory://"` (useful for testing)
37
+
38
+ All workers and schedulers accept a `broker_url` parameter.
39
+
40
+ ## Quick Start
41
+
42
+ ```python
43
+ import asyncio
44
+ import ltq
45
+
46
+ worker = ltq.Worker("emails", broker_url="redis://localhost:6379")
47
+
48
+ @worker.task()
49
+ async def send_email(to: str, subject: str, body: str) -> None:
50
+ # your async code here
51
+ pass
52
+
53
+ async def main():
54
+ # Enqueue a task
55
+ await send_email.send("user@example.com", "Hello", "World")
56
+
57
+ # Or enqueue multiple tasks
58
+ for email in ["a@example.com", "b@example.com"]:
59
+ await send_email.send(email, "Hi", "Message")
60
+
61
+ asyncio.run(main())
62
+ ```
63
+
64
+ Each worker has a namespace (e.g., `"emails"`), and tasks are automatically namespaced as `{namespace}:{function_name}`.
65
+
66
+ ## Running Workers
67
+
68
+ ```bash
69
+ # Run a single worker
70
+ ltq run myapp:worker
71
+
72
+ # With options
73
+ ltq run myapp:worker --concurrency 100 --log-level DEBUG
74
+ ```
75
+
76
+ ## Running an App
77
+
78
+ Register multiple workers into an `App` to run them together:
79
+
80
+ ```python
81
+ import ltq
82
+
83
+ app = ltq.App()
84
+ app.register_worker(emails_worker)
85
+ app.register_worker(notifications_worker)
86
+ ```
87
+
88
+ ```bash
89
+ ltq run --app myapp:app
90
+ ```
91
+
92
+ ### App Middleware
93
+
94
+ Apply middleware globally to all workers in an app:
95
+
96
+ ```python
97
+ from ltq.middleware import Sentry
98
+
99
+ app = ltq.App(middlewares=[Sentry(dsn="https://...")])
100
+
101
+ # Or register after creation
102
+ app.register_middleware(Sentry(dsn="https://..."))
103
+ app.register_middleware(MyMiddleware(), pos=0)
104
+
105
+ # When workers are registered, app middlewares are prepended to each worker's stack
106
+ app.register_worker(emails_worker)
107
+ ```
108
+
109
+ ### Threading Model
110
+
111
+ By default, `App` runs each worker in its own thread with its own event loop. This keeps all workers in a single process while isolating them from one another: because each worker drives a separate event loop, one worker's tasks won't block another's.
112
+
113
+ **For maximum isolation** (separate memory, crash protection), run each worker in its own process:
114
+
115
+ ```bash
116
+ # Terminal 1
117
+ ltq run myapp:emails_worker
118
+
119
+ # Terminal 2
120
+ ltq run myapp:notifications_worker
121
+ ```
122
+
123
+ This gives you full process isolation at the cost of more overhead.
124
+
125
+ ## Queue Management
126
+
127
+ Manage queues using the CLI:
128
+
129
+ ```bash
130
+ # Clear a task queue
131
+ ltq clear emails:send_email
132
+
133
+ # Get queue size
134
+ ltq size emails:send_email
135
+
136
+ # With custom Redis URL
137
+ ltq clear emails:send_email --redis-url redis://localhost:6380
138
+ ltq size emails:send_email --redis-url redis://localhost:6380
139
+ ```
140
+
141
+ Queue names are automatically namespaced as `{worker_name}:{function_name}`.
142
+
143
+ ## Scheduler
144
+
145
+ Run tasks on a cron schedule (requires `ltq[scheduler]`):
146
+
147
+ ```python
148
+ import ltq
149
+
150
+ scheduler = ltq.Scheduler()
151
+ scheduler.cron("*/5 * * * *", send_email.message("admin@example.com", "Report", "..."))
152
+ scheduler.start()  # Runs the scheduler in blocking mode via asyncio.run()
153
+ ```
154
+
155
+ ## Task Options
156
+
157
+ Configure task behavior with options:
158
+
159
+ ```python
160
+ from datetime import timedelta
161
+
162
+ @worker.task(max_tries=3, max_age=timedelta(hours=1), max_rate="10/s")
163
+ async def send_email(to: str, subject: str, body: str) -> None:
164
+ # your async code here
165
+ pass
166
+ ```
167
+
168
+ **Available options:**
169
+
170
+ - `max_tries` (int): Maximum retry attempts before rejecting the message
171
+ - `max_age` (timedelta): Maximum message age before rejection
172
+ - `max_rate` (str): Rate limit in the format `"N/s"`, `"N/m"`, or `"N/h"` (N executions per second, minute, or hour)
173
+
174
+ ## Middleware
175
+
176
+ Each middleware is an async context manager that wraps task execution. The default stack is `[MaxTries(), MaxAge(), MaxRate()]`, so you only need to pass `middlewares` if you want to replace the defaults or add to them:
177
+
178
+ ```python
179
+ from ltq.middleware import MaxTries, MaxAge, MaxRate, Sentry
180
+
181
+ worker = ltq.Worker(
182
+ "emails",
183
+ broker_url="redis://localhost:6379",
184
+ middlewares=[
185
+ MaxTries(),
186
+ MaxAge(),
187
+ MaxRate(),
188
+ Sentry(dsn="https://..."),
189
+ ],
190
+ )
191
+ ```
192
+
193
+ **Built-in:** `MaxTries`, `MaxAge`, `MaxRate`, `Sentry` (requires `ltq[sentry]`)
194
+
195
+ You can also register middleware after creating the worker:
196
+
197
+ ```python
198
+ worker.register_middleware(Sentry(dsn="https://..."))
199
+
200
+ # Insert at specific position (default is -1 for append)
201
+ worker.register_middleware(MyMiddleware(), pos=0)
202
+ ```
203
+
204
+ **Custom middleware:**
205
+
206
+ ```python
207
+ from contextlib import asynccontextmanager
208
+ from ltq.middleware import Middleware
209
+ from ltq.message import Message
210
+ from ltq.task import Task
211
+
212
+ class Logger(Middleware):
213
+ @asynccontextmanager
214
+ async def __call__(self, message: Message, task: Task):
215
+ print(f"Processing {message.task_name}")
216
+ yield
217
+ print(f"Completed {message.task_name}")
218
+ ```
ltq-0.4.0/README.md ADDED
@@ -0,0 +1,204 @@
1
+ <p align="center">
2
+ <img src="https://raw.githubusercontent.com/tclesius/ltq/refs/heads/main/assets/logo.png" alt="LTQ" width="400">
3
+ </p>
4
+
5
+ <p align="center">
6
+ A lightweight, async-first task queue built on Redis.
7
+ </p>
8
+
9
+ ## Installation
10
+
11
+ ```bash
12
+ pip install ltq
13
+ # or
14
+ uv add ltq
15
+ ```
16
+
17
+ ## Broker Backends
18
+
19
+ LTQ supports multiple broker backends:
20
+
21
+ - **Redis** (default): `broker_url="redis://localhost:6379"`
22
+ - **Memory**: `broker_url="memory://"` (useful for testing)
23
+
24
+ All workers and schedulers accept a `broker_url` parameter.
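+
+ For example, the in-memory broker lets a unit test enqueue messages without a running Redis server. A minimal sketch (the test name and wiring here are illustrative, not part of LTQ):
+
+ ```python
+ import ltq
+
+ # In-memory broker: messages live inside this process, so no Redis is required.
+ worker = ltq.Worker("emails", broker_url="memory://")
+
+ @worker.task()
+ async def send_email(to: str, subject: str, body: str) -> None:
+     ...
+
+ async def test_send_email_enqueues() -> None:
+     # Enqueue only; driving the worker loop in the background is up to the test.
+     await send_email.send("user@example.com", "Hello", "World")
+ ```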
25
+
26
+ ## Quick Start
27
+
28
+ ```python
29
+ import asyncio
30
+ import ltq
31
+
32
+ worker = ltq.Worker("emails", broker_url="redis://localhost:6379")
33
+
34
+ @worker.task()
35
+ async def send_email(to: str, subject: str, body: str) -> None:
36
+ # your async code here
37
+ pass
38
+
39
+ async def main():
40
+ # Enqueue a task
41
+ await send_email.send("user@example.com", "Hello", "World")
42
+
43
+ # Or enqueue multiple tasks
44
+ for email in ["a@example.com", "b@example.com"]:
45
+ await send_email.send(email, "Hi", "Message")
46
+
47
+ asyncio.run(main())
48
+ ```
49
+
50
+ Each worker has a namespace (e.g., `"emails"`), and tasks are automatically namespaced as `{namespace}:{function_name}`.
51
+
52
+ ## Running Workers
53
+
54
+ ```bash
55
+ # Run a single worker
56
+ ltq run myapp:worker
57
+
58
+ # With options
59
+ ltq run myapp:worker --concurrency 100 --log-level DEBUG
60
+ ```
61
+
62
+ ## Running an App
63
+
64
+ Register multiple workers into an `App` to run them together:
65
+
66
+ ```python
67
+ import ltq
68
+
69
+ app = ltq.App()
70
+ app.register_worker(emails_worker)
71
+ app.register_worker(notifications_worker)
72
+ ```
73
+
74
+ ```bash
75
+ ltq run --app myapp:app
76
+ ```
77
+
78
+ ### App Middleware
79
+
80
+ Apply middleware globally to all workers in an app:
81
+
82
+ ```python
83
+ from ltq.middleware import Sentry
84
+
85
+ app = ltq.App(middlewares=[Sentry(dsn="https://...")])
86
+
87
+ # Or register after creation
88
+ app.register_middleware(Sentry(dsn="https://..."))
89
+ app.register_middleware(MyMiddleware(), pos=0)
90
+
91
+ # When workers are registered, app middlewares are prepended to each worker's stack
92
+ app.register_worker(emails_worker)
93
+ ```
94
+
95
+ ### Threading Model
96
+
97
+ By default, `App` runs each worker in its own thread with its own event loop. This keeps all workers in a single process while isolating them from one another: because each worker drives a separate event loop, one worker's tasks won't block another's.
98
+
99
+ **For maximum isolation** (separate memory, crash protection), run each worker in its own process:
100
+
101
+ ```bash
102
+ # Terminal 1
103
+ ltq run myapp:emails_worker
104
+
105
+ # Terminal 2
106
+ ltq run myapp:notifications_worker
107
+ ```
108
+
109
+ This gives you full process isolation at the cost of more overhead.
110
+
111
+ ## Queue Management
112
+
113
+ Manage queues using the CLI:
114
+
115
+ ```bash
116
+ # Clear a task queue
117
+ ltq clear emails:send_email
118
+
119
+ # Get queue size
120
+ ltq size emails:send_email
121
+
122
+ # With custom Redis URL
123
+ ltq clear emails:send_email --redis-url redis://localhost:6380
124
+ ltq size emails:send_email --redis-url redis://localhost:6380
125
+ ```
126
+
127
+ Queue names are automatically namespaced as `{worker_name}:{function_name}`.
128
+
129
+ ## Scheduler
130
+
131
+ Run tasks on a cron schedule (requires `ltq[scheduler]`):
132
+
133
+ ```python
134
+ import ltq
135
+
136
+ scheduler = ltq.Scheduler()
137
+ scheduler.cron("*/5 * * * *", send_email.message("admin@example.com", "Report", "..."))
138
+ scheduler.start()  # Runs the scheduler in blocking mode via asyncio.run()
139
+ ```
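+
+ Cron expressions use the standard five-field syntax (the `scheduler` extra installs croniter to parse them). A sketch of a weekly job; the schedule, recipient, and broker URL are illustrative:
+
+ ```python
+ scheduler = ltq.Scheduler(broker_url="redis://localhost:6379")
+
+ # Every Monday at 09:00, enqueue a weekly report email.
+ scheduler.cron("0 9 * * 1", send_email.message("admin@example.com", "Weekly report", "..."))
+ scheduler.start()
+ ```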
140
+
141
+ ## Task Options
142
+
143
+ Configure task behavior with options:
144
+
145
+ ```python
146
+ from datetime import timedelta
147
+
148
+ @worker.task(max_tries=3, max_age=timedelta(hours=1), max_rate="10/s")
149
+ async def send_email(to: str, subject: str, body: str) -> None:
150
+ # your async code here
151
+ pass
152
+ ```
153
+
154
+ **Available options:**
155
+
156
+ - `max_tries` (int): Maximum retry attempts before rejecting the message
157
+ - `max_age` (timedelta): Maximum message age before rejection
158
+ - `max_rate` (str): Rate limit in the format `"N/s"`, `"N/m"`, or `"N/h"` (N executions per second, minute, or hour); see the sketch after this list
159
+
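+ A rough sketch of the three rate units (the task bodies are placeholders):
+
+ ```python
+ @worker.task(max_rate="10/s")    # at most 10 executions per second
+ async def ping(url: str) -> None: ...
+
+ @worker.task(max_rate="100/m")   # at most 100 executions per minute
+ async def resize_image(key: str) -> None: ...
+
+ @worker.task(max_rate="500/h")   # at most 500 executions per hour
+ async def sync_crm() -> None: ...
+ ```
+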
160
+ ## Middleware
161
+
162
+ Each middleware is an async context manager that wraps task execution. The default stack is `[MaxTries(), MaxAge(), MaxRate()]`, so you only need to pass `middlewares` if you want to replace the defaults or add to them:
163
+
164
+ ```python
165
+ from ltq.middleware import MaxTries, MaxAge, MaxRate, Sentry
166
+
167
+ worker = ltq.Worker(
168
+ "emails",
169
+ broker_url="redis://localhost:6379",
170
+ middlewares=[
171
+ MaxTries(),
172
+ MaxAge(),
173
+ MaxRate(),
174
+ Sentry(dsn="https://..."),
175
+ ],
176
+ )
177
+ ```
178
+
179
+ **Built-in:** `MaxTries`, `MaxAge`, `MaxRate`, `Sentry` (requires `ltq[sentry]`)
180
+
181
+ You can also register middleware after creating the worker:
182
+
183
+ ```python
184
+ worker.register_middleware(Sentry(dsn="https://..."))
185
+
186
+ # Insert at specific position (default is -1 for append)
187
+ worker.register_middleware(MyMiddleware(), pos=0)
188
+ ```
189
+
190
+ **Custom middleware:**
191
+
192
+ ```python
193
+ from contextlib import asynccontextmanager
194
+ from ltq.middleware import Middleware
195
+ from ltq.message import Message
196
+ from ltq.task import Task
197
+
198
+ class Logger(Middleware):
199
+ @asynccontextmanager
200
+ async def __call__(self, message: Message, task: Task):
201
+ print(f"Processing {message.task_name}")
202
+ yield
203
+ print(f"Completed {message.task_name}")
204
+ ```
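+
+ A custom middleware like `Logger` plugs into the same registration API shown earlier:
+
+ ```python
+ worker.register_middleware(Logger())         # append to the middleware stack
+ worker.register_middleware(Logger(), pos=0)  # or insert it at the front
+ ```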
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "ltq"
3
- version = "0.3.1"
3
+ version = "0.4.0"
4
4
  description = "Add your description here"
5
5
  readme = "README.md"
6
6
  authors = [{ name = "Tom Clesius", email = "tomclesius@gmail.com" }]
@@ -19,7 +19,7 @@ requires = ["uv_build>=0.9.26,<0.10.0"]
19
19
  build-backend = "uv_build"
20
20
 
21
21
  [tool.bumpversion]
22
- current_version = "0.3.1"
22
+ current_version = "0.4.0"
23
23
  commit = true
24
24
  tag = true
25
25
  message = "v{new_version}"
@@ -0,0 +1,24 @@
1
+ from .app import App
2
+ from .broker import Broker
3
+ from .task import Task
4
+ from .worker import Worker
5
+ from .scheduler import Scheduler
6
+ from .logger import get_logger
7
+ from .errors import RejectError, RetryError
8
+ from .middleware import Middleware, MaxTries, MaxAge, MaxRate, Sentry
9
+
10
+ __all__ = [
11
+ "App",
12
+ "Broker",
13
+ "Worker",
14
+ "Scheduler",
15
+ "Task",
16
+ "get_logger",
17
+ "RejectError",
18
+ "RetryError",
19
+ "Middleware",
20
+ "MaxTries",
21
+ "MaxAge",
22
+ "MaxRate",
23
+ "Sentry",
24
+ ]
@@ -0,0 +1,41 @@
1
+ import asyncio
2
+ import threading
3
+
4
+ from .middleware import Middleware
5
+ from .worker import Worker
6
+
7
+
8
+ class App:
9
+ def __init__(self, middlewares: list[Middleware] | None = None) -> None:
10
+ self.workers: dict[str, Worker] = dict()
11
+ self.middlewares: list[Middleware] = middlewares or []
12
+
13
+ def register_middleware(self, middleware: Middleware, pos: int = -1) -> None:
14
+ if pos == -1:
15
+ self.middlewares.append(middleware)
16
+ else:
17
+ self.middlewares.insert(pos, middleware)
18
+
19
+ def register_worker(self, worker: Worker) -> None:
20
+ if worker.name in self.workers:
21
+ raise RuntimeError(f"Worker '{worker.name}' is already registered")
22
+ worker.middlewares = list(self.middlewares) + worker.middlewares
23
+ self.workers[worker.name] = worker
24
+
25
+ @staticmethod
26
+ def _run_worker(worker: Worker) -> None:
27
+ asyncio.run(worker.run())
28
+
29
+ async def run(self) -> None:
30
+ threads: list[threading.Thread] = []
31
+ for worker in self.workers.values():
32
+ t = threading.Thread(target=self._run_worker, args=(worker,), daemon=True)
33
+ t.start()
34
+ threads.append(t)
35
+
36
+ try:
37
+ while any(t.is_alive() for t in threads):
38
+ await asyncio.sleep(0.2)
39
+ except asyncio.CancelledError:
40
+ # Allow graceful shutdown when the run coroutine is cancelled.
41
+ pass
@@ -0,0 +1,140 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import time
5
+ from urllib.parse import urlparse
6
+ import uuid
7
+ from collections import defaultdict
8
+
9
+ import redis.asyncio as aioredis
10
+
11
+ from .message import Message
12
+
13
+
14
+ class Broker:
15
+ @staticmethod
16
+ def from_url(url: str) -> Broker:
17
+ urlp = urlparse(url)
18
+ if urlp.scheme == "memory":
19
+ return MemoryBroker()
20
+ elif urlp.scheme == "redis":
21
+ return RedisBroker(url)
22
+ else:
23
+ raise RuntimeError(f"Unknown scheme: {urlp.scheme}")
24
+
25
+ async def close(self) -> None: ...
26
+ async def publish(self, message: Message, delay: float = 0) -> None: ...
27
+ async def consume(self, queue: str) -> Message: ...
28
+ async def ack(self, message: Message) -> None: ...
29
+ async def nack(
30
+ self,
31
+ message: Message,
32
+ delay: float = 0,
33
+ drop: bool = False,
34
+ ) -> None: ...
35
+ async def len(self, queue: str) -> int: ...
36
+ async def clear(self, queue: str) -> None: ...
37
+
38
+
39
+ class RedisBroker(Broker):
40
+ def __init__(self, url: str) -> None:
41
+ self.url = url
42
+ self._client = aioredis.from_url(url)
43
+ self._id = uuid.uuid4().hex[:8]
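+ # Lua script: atomically claim one due message (score <= now) by moving it from the pending queue (KEYS[1]) into this consumer's processing set (KEYS[2]).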
44
+ self._consume = self._client.register_script("""
45
+ local ready = redis.call('zrangebyscore', KEYS[1], 0, ARGV[1], 'LIMIT', 0, 1)
46
+ if #ready == 0 then return nil end
47
+ local msg = ready[1]
48
+ redis.call('zadd', KEYS[2], ARGV[1], msg)
49
+ redis.call('zrem', KEYS[1], msg)
50
+ return msg
51
+ """)
52
+
53
+ async def close(self) -> None:
54
+ await self._client.aclose()
55
+
56
+ async def publish(
57
+ self,
58
+ message: Message,
59
+ delay: float = 0,
60
+ ) -> None:
61
+ score = time.time() + delay
62
+ await self._client.zadd(
63
+ f"queue:{message.task_name}",
64
+ {
65
+ message.to_json(): score,
66
+ },
67
+ ) # type: ignore
68
+
69
+ async def consume(self, queue: str) -> Message:
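+ # Poll until the claim script returns a due message, sleeping briefly between attempts.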
70
+ while True:
71
+ msg = await self._consume(
72
+ keys=[f"queue:{queue}", f"processing:{queue}:{self._id}"],
73
+ args=[time.time()],
74
+ )
75
+ if msg:
76
+ return Message.from_json(msg)
77
+ await asyncio.sleep(0.1)
78
+
79
+ async def ack(self, message: Message) -> None:
80
+ key = f"processing:{message.task_name}:{self._id}"
81
+ await self._client.zrem(key, message.to_json()) # type: ignore
82
+
83
+ async def nack(
84
+ self,
85
+ message: Message,
86
+ delay: float = 0,
87
+ drop: bool = False,
88
+ ) -> None:
89
+ key = f"processing:{message.task_name}:{self._id}"
90
+ await self._client.zrem(key, message.to_json()) # type: ignore
91
+ if not drop:
92
+ await self.publish(message, delay=delay)
93
+
94
+ async def len(self, queue: str) -> int:
95
+ return await self._client.zcard(f"queue:{queue}") or 0 # type: ignore
96
+
97
+ async def clear(self, queue: str) -> None:
98
+ await self._client.delete(f"queue:{queue}", f"processing:{queue}:{self._id}") # type: ignore
99
+
100
+
101
+ class MemoryBroker(Broker):
102
+ def __init__(self) -> None:
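+ # Maps queue name -> {serialized message: timestamp when the message becomes due}.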
103
+ self._queues: defaultdict[str, dict[str, float]] = defaultdict(dict)
104
+
105
+ async def close(self) -> None:
106
+ pass
107
+
108
+ async def publish(
109
+ self,
110
+ message: Message,
111
+ delay: float = 0,
112
+ ) -> None:
113
+ self._queues[message.task_name][message.to_json()] = time.time() + delay
114
+
115
+ async def consume(self, queue: str) -> Message:
116
+ while True:
117
+ now = time.time()
118
+ for msg, score in list(self._queues[queue].items()):
119
+ if score <= now:
120
+ del self._queues[queue][msg]
121
+ return Message.from_json(msg)
122
+ await asyncio.sleep(0.1)
123
+
124
+ async def ack(self, message: Message) -> None:
125
+ pass
126
+
127
+ async def nack(
128
+ self,
129
+ message: Message,
130
+ delay: float = 0,
131
+ drop: bool = False,
132
+ ) -> None:
133
+ if not drop:
134
+ await self.publish(message, delay=delay)
135
+
136
+ async def len(self, queue: str) -> int:
137
+ return len(self._queues[queue])
138
+
139
+ async def clear(self, queue: str) -> None:
140
+ self._queues.pop(queue, None)