ltq 0.3.1__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ltq/middleware.py CHANGED
@@ -1,117 +1,115 @@
  from __future__ import annotations

- import asyncio
+ from datetime import timedelta
+ from functools import lru_cache
+ import random
  import time
  from abc import ABC, abstractmethod
- from typing import Any, Awaitable, Callable
+ from contextlib import asynccontextmanager
+ from typing import TYPE_CHECKING, AsyncIterator

- from .errors import RetryMessage
- from .message import Message
- from .logger import get_logger
+ from .errors import RejectError, RetryError

- logger = get_logger()
- Handler = Callable[[Message], Awaitable[Any]]
+ if TYPE_CHECKING:
+     from .message import Message
+     from .task import Task


  class Middleware(ABC):
      @abstractmethod
-     async def handle(self, message: Message, next_handler: Handler) -> Any: ...
+     @asynccontextmanager
+     async def __call__(self, message: Message, task: Task) -> AsyncIterator[None]:
+         yield


- class Retry(Middleware):
-     def __init__(
-         self,
-         max_retries: int = 3,
-         min_delay: float = 1.0,
-         max_delay: float = 60.0,
-         backoff: float = 2.0,
-     ):
-         self.max_retries = max_retries
-         self.min_delay = min_delay
-         self.max_delay = max_delay
-         self.backoff = backoff
-
-     async def handle(self, message: Message, next_handler: Handler) -> Any:
-         retries = message.ctx.get("retries", 0)
+ class MaxTries(Middleware):
+     @asynccontextmanager
+     async def __call__(self, message: Message, task: Task) -> AsyncIterator[None]:
+         max_tries = task.options.get("max_tries")
+         if max_tries is not None:
+             if message.ctx.get("tries", 0) >= max_tries:
+                 raise RejectError(f"Message {message.id} exceeded max tries")

          try:
-             return await next_handler(message)
-         except Exception as e:
-             retries += 1
-             message.ctx["retries"] = retries
-             max_retries = max(self.max_retries - 1, 0)
-
-             if retries > max_retries:
-                 raise
-
-             delay = min(
-                 self.min_delay * (self.backoff ** (retries - 1)),
-                 self.max_delay,
-             )
-             logger.warning(
-                 f"Retry attempt {retries}/{max_retries} ({type(e).__name__})",
-                 exc_info=True,
-             )
-             raise RetryMessage(delay, str(e))
-
-
- class RateLimit(Middleware):
-     def __init__(self, requests_per_second: float):
-         self.min_interval = 1.0 / requests_per_second
-         self._last_request: float = 0
-         self._lock = asyncio.Lock()
-
-     async def handle(self, message: Message, next_handler: Handler) -> Any:
-         async with self._lock:
-             now = time.monotonic()
-             elapsed = now - self._last_request
-             if elapsed < self.min_interval:
-                 await asyncio.sleep(self.min_interval - elapsed)
-             self._last_request = time.monotonic()
-
-         return await next_handler(message)
+             yield
+         except Exception:
+             if not message.ctx.pop("rate_limited", False):
+                 message.ctx["tries"] = message.ctx.get("tries", 0) + 1
+             raise
+
+
+ class MaxAge(Middleware):
+     @asynccontextmanager
+     async def __call__(self, message: Message, task: Task) -> AsyncIterator[None]:
+         max_age: timedelta | None = task.options.get("max_age")
+         created_at = message.ctx.get("created_at")
+
+         if max_age is not None and created_at is not None:
+             age = time.time() - float(created_at)
+             if age > max_age.total_seconds():
+                 raise RejectError(f"Message {message.id} too old")
+
+         yield
+
+
+ class MaxRate(Middleware):
+     def __init__(self) -> None:
+         self.last_times: dict[str, float] = {}
+
+     @lru_cache(maxsize=128)
+     def _parse_rate(self, rate: str) -> float:
+         count, unit = rate.split("/")
+         count = float(count)
+         unit = unit.strip().lower()
+
+         if unit == "s":
+             return count
+         elif unit == "m":
+             return count / 60
+         elif unit == "h":
+             return count / 3600
+         else:
+             raise ValueError(f"Invalid rate unit: {unit}. Use 's', 'm', or 'h'")
+
+     @asynccontextmanager
+     async def __call__(self, message: Message, task: Task) -> AsyncIterator[None]:
+         max_rate = task.options.get("max_rate")
+         if max_rate:
+             now = time.time()
+             last = self.last_times.get(message.task_name, 0.0)
+             elapsed = now - last
+             rate_per_sec = self._parse_rate(max_rate)
+             interval = 1.0 / rate_per_sec
+
+             if elapsed < interval:
+                 base_delay = interval - elapsed
+                 delay = base_delay * 0.5 + random.uniform(0, base_delay * 0.5)
+                 message.ctx["rate_limited"] = True
+                 raise RetryError(delay=delay)
+
+         self.last_times[message.task_name] = now
+         yield


- class Timeout(Middleware):
-     def __init__(self, timeout: float):
-         self.timeout = timeout
-
-     async def handle(self, message: Message, next_handler: Handler) -> Any:
-         return await asyncio.wait_for(next_handler(message), timeout=self.timeout)
+ class Sentry(Middleware):
+     def __init__(self, dsn: str) -> None:
+         self.sentry = None
+         try:
+             import sentry_sdk  # type: ignore

+             sentry_sdk.init(dsn=dsn)
+             self.sentry = sentry_sdk
+         except ImportError:
+             pass

- class Sentry(Middleware):
-     def __init__(self, dsn: str, **kwargs: Any) -> None:
+     @asynccontextmanager
+     async def __call__(self, message: Message, task: Task) -> AsyncIterator[None]:
          try:
-             import sentry_sdk  # type: ignore[import-not-found]
-         except ModuleNotFoundError as exc:
-             raise ModuleNotFoundError(
-                 "Sentry middleware requires optional dependency 'sentry-sdk'. "
-                 "Install with 'ltq[sentry]'."
-             ) from exc
-
-         self.sentry = sentry_sdk
-         self.sentry.init(dsn=dsn, send_default_pii=True, **kwargs)
-
-     async def handle(self, message: Message, next_handler: Handler) -> Any:
-         with self.sentry.push_scope() as scope:
-             scope.set_tag("task", message.task_name)
-             scope.set_tag("message_id", message.id)
-             scope.set_context(
-                 "message",
-                 {
-                     "id": message.id,
-                     "task": message.task_name,
-                     "args": message.args,
-                     "kwargs": message.kwargs,
-                     "ctx": message.ctx,
-                 },
-             )
-
-             try:
-                 return await next_handler(message)
-             except RetryMessage:
-                 raise
-             except Exception as e:
+             yield
+         except Exception as e:
+             if self.sentry:
                  self.sentry.capture_exception(e)
-                 raise
+             raise
+
+
+ DEFAULT: list[Middleware] = [MaxTries(), MaxAge(), MaxRate()]
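
With 0.3.2 a middleware is an async context manager wrapped around the task call rather than a handle(message, next_handler) chain. A minimal sketch of a custom middleware under the new protocol, assuming the package imports as ltq.<module> as the file layout suggests; the Timing class itself is hypothetical and not part of the package:

from __future__ import annotations

import time
from contextlib import asynccontextmanager
from typing import TYPE_CHECKING, AsyncIterator

from ltq.middleware import Middleware

if TYPE_CHECKING:
    from ltq.message import Message
    from ltq.task import Task


class Timing(Middleware):
    # Hypothetical example: time how long the wrapped task body takes.
    @asynccontextmanager
    async def __call__(self, message: Message, task: Task) -> AsyncIterator[None]:
        start = time.monotonic()
        try:
            yield  # the worker runs task.fn(...) while this context is open
        finally:
            print(f"{task.name} took {time.monotonic() - start:.3f}s")

Raising RejectError or RetryError around the yield is how the built-in MaxTries/MaxAge/MaxRate middlewares above signal the worker to drop or requeue a message; a custom middleware like this can be attached with Worker.register_middleware() from the worker.py diff below.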
ltq/scheduler.py CHANGED
@@ -1,27 +1,22 @@
  from __future__ import annotations

  import asyncio
- import time
  from dataclasses import dataclass, field
  from datetime import datetime
- from typing import TYPE_CHECKING, Any
+ from typing import Any

+ from .broker import Broker
  from .message import Message
- from .utils import dispatch
  from .logger import get_logger

  try:
-     from croniter import croniter
+     from croniter import croniter  # type: ignore
  except ImportError:
      croniter = None

- if TYPE_CHECKING:
-     from .task import Task
-

  @dataclass
  class ScheduledJob:
-     task: Task
      msg: Message
      expr: str
      _cron: Any = field(init=False, repr=False)  # croniter instance
@@ -36,11 +31,16 @@ class ScheduledJob:


  class Scheduler:
-     def __init__(self, poll_interval: float = 10.0) -> None:
+     def __init__(
+         self,
+         broker_url: str = "redis://localhost:6379",
+         poll_interval: float = 10.0,
+     ) -> None:
+         self.broker = Broker.from_url(broker_url)
          self.poll_interval = poll_interval
          self.jobs: list[ScheduledJob] = []
-         self.logger = get_logger("ltq.scheduler")
-         self._running = False
+         self.logger = get_logger("scheduler")
+         self.task: asyncio.Task[None] | None = None

      def cron(self, expr: str, msg: Message) -> None:
          if croniter is None:
@@ -48,35 +48,54 @@ class Scheduler:
                  "Scheduler requires optional dependency 'croniter'. "
                  "Install with 'ltq[scheduler]'."
              )
-         if msg.task is None:
-             raise ValueError("Message must have a task assigned to use with scheduler")
-         self.jobs.append(ScheduledJob(msg.task, msg, expr))
+         self.jobs.append(ScheduledJob(msg, expr))

-     def run(self) -> None:
-         self._running = True
+     async def run(self) -> None:
          self.logger.info("Starting scheduler")
          for job in self.jobs:
              self.logger.info(
-                 f"{job.task.name} [{job.expr}] next={job.next_run:%H:%M:%S}"
+                 f"{job.msg.task_name} [{job.expr}] next={job.next_run:%H:%M:%S}"
              )

-         loop = asyncio.new_event_loop()
-         while self._running:
-             now = datetime.now()
-             due = [job for job in self.jobs if now >= job.next_run]
-             if due:
-                 try:
-                     loop.run_until_complete(dispatch([job.msg for job in due]))
-                     for job in due:
-                         self.logger.info(
-                             f"Enqueued {job.task.name} scheduled={job.next_run:%H:%M:%S}"
-                         )
-                 except Exception:
-                     self.logger.exception("Failed to dispatch scheduled jobs")
-             for job in due:
-                 job.advance()
-             time.sleep(self.poll_interval)
-         loop.close()
-
-     def stop(self) -> None:
-         self._running = False
+         try:
+             while True:
+                 now = datetime.now()
+                 due = [job for job in self.jobs if now >= job.next_run]
+
+                 if due:
+                     try:
+                         for job in due:
+                             await self.broker.publish(job.msg)
+                             self.logger.info(
+                                 f"Enqueued {job.msg.task_name} scheduled={job.next_run:%H:%M:%S}"
+                             )
+                             job.advance()
+                     except Exception:
+                         self.logger.exception("Failed to send scheduled jobs")
+                         # Don't advance jobs on failure - they'll retry next poll
+
+                 await asyncio.sleep(self.poll_interval)
+         finally:
+             await self.broker.close()
+
+     def start(self) -> None:
+         try:
+             asyncio.run(self.run())
+         except KeyboardInterrupt:
+             self.logger.info("Scheduler stopped")
+
+     def start_background(self) -> None:
+         if self.task is not None:
+             raise RuntimeError("Scheduler is already running")
+         self.task = asyncio.create_task(self.run())
+
+     async def stop(self) -> None:
+         if self.task is None:
+             return
+         self.task.cancel()
+         try:
+             await self.task
+         except asyncio.CancelledError:
+             pass
+         self.task = None
+         self.logger.info("Scheduler stopped")
ltq/task.py CHANGED
@@ -2,8 +2,8 @@ from __future__ import annotations

  from typing import Awaitable, Callable, Generic, ParamSpec, TypeVar

+ from .broker import Broker
  from .message import Message
- from .q import Queue

  P = ParamSpec("P")
  R = TypeVar("R")
@@ -12,28 +12,25 @@ R = TypeVar("R")
  class Task(Generic[P, R]):
      def __init__(
          self,
+         broker: Broker,
          name: str,
          fn: Callable[P, Awaitable[R]],
-         queue: Queue,
-         ttl: int | None = None,
+         options: dict | None = None,
      ) -> None:
          self.name = name
          self.fn = fn
-         self.queue = queue
-         self.ttl = ttl
+         self.options = options or {}
+         self.broker = broker

      def message(self, *args: P.args, **kwargs: P.kwargs) -> Message:
          return Message(
              args=args,
              kwargs=kwargs,
-             task=self,
              task_name=self.name,
          )

-     async def send(self, *args: P.args, **kwargs: P.kwargs) -> str:
-         message = self.message(*args, **kwargs)
-         await self.queue.put([message], ttl=self.ttl)
-         return message.id
+     async def send(self, *args: P.args, **kwargs: P.kwargs) -> None:
+         await self.broker.publish(self.message(*args, **kwargs))

      async def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
          return await self.fn(*args, **kwargs)
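
Task now holds a Broker and a free-form options dict instead of a Queue and ttl, and send() publishes a Message without returning its id. A short sketch of the two calling modes, assuming a task registered through Worker.task() and a broker reachable at the default Redis URL; the worker name, task body, and addresses are made up:

import asyncio
from datetime import timedelta

from ltq.worker import Worker

worker = Worker("mailer")


# These option keys are the ones read by the default MaxTries/MaxAge/MaxRate middlewares.
@worker.task(max_tries=3, max_age=timedelta(hours=1), max_rate="10/s")
async def send_email(to: str) -> None:
    print(f"sending to {to}")


async def main() -> None:
    await send_email.send("alice@example.com")  # enqueue a Message via the broker
    await send_email("bob@example.com")         # call the coroutine directly, bypassing the queue


asyncio.run(main())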
ltq/utils.py CHANGED
@@ -1,19 +1 @@
- from __future__ import annotations

- from collections import defaultdict
-
- from .message import Message
- from .q import Queue
-
-
- async def dispatch(messages: list[Message]) -> list[str]:
-     by_queue: defaultdict[Queue, list[Message]] = defaultdict(list)
-     for msg in messages:
-         if msg.task is None:
-             raise ValueError(f"Message {msg.id} has no task assigned")
-         by_queue[msg.task.queue].append(msg)
-
-     for queue, batch in by_queue.items():
-         await queue.put(batch)
-
-     return [msg.id for msg in messages]
ltq/worker.py CHANGED
@@ -1,23 +1,15 @@
  from __future__ import annotations

  import asyncio
- from functools import partial
- from typing import TYPE_CHECKING, Any, Awaitable, Callable, ParamSpec, TypeVar
+ from contextlib import AsyncExitStack
+ from typing import Awaitable, Callable, ParamSpec, TypeVar

- import redis.asyncio as redis
-
- from .errors import RetryMessage
- from .task import Task
- from .message import Message
- from .middleware import Handler, Middleware
- from .q import Queue
+ from .broker import Broker
+ from .errors import RejectError, RetryError
  from .logger import get_logger
-
- if TYPE_CHECKING:
-     from redis.asyncio import Redis as AsyncRedis
-
- logger = get_logger()
-
+ from .message import Message
+ from .middleware import DEFAULT, Middleware
+ from .task import Task

  P = ParamSpec("P")
  R = TypeVar("R")
@@ -26,80 +18,98 @@ R = TypeVar("R")
  class Worker:
      def __init__(
          self,
-         url: str = "redis://localhost:6379",
+         name: str,
+         broker_url: str = "redis://localhost:6379",
+         concurrency: int = 100,
          middlewares: list[Middleware] | None = None,
-         concurrency: int = 250,
-         poll_sleep: float = 0.1,
      ) -> None:
-         self.client: AsyncRedis = redis.from_url(url)
+         self.name = name
+         self.broker = Broker.from_url(broker_url)
          self.tasks: list[Task] = []
-         self.middlewares: list[Middleware] = middlewares or []
+         self.middlewares: list[Middleware] = middlewares or list(DEFAULT)
          self.concurrency: int = concurrency
-         self.poll_sleep: float = poll_sleep
+         self.logger = get_logger(name)
+
+     def register_middleware(self, middleware: Middleware, pos: int = -1) -> None:
+         if pos == -1:
+             self.middlewares.append(middleware)
+         else:
+             self.middlewares.insert(pos, middleware)

      def task(
          self,
-         queue_name: str | None = None,
-         ttl: int | None = None,
+         **options,
      ) -> Callable[[Callable[P, Awaitable[R]]], Task[P, R]]:
          def decorator(fn: Callable[P, Awaitable[R]]) -> Task[P, R]:
-             task_name = f"{fn.__module__}:{fn.__qualname__}"
-             queue = Queue(self.client, queue_name or task_name)
+             task_name = f"{self.name}:{fn.__qualname__}"
              task = Task(
                  name=task_name,
                  fn=fn,
-                 queue=queue,
-                 ttl=ttl,
+                 options=options,
+                 broker=self.broker,
              )
              self.tasks.append(task)
              return task

          return decorator

-     async def processor(self, task: Task):
-         async def base(message: Message) -> Any:
-             return await task.fn(*message.args, **message.kwargs)
-
-         handler: Handler = base
-         for middleware in reversed(self.middlewares):
-             handler = partial(middleware.handle, next_handler=handler)
-
+     async def _poll(self, task: Task, broker: Broker) -> None:
          sem = asyncio.Semaphore(self.concurrency)
-         pending: dict[asyncio.Task, Message] = {}
+         self.logger.info(f"Polling for Task {task.name}")

-         async def process(msg: Message) -> None:
-             async with sem:
-                 try:
-                     await handler(msg)
-                 except RetryMessage as e:
-                     logger.warning(f"Retrying in {e.delay}s: {e}")
-                     await task.queue.put([msg], delay=e.delay)
-                 except Exception as e:
-                     logger.error(
-                         f"Rejected after error in {task.name}: {e}",
-                         exc_info=True,
-                     )
-
-         while True:
-             messages = await task.queue.get(self.concurrency)
-             if not messages:
-                 await asyncio.sleep(self.poll_sleep)
-                 continue
-
-             logger.debug(f"Processing {len(messages)} messages for {task.name}")
+         try:
+             while True:
+                 message = await broker.consume(task.name)
+                 # concurrency limiter, without, queue would be drained in one go.
+                 await sem.acquire()
+                 asyncio.create_task(self._process(task, broker, sem, message))
+         except asyncio.CancelledError:
+             self.logger.info(f"Worker {task.name} cancelled...")
+             raise

-             for msg in messages:
-                 t = asyncio.create_task(process(msg))
-                 pending[t] = msg
+     async def _process(
+         self,
+         task: Task,
+         broker: Broker,
+         sem: asyncio.Semaphore,
+         message: Message,
+     ) -> None:
+         try:
+             self.logger.debug(f"Processing message {message.id}")
+             try:
+                 if message.task_name != task.name:
+                     # This should never happen.
+                     raise RejectError(
+                         f"Message {message.id} for unknown task '{message.task_name}' (expected '{task.name}')"
+                     )

-             done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
-             await task.queue.ack([pending.pop(t) for t in done])
+                 async with AsyncExitStack() as stack:
+                     for middleware in self.middlewares:
+                         await stack.enter_async_context(middleware(message, task))
+                     await task.fn(*message.args, **message.kwargs)
+
+                 await broker.ack(message)
+             except RejectError as e:
+                 self.logger.warning(f"Message {message.id} rejected: {e}")
+                 await broker.nack(message, drop=True)
+             except RetryError as e:
+                 self.logger.debug(f"Retrying in {e.delay}s: {e}")
+                 await broker.nack(message, delay=e.delay or 0)
+             except Exception as e:
+                 self.logger.error(
+                     f"Rejected after error in {task.name}: {e}",
+                     exc_info=True,
+                 )
+                 await broker.nack(message, drop=True)
+         finally:
+             sem.release()

      async def run(self) -> None:
          try:
-             processors = (self.processor(task) for task in self.tasks)
-             await asyncio.gather(*processors)
+             await asyncio.gather(
+                 *[self._poll(task, self.broker) for task in self.tasks]
+             )
          except asyncio.CancelledError:
-             logger.info("Worker shutting down...")
+             self.logger.info("Worker shutting down...")
          finally:
-             await self.client.aclose()
+             await self.broker.close()
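
End to end, the worker now identifies itself by name, builds its own Broker, and wraps each message in the middleware stack via AsyncExitStack. A minimal sketch of wiring a worker up under the new API; the worker name, task, and Sentry DSN are placeholders, and the default middlewares (MaxTries, MaxAge, MaxRate) apply unless an explicit list is passed in:

import asyncio

from ltq.worker import Worker

worker = Worker("images", broker_url="redis://localhost:6379", concurrency=50)
# Optional: extra middlewares are appended after the defaults (or inserted with pos=...).
# from ltq.middleware import Sentry
# worker.register_middleware(Sentry(dsn="https://..."))  # needs sentry-sdk installed


@worker.task(max_tries=5)
async def resize(url: str, width: int) -> None:
    print(f"resizing {url} to {width}px")


if __name__ == "__main__":
    # run() starts one _poll loop per registered task and processes messages
    # concurrently up to the semaphore limit, acking or nacking via the broker.
    asyncio.run(worker.run())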