ltq 0.3.2__tar.gz → 0.4.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: ltq
-Version: 0.3.2
+Version: 0.4.0
 Summary: Add your description here
 Author: Tom Clesius
 Author-email: Tom Clesius <tomclesius@gmail.com>
@@ -1,6 +1,6 @@
 [project]
 name = "ltq"
-version = "0.3.2"
+version = "0.4.0"
 description = "Add your description here"
 readme = "README.md"
 authors = [{ name = "Tom Clesius", email = "tomclesius@gmail.com" }]
@@ -19,7 +19,7 @@ requires = ["uv_build>=0.9.26,<0.10.0"]
 build-backend = "uv_build"
 
 [tool.bumpversion]
-current_version = "0.3.2"
+current_version = "0.4.0"
 commit = true
 tag = true
 message = "v{new_version}"
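The release version changes in three places: the built package metadata (which reflects [project].version at build time), the [project] table, and [tool.bumpversion].current_version. The latter two are what the version-bump tooling keeps in sync; [tool.bumpversion] is the table read by bump-my-version (presumably the tool used here), and with commit = true, tag = true and message = "v{new_version}", a 0.3.2 → 0.4.0 bump also creates a release commit and a v0.4.0 tag. Which exact command produced the bump is not recorded in this diff.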
@@ -37,4 +37,5 @@ class App:
             while any(t.is_alive() for t in threads):
                 await asyncio.sleep(0.2)
         except asyncio.CancelledError:
+            # Allow graceful shutdown when the run coroutine is cancelled.
             pass
@@ -41,6 +41,14 @@ class RedisBroker(Broker):
         self.url = url
         self._client = aioredis.from_url(url)
         self._id = uuid.uuid4().hex[:8]
+        self._consume = self._client.register_script("""
+            local ready = redis.call('zrangebyscore', KEYS[1], 0, ARGV[1], 'LIMIT', 0, 1)
+            if #ready == 0 then return nil end
+            local msg = ready[1]
+            redis.call('zadd', KEYS[2], ARGV[1], msg)
+            redis.call('zrem', KEYS[1], msg)
+            return msg
+        """)
 
     async def close(self) -> None:
         await self._client.aclose()
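The registered script above is what the rewritten consume() in the next hunk calls. The motivation is atomicity: the old consume() issued zrangebyscore, zadd and zrem as three separate round trips, so two workers polling the same queue could both read the same message before either moved it into its processing set; inside a Lua script, Redis runs all three commands as one indivisible step. Below is a minimal, self-contained sketch of the same pattern using redis-py's asyncio client and the Lua source from the diff; the queue name, worker id and Redis URL are made up for illustration.

import asyncio
import time

import redis.asyncio as aioredis


async def main() -> None:
    client = aioredis.from_url("redis://localhost:6379")  # assumes a local Redis

    # register_script() loads the Lua source once and returns a callable object;
    # each call runs the script on the server with the given KEYS/ARGV bindings.
    claim = client.register_script("""
        local ready = redis.call('zrangebyscore', KEYS[1], 0, ARGV[1], 'LIMIT', 0, 1)
        if #ready == 0 then return nil end
        local msg = ready[1]
        redis.call('zadd', KEYS[2], ARGV[1], msg)
        redis.call('zrem', KEYS[1], msg)
        return msg
    """)

    # Seed one due message, then atomically claim it into a per-worker processing set.
    await client.zadd("queue:demo", {"hello": time.time()})
    msg = await claim(keys=["queue:demo", "processing:demo:worker1"], args=[time.time()])
    print(msg)  # b'hello' (None if nothing was due yet)

    await client.aclose()


asyncio.run(main())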
@@ -60,14 +68,11 @@ class RedisBroker(Broker):
 
     async def consume(self, queue: str) -> Message:
         while True:
-            now = time.time()
-            ready = await self._client.zrangebyscore(
-                f"queue:{queue}", 0, now, start=0, num=1
-            ) # type: ignore
-            if ready:
-                msg = ready[0]
-                await self._client.zadd(f"processing:{queue}:{self._id}", {msg: now,}) # type: ignore
-                await self._client.zrem(f"queue:{queue}", msg) # type: ignore
+            msg = await self._consume(
+                keys=[f"queue:{queue}", f"processing:{queue}:{self._id}"],
+                args=[time.time()],
+            )
+            if msg:
                 return Message.from_json(msg)
             await asyncio.sleep(0.1)
 
@@ -100,6 +100,7 @@ class Sentry(Middleware):
             sentry_sdk.init(dsn=dsn)
             self.sentry = sentry_sdk
         except ImportError:
+            # Sentry SDK is optional; if it's not installed, disable Sentry integration silently.
             pass
 
     @asynccontextmanager
@@ -96,6 +96,7 @@ class Scheduler:
         try:
             await self.task
         except asyncio.CancelledError:
+            # Task cancellation is expected during normal scheduler shutdown.
             pass
         self.task = None
         self.logger.info("Scheduler stopped")
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import asyncio
+import copy
 from contextlib import AsyncExitStack
 from typing import Awaitable, Callable, ParamSpec, TypeVar
 
@@ -26,7 +27,7 @@ class Worker:
         self.name = name
         self.broker = Broker.from_url(broker_url)
         self.tasks: list[Task] = []
-        self.middlewares: list[Middleware] = middlewares or list(DEFAULT)
+        self.middlewares: list[Middleware] = middlewares or copy.deepcopy(DEFAULT)
         self.concurrency: int = concurrency
         self.logger = get_logger(name)
 
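The switch from list(DEFAULT) to copy.deepcopy(DEFAULT) (paired with the new import copy above) matters because list(DEFAULT) copies only the list, not the middleware objects inside it: every Worker constructed with the defaults would share the same middleware instances, and therefore any per-worker state held on them. A small sketch of the aliasing issue; CounterMiddleware is an illustrative stand-in, not a class from the package.

import copy


class CounterMiddleware:
    """Stand-in middleware carrying per-worker state."""

    def __init__(self) -> None:
        self.seen = 0


DEFAULT = [CounterMiddleware()]

# Shallow copy: two new lists, but both hold the *same* middleware instance.
a = list(DEFAULT)
b = list(DEFAULT)
a[0].seen += 1
print(b[0].seen)  # 1 -- state leaks between "workers"

# Deep copy: each list gets its own independent middleware instance.
c = copy.deepcopy(DEFAULT)
d = copy.deepcopy(DEFAULT)
c[0].seen += 1
print(d[0].seen)  # 0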
@@ -55,6 +56,7 @@ class Worker:
 
     async def _poll(self, task: Task, broker: Broker) -> None:
         sem = asyncio.Semaphore(self.concurrency)
+        pending: set[asyncio.Task] = set()
         self.logger.info(f"Polling for Task {task.name}")
 
         try:
@@ -62,9 +64,13 @@ class Worker:
                 message = await broker.consume(task.name)
                 # concurrency limiter, without, queue would be drained in one go.
                 await sem.acquire()
-                asyncio.create_task(self._process(task, broker, sem, message))
+                t = asyncio.create_task(self._process(task, broker, sem, message))
+                pending.add(t)
+                t.add_done_callback(pending.discard)
         except asyncio.CancelledError:
             self.logger.info(f"Worker {task.name} cancelled...")
+            if pending:
+                await asyncio.gather(*pending, return_exceptions=True)
             raise
 
     async def _process(
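The pending set added to _poll() does two jobs. asyncio only keeps weak references to tasks, so a bare asyncio.create_task() whose result is dropped can be garbage-collected before it finishes; keeping each task in pending (and discarding it once it completes) pins it for its lifetime. And on cancellation, gathering whatever is still in pending lets in-flight messages finish before the worker re-raises and exits. A minimal sketch of the same pattern, with work() as an illustrative stand-in for _process():

import asyncio


async def work(i: int) -> None:
    # Illustrative stand-in for Worker._process().
    await asyncio.sleep(0.1)
    print(f"done {i}")


async def poll() -> None:
    pending: set[asyncio.Task] = set()
    try:
        for i in range(10):
            t = asyncio.create_task(work(i))
            pending.add(t)                         # strong reference keeps the task alive
            t.add_done_callback(pending.discard)   # drop it once it finishes
            await asyncio.sleep(0.01)
    except asyncio.CancelledError:
        # Drain in-flight work before propagating the cancellation.
        if pending:
            await asyncio.gather(*pending, return_exceptions=True)
        raise


async def main() -> None:
    poller = asyncio.create_task(poll())
    await asyncio.sleep(0.035)
    poller.cancel()  # simulate worker shutdown
    try:
        await poller
    except asyncio.CancelledError:
        pass  # tasks created before the cancel still print "done ..."


asyncio.run(main())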
8 files without changes