lionherd-core 1.0.0a3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lionherd_core/__init__.py +84 -0
- lionherd_core/base/__init__.py +30 -0
- lionherd_core/base/_utils.py +295 -0
- lionherd_core/base/broadcaster.py +128 -0
- lionherd_core/base/element.py +300 -0
- lionherd_core/base/event.py +322 -0
- lionherd_core/base/eventbus.py +112 -0
- lionherd_core/base/flow.py +236 -0
- lionherd_core/base/graph.py +616 -0
- lionherd_core/base/node.py +212 -0
- lionherd_core/base/pile.py +811 -0
- lionherd_core/base/progression.py +261 -0
- lionherd_core/errors.py +104 -0
- lionherd_core/libs/__init__.py +2 -0
- lionherd_core/libs/concurrency/__init__.py +60 -0
- lionherd_core/libs/concurrency/_cancel.py +85 -0
- lionherd_core/libs/concurrency/_errors.py +80 -0
- lionherd_core/libs/concurrency/_patterns.py +238 -0
- lionherd_core/libs/concurrency/_primitives.py +253 -0
- lionherd_core/libs/concurrency/_priority_queue.py +135 -0
- lionherd_core/libs/concurrency/_resource_tracker.py +66 -0
- lionherd_core/libs/concurrency/_task.py +58 -0
- lionherd_core/libs/concurrency/_utils.py +61 -0
- lionherd_core/libs/schema_handlers/__init__.py +35 -0
- lionherd_core/libs/schema_handlers/_function_call_parser.py +122 -0
- lionherd_core/libs/schema_handlers/_minimal_yaml.py +88 -0
- lionherd_core/libs/schema_handlers/_schema_to_model.py +251 -0
- lionherd_core/libs/schema_handlers/_typescript.py +153 -0
- lionherd_core/libs/string_handlers/__init__.py +15 -0
- lionherd_core/libs/string_handlers/_extract_json.py +65 -0
- lionherd_core/libs/string_handlers/_fuzzy_json.py +103 -0
- lionherd_core/libs/string_handlers/_string_similarity.py +347 -0
- lionherd_core/libs/string_handlers/_to_num.py +63 -0
- lionherd_core/ln/__init__.py +45 -0
- lionherd_core/ln/_async_call.py +314 -0
- lionherd_core/ln/_fuzzy_match.py +166 -0
- lionherd_core/ln/_fuzzy_validate.py +151 -0
- lionherd_core/ln/_hash.py +141 -0
- lionherd_core/ln/_json_dump.py +347 -0
- lionherd_core/ln/_list_call.py +110 -0
- lionherd_core/ln/_to_dict.py +373 -0
- lionherd_core/ln/_to_list.py +190 -0
- lionherd_core/ln/_utils.py +156 -0
- lionherd_core/lndl/__init__.py +62 -0
- lionherd_core/lndl/errors.py +30 -0
- lionherd_core/lndl/fuzzy.py +321 -0
- lionherd_core/lndl/parser.py +427 -0
- lionherd_core/lndl/prompt.py +137 -0
- lionherd_core/lndl/resolver.py +323 -0
- lionherd_core/lndl/types.py +287 -0
- lionherd_core/protocols.py +181 -0
- lionherd_core/py.typed +0 -0
- lionherd_core/types/__init__.py +46 -0
- lionherd_core/types/_sentinel.py +131 -0
- lionherd_core/types/base.py +341 -0
- lionherd_core/types/operable.py +133 -0
- lionherd_core/types/spec.py +313 -0
- lionherd_core/types/spec_adapters/__init__.py +10 -0
- lionherd_core/types/spec_adapters/_protocol.py +125 -0
- lionherd_core/types/spec_adapters/pydantic_field.py +177 -0
- lionherd_core-1.0.0a3.dist-info/METADATA +502 -0
- lionherd_core-1.0.0a3.dist-info/RECORD +64 -0
- lionherd_core-1.0.0a3.dist-info/WHEEL +4 -0
- lionherd_core-1.0.0a3.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,238 @@
|
|
|
1
|
+
# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
from collections.abc import Awaitable, Callable, Iterable, Sequence
|
|
7
|
+
from typing import TypeVar
|
|
8
|
+
|
|
9
|
+
import anyio
|
|
10
|
+
|
|
11
|
+
# ExceptionGroup is built-in in Python 3.11+
|
|
12
|
+
from ._cancel import effective_deadline, move_on_at
|
|
13
|
+
from ._errors import non_cancel_subgroup
|
|
14
|
+
from ._primitives import CapacityLimiter
|
|
15
|
+
from ._task import create_task_group
|
|
16
|
+
from ._utils import current_time
|
|
17
|
+
|
|
18
|
+
T = TypeVar("T")
|
|
19
|
+
R = TypeVar("R")
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
__all__ = (
|
|
23
|
+
"CompletionStream",
|
|
24
|
+
"bounded_map",
|
|
25
|
+
"gather",
|
|
26
|
+
"race",
|
|
27
|
+
"retry",
|
|
28
|
+
)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
async def gather(*aws: Awaitable[T], return_exceptions: bool = False) -> list[T | BaseException]:
    """Run awaitables concurrently and return their results in input order."""
    if not aws:
        return []

    slots: list[T | BaseException | None] = [None] * len(aws)

    async def _collect(pos: int, awaitable: Awaitable[T]) -> None:
        # Record the outcome in the caller-visible slot; re-raise failures so
        # the task group sees them when exceptions are not being collected.
        try:
            slots[pos] = await awaitable
        except BaseException as exc:
            slots[pos] = exc
            if not return_exceptions:
                raise

    try:
        async with create_task_group() as tg:
            for pos, awaitable in enumerate(aws):
                tg.start_soon(_collect, pos, awaitable)
    except ExceptionGroup as eg:
        if not return_exceptions:
            # Keep structure/tracebacks: re-raise the non-cancellation subgroup
            # when present, otherwise propagate the pure-cancellation group.
            failures = non_cancel_subgroup(eg)
            raise failures if failures is not None else eg

    return slots  # type: ignore
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
async def race(*aws: Awaitable[T]) -> T:
    """Return the result of whichever awaitable finishes first."""
    if not aws:
        raise ValueError("race() requires at least one awaitable")
    send, recv = anyio.create_memory_object_stream(0)

    async def _report(candidate: Awaitable[T]) -> None:
        # Deliver (True, result) on success, (False, error) on failure.
        try:
            value = await candidate
            await send.send((True, value))
        except BaseException as exc:
            await send.send((False, exc))

    async with send, recv, create_task_group() as tg:
        for candidate in aws:
            tg.start_soon(_report, candidate)
        won, outcome = await recv.receive()
        tg.cancel_scope.cancel()

    # Re-raise outside the TaskGroup context so the caller sees the bare
    # exception rather than an ExceptionGroup wrapper.
    if not won:
        raise outcome  # type: ignore[misc]
    return outcome  # type: ignore[return-value]
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
async def bounded_map(
    func: Callable[[T], Awaitable[R]],
    items: Iterable[T],
    *,
    limit: int,
    return_exceptions: bool = False,
) -> list[R | BaseException]:
    """Apply an async function to every item, running at most *limit* at once."""
    if limit <= 0:
        raise ValueError("limit must be >= 1")

    materialized = list(items)
    if not materialized:
        return []

    results: list[R | BaseException | None] = [None] * len(materialized)
    gate = CapacityLimiter(limit)

    async def _worker(pos: int, item: T) -> None:
        async with gate:
            # Store the outcome; re-raise so the task group fails fast when
            # exceptions are not being collected.
            try:
                results[pos] = await func(item)
            except BaseException as exc:
                results[pos] = exc
                if not return_exceptions:
                    raise

    try:
        async with create_task_group() as tg:
            for pos, item in enumerate(materialized):
                tg.start_soon(_worker, pos, item)
    except ExceptionGroup as eg:
        if not return_exceptions:
            # Keep structure/tracebacks: re-raise the non-cancellation subgroup
            # when present, otherwise propagate the pure-cancellation group.
            failures = non_cancel_subgroup(eg)
            raise failures if failures is not None else eg

    return results  # type: ignore
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
class CompletionStream:
    """Async completion stream with structured concurrency and explicit lifecycle.

    Runs every awaitable in an internal task group and yields ``(index, result)``
    tuples in completion order. Must be entered with ``async with`` before
    iterating; ``limit`` optionally caps how many awaitables run concurrently.
    """

    def __init__(self, aws: Sequence[Awaitable[T]], *, limit: int | None = None):
        self.aws = aws
        self.limit = limit
        # Populated in __aenter__; None until the stream is entered.
        self._task_group: anyio.abc.TaskGroup | None = None
        self._send: anyio.abc.ObjectSendStream[tuple[int, T]] | None = None
        self._recv: anyio.abc.ObjectReceiveStream[tuple[int, T]] | None = None
        # Iteration ends once _completed_count reaches _total_count.
        self._completed_count = 0
        self._total_count = len(aws)

    async def __aenter__(self):
        """Open the result stream and start one task per awaitable."""
        # Buffer of size n lets every task deliver its result without blocking,
        # even when the consumer stops iterating early.
        n = len(self.aws)
        self._send, self._recv = anyio.create_memory_object_stream(n)
        self._task_group = anyio.create_task_group()
        await self._task_group.__aenter__()

        limiter = CapacityLimiter(self.limit) if self.limit else None

        async def _runner(i: int, aw: Awaitable[T]) -> None:
            # Optionally throttle via the shared limiter; always release it.
            if limiter:
                await limiter.acquire()
            try:
                res = await aw
                try:
                    assert self._send is not None
                    await self._send.send((i, res))  # type: ignore[arg-type]
                except anyio.ClosedResourceError:
                    # Stream was closed (e.g., early break from iteration);
                    # swallow the error gracefully and drop the result.
                    pass
            finally:
                if limiter:
                    limiter.release()

        # Start all tasks
        for i, aw in enumerate(self.aws):
            self._task_group.start_soon(_runner, i, aw)

        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Unwind the task group, then close both ends of the stream."""
        # NOTE(review): on a clean exit the task group waits for any tasks that
        # are still running (their sends cannot block thanks to the n-sized
        # buffer); cancellation happens only when an exception is propagating.
        if self._task_group:
            await self._task_group.__aexit__(exc_type, exc_val, exc_tb)
        if self._send:
            await self._send.aclose()
        if self._recv:
            await self._recv.aclose()
        return False

    def __aiter__(self):
        # Guard against iteration before __aenter__ has created the stream.
        if not self._recv:
            raise RuntimeError("CompletionStream must be used as async context manager")
        return self

    async def __anext__(self):
        """Return the next ``(index, result)`` pair in completion order."""
        if self._completed_count >= self._total_count:
            raise StopAsyncIteration

        try:
            result = await self._recv.receive()
            self._completed_count += 1
            return result
        except anyio.EndOfStream:
            raise StopAsyncIteration
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
async def retry(
    fn: Callable[[], Awaitable[T]],
    *,
    attempts: int = 3,
    base_delay: float = 0.1,
    max_delay: float = 2.0,
    retry_on: tuple[type[BaseException], ...] = (Exception,),
    jitter: float = 0.1,
) -> T:
    """Deadline-aware exponential backoff retry.

    Calls ``fn`` until it succeeds, a non-retryable exception escapes,
    ``attempts`` is exhausted, or the ambient cancel-scope deadline expires.

    Args:
        fn: Zero-argument async callable to invoke.
        attempts: Maximum number of calls before the last error is re-raised.
        base_delay: Backoff for the first retry; doubles on each retry.
        max_delay: Upper bound for a single backoff sleep.
        retry_on: Exception types that trigger a retry; others propagate.
        jitter: When non-zero, the delay is scaled by ``1 + random()*jitter``.

    Returns:
        Whatever ``fn`` returns on the first successful call.

    Raises:
        BaseException: The last exception raised by ``fn`` once retries are
            exhausted or the deadline has been reached.
    """
    attempt = 0
    # Snapshot the surrounding deadline once, up front.
    deadline = effective_deadline()
    while True:
        try:
            return await fn()
        except retry_on:
            attempt += 1
            if attempt >= attempts:
                raise

            # Exponential backoff: base_delay * 2**(attempt-1), capped at max_delay.
            delay = min(max_delay, base_delay * (2 ** (attempt - 1)))
            if jitter:
                import random

                delay *= 1 + random.random() * jitter

            # Cap by ambient deadline if one exists
            if deadline is not None:
                remaining = deadline - current_time()
                if remaining <= 0:
                    # Out of time; surface the last error
                    raise
                # Use move_on_at to avoid TOCTOU race between deadline check and sleep
                with move_on_at(deadline):
                    await anyio.sleep(delay)
                # If we were cancelled by deadline, re-raise the original exception
                if current_time() >= deadline:
                    raise
            else:
                await anyio.sleep(delay)
|
|
@@ -0,0 +1,253 @@
|
|
|
1
|
+
# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from typing import Any, Generic, Self, TypeVar
|
|
8
|
+
|
|
9
|
+
import anyio
|
|
10
|
+
import anyio.abc
|
|
11
|
+
|
|
12
|
+
T = TypeVar("T")
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
__all__ = (
|
|
16
|
+
"CapacityLimiter",
|
|
17
|
+
"Condition",
|
|
18
|
+
"Event",
|
|
19
|
+
"Lock",
|
|
20
|
+
"Queue",
|
|
21
|
+
"Semaphore",
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class Lock:
    """Async mutex lock delegating to ``anyio.Lock``."""

    __slots__ = ("_lock",)

    def __init__(self) -> None:
        self._lock = anyio.Lock()

    async def __aenter__(self) -> Self:
        await self.acquire()
        return self

    async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None:
        self.release()

    async def acquire(self) -> None:
        """Block until the lock is held."""
        await self._lock.acquire()

    def release(self) -> None:
        """Give the lock back."""
        self._lock.release()
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
class Semaphore:
    """Async counting semaphore delegating to ``anyio.Semaphore``."""

    __slots__ = ("_sem",)

    def __init__(self, initial_value: int) -> None:
        """Create a semaphore with *initial_value* free slots (>= 0)."""
        if initial_value < 0:
            raise ValueError("initial_value must be >= 0")
        self._sem = anyio.Semaphore(initial_value)

    async def __aenter__(self) -> Self:
        await self.acquire()
        return self

    async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None:
        self.release()

    async def acquire(self) -> None:
        """Take one slot, waiting until one is free."""
        await self._sem.acquire()

    def release(self) -> None:
        """Give one slot back."""
        self._sem.release()
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class CapacityLimiter:
    """Async capacity limiter delegating to ``anyio.CapacityLimiter``."""

    __slots__ = ("_lim",)

    def __init__(self, total_tokens: float) -> None:
        """Create a limiter with *total_tokens* slots (must be > 0)."""
        if total_tokens <= 0:
            raise ValueError("total_tokens must be > 0")
        self._lim = anyio.CapacityLimiter(total_tokens)

    # Support idiomatic AnyIO usage: `async with limiter: ...`
    async def __aenter__(self) -> Self:
        await self.acquire()
        return self

    async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None:
        self.release()

    async def acquire(self) -> None:
        """Borrow one token, waiting until one is free."""
        await self._lim.acquire()

    def release(self) -> None:
        """Return a previously borrowed token."""
        self._lim.release()

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        """Borrow one token on behalf of *borrower*."""
        await self._lim.acquire_on_behalf_of(borrower)

    def release_on_behalf_of(self, borrower: object) -> None:
        """Return the token held by *borrower*."""
        self._lim.release_on_behalf_of(borrower)

    @property
    def total_tokens(self) -> float:
        """Current capacity limit."""
        return self._lim.total_tokens

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        """Adjust the capacity limit (must stay > 0)."""
        if value <= 0:
            raise ValueError("total_tokens must be > 0")
        self._lim.total_tokens = value

    @property
    def borrowed_tokens(self) -> float:
        """Tokens currently held by borrowers."""
        return self._lim.borrowed_tokens

    @property
    def available_tokens(self) -> float:
        """Tokens currently free."""
        return self._lim.available_tokens

    @property
    def remaining_tokens(self) -> float:
        """Alias for ``available_tokens`` (deprecated)."""
        return self._lim.available_tokens
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
@dataclass(slots=True)
class Queue(Generic[T]):
    """Async FIFO queue backed by an anyio memory object stream pair."""

    _send: anyio.abc.ObjectSendStream[T]
    _recv: anyio.abc.ObjectReceiveStream[T]

    @classmethod
    def with_maxsize(cls, maxsize: int) -> Queue[T]:
        """Build a queue whose buffer holds at most *maxsize* items."""
        tx, rx = anyio.create_memory_object_stream(maxsize)
        return cls(tx, rx)

    @property
    def sender(self) -> anyio.abc.ObjectSendStream[T]:
        """Underlying send stream."""
        return self._send

    @property
    def receiver(self) -> anyio.abc.ObjectReceiveStream[T]:
        """Underlying receive stream."""
        return self._recv

    async def put(self, item: T) -> None:
        """Enqueue *item*, waiting while the buffer is full."""
        await self._send.send(item)

    def put_nowait(self, item: T) -> None:
        """Enqueue *item* immediately, without blocking."""
        self._send.send_nowait(item)  # type: ignore[attr-defined]

    async def get(self) -> T:
        """Dequeue the next item, waiting while the buffer is empty."""
        return await self._recv.receive()

    def get_nowait(self) -> T:
        """Dequeue the next item immediately, without blocking."""
        return self._recv.receive_nowait()  # type: ignore[attr-defined]

    async def close(self) -> None:
        """Close both ends of the stream pair."""
        await self._send.aclose()
        await self._recv.aclose()

    async def __aenter__(self) -> Self:
        return self

    async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None:
        await self.close()
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
class Event:
    """Async one-shot flag for signaling between tasks."""

    __slots__ = ("_event",)

    def __init__(self) -> None:
        self._event = anyio.Event()

    def is_set(self) -> bool:
        """Return whether the flag has been set."""
        return self._event.is_set()

    def set(self) -> None:
        """Set the flag."""
        self._event.set()

    async def wait(self) -> None:
        """Block until the flag is set."""
        await self._event.wait()

    def statistics(self) -> anyio.EventStatistics:
        """Return the underlying event's statistics."""
        return self._event.statistics()
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
class Condition:
    """Async condition variable delegating to ``anyio.Condition``."""

    __slots__ = ("_condition",)

    def __init__(self, lock: Lock | None = None) -> None:
        """Create a condition, optionally bound to an existing ``Lock``."""
        inner = lock._lock if lock else None
        self._condition = anyio.Condition(inner)

    async def __aenter__(self) -> Self:
        await self.acquire()
        return self

    async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None:
        self.release()

    async def acquire(self) -> None:
        """Take the underlying lock."""
        await self._condition.acquire()

    def release(self) -> None:
        """Give the underlying lock back."""
        self._condition.release()

    async def wait(self) -> None:
        """Sleep until another task calls ``notify``/``notify_all``."""
        await self._condition.wait()

    def notify(self, n: int = 1) -> None:
        """Wake up to *n* waiting tasks."""
        self._condition.notify(n)

    def notify_all(self) -> None:
        """Wake every waiting task."""
        self._condition.notify_all()

    def statistics(self) -> anyio.ConditionStatistics:
        """Return the underlying condition's statistics."""
        return self._condition.statistics()
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
import heapq
|
|
7
|
+
from typing import Generic, TypeVar
|
|
8
|
+
|
|
9
|
+
from ._primitives import Condition
|
|
10
|
+
|
|
11
|
+
T = TypeVar("T")
|
|
12
|
+
|
|
13
|
+
__all__ = ("PriorityQueue", "QueueEmpty", "QueueFull")
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class QueueEmpty(Exception):  # noqa: N818
    """Raised when ``get_nowait()`` is called on an empty queue."""
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class QueueFull(Exception):  # noqa: N818
    """Raised when ``put_nowait()`` is called on a queue already at maxsize."""
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class PriorityQueue(Generic[T]):
    """Async priority queue (heapq + anyio.Condition).

    API: Similar to asyncio.PriorityQueue, but nowait methods are async.

    A single condition variable guards the heap and serves both putters
    and getters; every mutation issues one ``notify()``.

    Attributes:
        maxsize: Maximum queue size (0 = unlimited)
    """

    def __init__(self, maxsize: int = 0):
        """Initialize priority queue.

        Args:
            maxsize: Max size (0 = unlimited)
        """
        self.maxsize = maxsize
        self._queue: list[T] = []
        self._condition = Condition()

    async def put(self, item: T) -> None:
        """Put item into queue (blocks if full).

        Args:
            item: Item (tuple with priority as first element)
        """
        async with self._condition:
            # Wait if queue is full
            while self.maxsize > 0 and len(self._queue) >= self.maxsize:
                await self._condition.wait()

            heapq.heappush(self._queue, item)
            # Wake one waiter (e.g. a getter blocked on an empty queue).
            self._condition.notify()

    async def put_nowait(self, item: T) -> None:
        """Put item without blocking (async, unlike asyncio).

        Args:
            item: Item (tuple with priority as first element)

        Raises:
            QueueFull: If queue is at maxsize
        """
        async with self._condition:
            if self.maxsize > 0 and len(self._queue) >= self.maxsize:
                raise QueueFull("Queue is full")

            heapq.heappush(self._queue, item)
            # Notify waiting getters that item is available
            self._condition.notify()

    async def get(self) -> T:
        """Get highest priority item (blocks if empty).

        Returns:
            Highest priority item (lowest value first)
        """
        async with self._condition:
            # Wait if queue is empty
            while not self._queue:
                await self._condition.wait()

            item = heapq.heappop(self._queue)
            # Wake one waiter (e.g. a putter blocked on a full queue).
            self._condition.notify()
            return item

    async def get_nowait(self) -> T:
        """Get item without blocking (async, unlike asyncio).

        Returns:
            Highest priority item

        Raises:
            QueueEmpty: If queue is empty
        """
        async with self._condition:
            if not self._queue:
                raise QueueEmpty("Queue is empty")

            item = heapq.heappop(self._queue)
            # Notify waiting putters that space is available
            self._condition.notify()
            return item

    def qsize(self) -> int:
        """Approximate queue size (unlocked, racy).

        Note: Value may be stale immediately. Use for monitoring only.

        Returns:
            Number of items in queue
        """
        return len(self._queue)

    def empty(self) -> bool:
        """Check if queue is empty (unlocked, racy).

        Note: Value may be stale immediately. Use for monitoring only.

        Returns:
            True if queue is empty
        """
        return len(self._queue) == 0

    def full(self) -> bool:
        """Check if queue is full (unlocked, racy).

        Note: Value may be stale immediately. Use for monitoring only.

        Returns:
            True if queue is at maxsize
        """
        return self.maxsize > 0 and len(self._queue) >= self.maxsize
|
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
# Copyright (c) 2025, HaiyangLi <quantocean.li at gmail dot com>
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
import threading
|
|
7
|
+
import time
|
|
8
|
+
import weakref
|
|
9
|
+
from dataclasses import dataclass
|
|
10
|
+
|
|
11
|
+
__all__ = (
|
|
12
|
+
"LeakInfo",
|
|
13
|
+
"LeakTracker",
|
|
14
|
+
"track_resource",
|
|
15
|
+
"untrack_resource",
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass(frozen=True, slots=True)
class LeakInfo:
    """Immutable record describing one tracked object."""

    # Display name; LeakTracker.track falls back to "obj-<id>" when none given.
    name: str
    # Optional caller-supplied category label.
    kind: str | None
    # Wall-clock timestamp (time.time()) taken when tracking started.
    created_at: float
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class LeakTracker:
    """Track live objects for leak detection."""

    def __init__(self) -> None:
        # Maps id(obj) -> LeakInfo for every object still considered live.
        self._live: dict[int, LeakInfo] = {}
        self._lock = threading.Lock()

    def track(self, obj: object, *, name: str | None, kind: str | None) -> None:
        """Register *obj*; its record is dropped when *obj* is collected."""
        obj_id = id(obj)
        record = LeakInfo(name=name or f"obj-{obj_id}", kind=kind, created_at=time.time())

        def _forget(stale_id: int = obj_id) -> None:
            # Invoked by weakref.finalize once the object is garbage-collected.
            with self._lock:
                self._live.pop(stale_id, None)

        with self._lock:
            self._live[obj_id] = record
        weakref.finalize(obj, _forget)

    def untrack(self, obj: object) -> None:
        """Manually drop *obj* from the live set, if present."""
        with self._lock:
            self._live.pop(id(obj), None)

    def live(self) -> list[LeakInfo]:
        """Return a snapshot of records for objects still alive."""
        with self._lock:
            return list(self._live.values())

    def clear(self) -> None:
        """Forget every tracked record."""
        with self._lock:
            self._live.clear()
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
# Process-wide singleton backing the track_resource/untrack_resource helpers.
_TRACKER = LeakTracker()
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def track_resource(obj: object, name: str | None = None, kind: str | None = None) -> None:
    """Register *obj* with the module-level tracker for leak detection.

    Args:
        obj: Object to watch; it is untracked automatically when collected.
        name: Optional display name (defaults to "obj-<id>").
        kind: Optional category label stored alongside the name.
    """
    _TRACKER.track(obj, name=name, kind=kind)
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def untrack_resource(obj: object) -> None:
    """Remove *obj* from the module-level tracker, if present."""
    _TRACKER.untrack(obj)
|