krons 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. kronos/__init__.py +0 -0
  2. kronos/core/__init__.py +145 -0
  3. kronos/core/broadcaster.py +116 -0
  4. kronos/core/element.py +225 -0
  5. kronos/core/event.py +316 -0
  6. kronos/core/eventbus.py +116 -0
  7. kronos/core/flow.py +356 -0
  8. kronos/core/graph.py +442 -0
  9. kronos/core/node.py +982 -0
  10. kronos/core/pile.py +575 -0
  11. kronos/core/processor.py +494 -0
  12. kronos/core/progression.py +296 -0
  13. kronos/enforcement/__init__.py +57 -0
  14. kronos/enforcement/common/__init__.py +34 -0
  15. kronos/enforcement/common/boolean.py +85 -0
  16. kronos/enforcement/common/choice.py +97 -0
  17. kronos/enforcement/common/mapping.py +118 -0
  18. kronos/enforcement/common/model.py +102 -0
  19. kronos/enforcement/common/number.py +98 -0
  20. kronos/enforcement/common/string.py +140 -0
  21. kronos/enforcement/context.py +129 -0
  22. kronos/enforcement/policy.py +80 -0
  23. kronos/enforcement/registry.py +153 -0
  24. kronos/enforcement/rule.py +312 -0
  25. kronos/enforcement/service.py +370 -0
  26. kronos/enforcement/validator.py +198 -0
  27. kronos/errors.py +146 -0
  28. kronos/operations/__init__.py +32 -0
  29. kronos/operations/builder.py +228 -0
  30. kronos/operations/flow.py +398 -0
  31. kronos/operations/node.py +101 -0
  32. kronos/operations/registry.py +92 -0
  33. kronos/protocols.py +414 -0
  34. kronos/py.typed +0 -0
  35. kronos/services/__init__.py +81 -0
  36. kronos/services/backend.py +286 -0
  37. kronos/services/endpoint.py +608 -0
  38. kronos/services/hook.py +471 -0
  39. kronos/services/imodel.py +465 -0
  40. kronos/services/registry.py +115 -0
  41. kronos/services/utilities/__init__.py +36 -0
  42. kronos/services/utilities/header_factory.py +87 -0
  43. kronos/services/utilities/rate_limited_executor.py +271 -0
  44. kronos/services/utilities/rate_limiter.py +180 -0
  45. kronos/services/utilities/resilience.py +414 -0
  46. kronos/session/__init__.py +41 -0
  47. kronos/session/exchange.py +258 -0
  48. kronos/session/message.py +60 -0
  49. kronos/session/session.py +411 -0
  50. kronos/specs/__init__.py +25 -0
  51. kronos/specs/adapters/__init__.py +0 -0
  52. kronos/specs/adapters/_utils.py +45 -0
  53. kronos/specs/adapters/dataclass_field.py +246 -0
  54. kronos/specs/adapters/factory.py +56 -0
  55. kronos/specs/adapters/pydantic_adapter.py +309 -0
  56. kronos/specs/adapters/sql_ddl.py +946 -0
  57. kronos/specs/catalog/__init__.py +36 -0
  58. kronos/specs/catalog/_audit.py +39 -0
  59. kronos/specs/catalog/_common.py +43 -0
  60. kronos/specs/catalog/_content.py +59 -0
  61. kronos/specs/catalog/_enforcement.py +70 -0
  62. kronos/specs/factory.py +120 -0
  63. kronos/specs/operable.py +314 -0
  64. kronos/specs/phrase.py +405 -0
  65. kronos/specs/protocol.py +140 -0
  66. kronos/specs/spec.py +506 -0
  67. kronos/types/__init__.py +60 -0
  68. kronos/types/_sentinel.py +311 -0
  69. kronos/types/base.py +369 -0
  70. kronos/types/db_types.py +260 -0
  71. kronos/types/identity.py +66 -0
  72. kronos/utils/__init__.py +40 -0
  73. kronos/utils/_hash.py +234 -0
  74. kronos/utils/_json_dump.py +392 -0
  75. kronos/utils/_lazy_init.py +63 -0
  76. kronos/utils/_to_list.py +165 -0
  77. kronos/utils/_to_num.py +85 -0
  78. kronos/utils/_utils.py +375 -0
  79. kronos/utils/concurrency/__init__.py +205 -0
  80. kronos/utils/concurrency/_async_call.py +333 -0
  81. kronos/utils/concurrency/_cancel.py +122 -0
  82. kronos/utils/concurrency/_errors.py +96 -0
  83. kronos/utils/concurrency/_patterns.py +363 -0
  84. kronos/utils/concurrency/_primitives.py +328 -0
  85. kronos/utils/concurrency/_priority_queue.py +135 -0
  86. kronos/utils/concurrency/_resource_tracker.py +110 -0
  87. kronos/utils/concurrency/_run_async.py +67 -0
  88. kronos/utils/concurrency/_task.py +95 -0
  89. kronos/utils/concurrency/_utils.py +79 -0
  90. kronos/utils/fuzzy/__init__.py +14 -0
  91. kronos/utils/fuzzy/_extract_json.py +90 -0
  92. kronos/utils/fuzzy/_fuzzy_json.py +288 -0
  93. kronos/utils/fuzzy/_fuzzy_match.py +149 -0
  94. kronos/utils/fuzzy/_string_similarity.py +187 -0
  95. kronos/utils/fuzzy/_to_dict.py +396 -0
  96. kronos/utils/sql/__init__.py +13 -0
  97. kronos/utils/sql/_sql_validation.py +142 -0
  98. krons-0.1.0.dist-info/METADATA +70 -0
  99. krons-0.1.0.dist-info/RECORD +101 -0
  100. krons-0.1.0.dist-info/WHEEL +4 -0
  101. krons-0.1.0.dist-info/licenses/LICENSE +201 -0
@@ -0,0 +1,363 @@
1
+ # Copyright (c) 2025 - 2026, HaiyangLi <quantocean.li at gmail dot com>
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ """High-level async concurrency patterns.
5
+
6
+ Provides structured concurrency primitives for common async workflows:
7
+ gather: Run awaitables concurrently, collect all results.
8
+ race: Return first completion, cancel the rest.
9
+ bounded_map: Apply async function with concurrency limit.
10
+ retry: Exponential backoff with deadline awareness.
11
+ CompletionStream: Iterate results as they complete.
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import random
17
+ from collections.abc import Awaitable, Callable, Iterable, Sequence
18
+ from typing import TypeVar
19
+
20
+ import anyio
21
+ import anyio.abc
22
+
23
+ from ._cancel import effective_deadline, move_on_at
24
+ from ._errors import non_cancel_subgroup
25
+ from ._primitives import CapacityLimiter
26
+ from ._task import create_task_group
27
+ from ._utils import current_time
28
+
29
+ T = TypeVar("T")
30
+ R = TypeVar("R")
31
+
32
+
33
+ __all__ = (
34
+ "CompletionStream",
35
+ "bounded_map",
36
+ "gather",
37
+ "race",
38
+ "retry",
39
+ )
40
+
41
+
42
async def gather(*aws: Awaitable[T], return_exceptions: bool = False) -> list[T | BaseException]:
    """Execute awaitables concurrently, returning results in input order.

    Args:
        *aws: Awaitables to run at the same time.
        return_exceptions: When True, exceptions raised by awaitables are
            stored in the result list. When False (default), the first
            failure cancels the remaining tasks and propagates.

    Returns:
        Results ordered to match the input awaitables. May contain
        exceptions when return_exceptions=True.

    Raises:
        BaseExceptionGroup: If return_exceptions=False and any awaitable raises.

    Example:
        >>> results = await gather(fetch_a(), fetch_b(), fetch_c())
        >>> # With error handling:
        >>> results = await gather(*tasks, return_exceptions=True)
        >>> errors = [r for r in results if isinstance(r, Exception)]
    """
    if not aws:
        return []

    # Pre-sized slot list so each task can write its result by position.
    slots: list[T | BaseException | None] = [None] * len(aws)

    async def _collect(position: int, awaitable: Awaitable[T]) -> None:
        try:
            slots[position] = await awaitable
        except BaseException as err:
            slots[position] = err
            if not return_exceptions:
                raise

    try:
        async with create_task_group() as tg:
            for position, awaitable in enumerate(aws):
                tg.start_soon(_collect, position, awaitable)
    except BaseExceptionGroup as group:
        if not return_exceptions:
            # Strip cancellation noise; surface only the real failures.
            filtered = non_cancel_subgroup(group)
            if filtered is not None:
                raise filtered
        raise  # pragma: no cover

    return slots  # type: ignore
88
+
89
+
90
async def race(*aws: Awaitable[T]) -> T:
    """Return the result of whichever awaitable finishes first.

    All other awaitables are cancelled as soon as a winner emerges. If the
    winner raised instead of returning, that exception is re-raised here.

    Args:
        *aws: Awaitables to race. Must provide at least one.

    Returns:
        Result of the first awaitable to complete successfully.

    Raises:
        ValueError: If no awaitables provided.
        BaseException: If the winning awaitable raises.

    Example:
        >>> result = await race(slow_api(), fast_cache(), timeout_fallback())
    """
    if not aws:
        raise ValueError("race() requires at least one awaitable")

    # Buffer of 1: the winner's message is held even before we receive it.
    send, recv = anyio.create_memory_object_stream(1)

    async def _compete(awaitable: Awaitable[T]) -> None:
        try:
            value = await awaitable
            await send.send((True, value))
        except BaseException as err:
            await send.send((False, err))

    async with send, recv, create_task_group() as tg:
        for awaitable in aws:
            tg.start_soon(_compete, awaitable)
        # First message wins; everything else gets cancelled.
        succeeded, payload = await recv.receive()
        tg.cancel_scope.cancel()

    if succeeded:
        return payload  # type: ignore[return-value]
    raise payload  # type: ignore[misc]
130
+
131
+
132
async def bounded_map(
    func: Callable[[T], Awaitable[R]],
    items: Iterable[T],
    *,
    limit: int,
    return_exceptions: bool = False,
) -> list[R | BaseException]:
    """Apply an async function to every item, at most `limit` at a time.

    Results keep the input order even though execution is concurrent.

    Args:
        func: Async function applied to each item.
        items: Iterable of items to process.
        limit: Maximum concurrent executions (must be >= 1).
        return_exceptions: When True, exceptions appear in the result list.
            When False, the first exception propagates.

    Returns:
        List of results in input order.

    Raises:
        ValueError: If limit < 1.
        BaseExceptionGroup: If return_exceptions=False and any call raises.

    Example:
        >>> async def fetch(url): ...
        >>> results = await bounded_map(fetch, urls, limit=10)
    """
    if limit <= 0:
        raise ValueError("limit must be >= 1")

    # Materialize up front: we need the length and stable indexing.
    materialized = list(items)
    if not materialized:
        return []

    results: list[R | BaseException | None] = [None] * len(materialized)
    limiter = CapacityLimiter(limit)

    async def _worker(position: int, item: T) -> None:
        # The limiter token is held for the full duration of the call.
        async with limiter:
            try:
                results[position] = await func(item)
            except BaseException as err:
                results[position] = err
                if not return_exceptions:
                    raise

    try:
        async with create_task_group() as tg:
            for position, item in enumerate(materialized):
                tg.start_soon(_worker, position, item)
    except BaseExceptionGroup as group:
        if not return_exceptions:
            # Drop pure-cancellation members; re-raise the real failures.
            filtered = non_cancel_subgroup(group)
            if filtered is not None:
                raise filtered
        raise  # pragma: no cover

    return results  # type: ignore
192
+
193
+
194
class CompletionStream:
    """Iterate async results as they complete (first-finished order).

    Provides structured concurrency with optional concurrency limiting.
    Must be used as an async context manager.

    Args:
        aws: Sequence of awaitables to execute.
        limit: Max concurrent executions (None = unlimited, must be >= 1
            when given).
        return_exceptions: If True, exceptions are yielded as results.
            If False (default), exceptions propagate and terminate iteration.

    Raises:
        ValueError: If limit is given and < 1.

    Example:
        >>> async with CompletionStream(tasks, limit=5) as stream:
        ...     async for idx, result in stream:
        ...         print(f"Task {idx} completed: {result}")

    Note:
        Results are yielded as (index, result) tuples where index is the
        original position in the input sequence.
    """

    def __init__(
        self,
        aws: Sequence[Awaitable[T]],
        *,
        limit: int | None = None,
        return_exceptions: bool = False,
    ):
        # Validate eagerly: previously an invalid limit surfaced only at
        # __aenter__, and limit=0 was silently treated as "unlimited"
        # because of a truthiness check.
        if limit is not None and limit < 1:
            raise ValueError("limit must be >= 1 or None")
        self.aws = aws
        self.limit = limit
        self.return_exceptions = return_exceptions
        self._task_group: anyio.abc.TaskGroup | None = None
        self._send: anyio.abc.ObjectSendStream[tuple[int, T]] | None = None
        self._recv: anyio.abc.ObjectReceiveStream[tuple[int, T]] | None = None
        self._completed_count = 0
        self._total_count = len(aws)

    async def __aenter__(self):
        n = len(self.aws)
        # Buffer sized to the task count so no _runner ever blocks on send.
        self._send, self._recv = anyio.create_memory_object_stream(n)
        self._task_group = anyio.create_task_group()
        await self._task_group.__aenter__()

        # `is not None` (not truthiness): limit=0 must not mean "unlimited".
        limiter = CapacityLimiter(self.limit) if self.limit is not None else None

        async def _runner(i: int, aw: Awaitable[T]) -> None:
            if limiter:
                await limiter.acquire()
            try:
                try:
                    res = await aw
                except BaseException as exc:
                    if self.return_exceptions:
                        res = exc  # type: ignore[assignment]
                    else:
                        raise
                try:
                    assert self._send is not None
                    await self._send.send((i, res))  # type: ignore[arg-type]
                except anyio.ClosedResourceError:  # pragma: no cover
                    # Consumer exited early; drop the result silently.
                    pass
            finally:
                if limiter:
                    limiter.release()

        for i, aw in enumerate(self.aws):
            self._task_group.start_soon(_runner, i, aw)

        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        try:
            if self._task_group:
                await self._task_group.__aexit__(exc_type, exc_val, exc_tb)
        finally:
            # Always close both stream ends, even if the task group raised.
            if self._send:
                await self._send.aclose()
            if self._recv:
                await self._recv.aclose()
        return False

    def __aiter__(self):
        if not self._recv:
            raise RuntimeError("CompletionStream must be used as async context manager")
        return self

    async def __anext__(self):
        # Same guard as __aiter__, for callers driving __anext__ directly.
        if self._recv is None:
            raise RuntimeError("CompletionStream must be used as async context manager")
        if self._completed_count >= self._total_count:
            raise StopAsyncIteration

        try:
            result = await self._recv.receive()
            self._completed_count += 1
            return result
        except anyio.EndOfStream:  # pragma: no cover
            raise StopAsyncIteration
291
+
292
+
293
async def retry(
    fn: Callable[[], Awaitable[T]],
    *,
    attempts: int = 3,
    base_delay: float = 0.1,
    max_delay: float = 2.0,
    retry_on: tuple[type[BaseException], ...] = (Exception,),
    jitter: float = 0.1,
) -> T:
    """Retry async function with exponential backoff and deadline awareness.

    Respects structured concurrency: cancellation is never retried.
    Automatically caps delays to any ambient deadline from parent scope.

    Args:
        fn: Zero-argument async callable to retry.
        attempts: Maximum attempts (>= 1).
        base_delay: Initial delay in seconds (> 0).
        max_delay: Maximum delay cap in seconds (>= 0).
        retry_on: Exception types to retry on (must not include CancelledError).
        jitter: Random jitter factor (0.1 = up to 10% extra delay).

    Returns:
        Result of successful fn() call.

    Raises:
        ValueError: If parameters are invalid or retry_on includes cancellation.
        BaseException: Last exception after exhausting attempts.

    Example:
        >>> async def flaky_api():
        ...     return await http_client.get(url)
        >>> result = await retry(flaky_api, attempts=3, base_delay=0.5)
    """
    if attempts < 1:
        raise ValueError("attempts must be >= 1")
    if base_delay <= 0:
        raise ValueError("base_delay must be > 0")
    if max_delay < 0:
        raise ValueError("max_delay must be >= 0")
    if jitter < 0:
        raise ValueError("jitter must be >= 0")

    # Retrying on the cancellation exception would swallow scope
    # cancellation and break structured concurrency — reject it up front.
    cancelled_exc = anyio.get_cancelled_exc_class()
    if any(issubclass(cancelled_exc, t) for t in retry_on):
        raise ValueError("retry_on must not include the cancellation exception type")

    attempt = 0
    # Ambient deadline is sampled once before the loop; all backoff sleeps
    # below are capped to it so we never sleep past the parent's timeout.
    deadline = effective_deadline()
    while True:
        try:
            return await fn()
        except retry_on:
            attempt += 1
            if attempt >= attempts:
                # Out of attempts: re-raise the exception being handled.
                raise

            # Exponential backoff: base * 2^(attempt-1), capped at max_delay.
            delay = min(max_delay, base_delay * (2 ** (attempt - 1)))
            if jitter:
                # Multiplicative jitter: up to `jitter` fraction extra delay.
                delay *= 1 + random.random() * jitter

            if deadline is not None:
                remaining = deadline - current_time()
                if remaining <= 0:  # pragma: no cover
                    # Deadline already passed — no point retrying.
                    raise
                # Sleep, but never beyond the ambient deadline.
                with move_on_at(deadline):
                    await anyio.sleep(delay)
                if current_time() >= deadline:  # pragma: no cover
                    # Sleep was truncated by the deadline: give up now.
                    raise
            else:
                await anyio.sleep(delay)
@@ -0,0 +1,328 @@
1
+ # Copyright (c) 2025 - 2026, HaiyangLi <quantocean.li at gmail dot com>
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ """Async synchronization primitives wrapping anyio.
5
+
6
+ All primitives support async context manager protocol for safe acquire/release:
7
+
8
+ async with Lock() as lock:
9
+ # critical section
10
+
11
+ async with Semaphore(3) as sem:
12
+ # limited concurrency section
13
+ """
14
+
15
+ from __future__ import annotations
16
+
17
+ from dataclasses import dataclass
18
+ from typing import Any, Generic, Self, TypeVar
19
+
20
+ import anyio
21
+ import anyio.abc
22
+
23
+ T = TypeVar("T")
24
+
25
+
26
+ __all__ = (
27
+ "CapacityLimiter",
28
+ "Condition",
29
+ "Event",
30
+ "Lock",
31
+ "Queue",
32
+ "Semaphore",
33
+ )
34
+
35
+
36
class Lock:
    """Mutual-exclusion lock for async code, delegating to anyio.Lock.

    Usage:
        lock = Lock()
        async with lock:
            # exclusive access
    """

    # NOTE: `_lock` is read directly by Condition; do not rename.
    __slots__ = ("_lock",)

    def __init__(self) -> None:
        self._lock = anyio.Lock()

    async def acquire(self) -> None:
        """Block until the lock is held by the caller."""
        await self._lock.acquire()

    def release(self) -> None:
        """Give the lock back. Caller must currently hold it."""
        self._lock.release()

    async def __aenter__(self) -> Self:
        await self._lock.acquire()
        return self

    async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None:
        self._lock.release()
64
+
65
+
66
class Semaphore:
    """Counting semaphore that caps the number of concurrent holders.

    Args:
        initial_value: Maximum concurrent acquisitions allowed.

    Raises:
        ValueError: If initial_value < 0.

    Usage:
        sem = Semaphore(3)  # max 3 concurrent
        async with sem:
            # limited concurrency section
    """

    __slots__ = ("_semaphore",)

    def __init__(self, initial_value: int) -> None:
        if initial_value < 0:
            raise ValueError("initial_value must be >= 0")
        self._semaphore = anyio.Semaphore(initial_value)

    async def acquire(self) -> None:
        """Take one slot, blocking while none are free."""
        await self._semaphore.acquire()

    def release(self) -> None:
        """Return one slot, waking a single waiting task if any."""
        self._semaphore.release()

    async def __aenter__(self) -> Self:
        await self._semaphore.acquire()
        return self

    async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None:
        self._semaphore.release()
102
+
103
+
104
class CapacityLimiter:
    """Capacity limiter for resource-pool management, wrapping anyio.

    Unlike Semaphore, supports fractional token counts and tracking of
    which borrower holds each token.

    Args:
        total_tokens: Total capacity (must be > 0).

    Raises:
        ValueError: If total_tokens <= 0.

    Usage:
        limiter = CapacityLimiter(10.0)
        async with limiter:
            # uses 1 token
    """

    __slots__ = ("_limiter",)

    def __init__(self, total_tokens: float) -> None:
        if total_tokens <= 0:
            raise ValueError("total_tokens must be > 0")
        self._limiter = anyio.CapacityLimiter(total_tokens)

    async def acquire(self) -> None:
        """Take one token, blocking until one is free."""
        await self._limiter.acquire()

    def release(self) -> None:
        """Return one token to the pool."""
        self._limiter.release()

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        """Take a token attributed to `borrower` (useful for debugging)."""
        await self._limiter.acquire_on_behalf_of(borrower)

    def release_on_behalf_of(self, borrower: object) -> None:
        """Return the token previously taken for `borrower`."""
        self._limiter.release_on_behalf_of(borrower)

    @property
    def total_tokens(self) -> float:
        """Total capacity configured for this limiter."""
        return self._limiter.total_tokens

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        if value <= 0:
            raise ValueError("total_tokens must be > 0")
        self._limiter.total_tokens = value

    @property
    def borrowed_tokens(self) -> float:
        """Tokens currently checked out (in use)."""
        return self._limiter.borrowed_tokens

    @property
    def available_tokens(self) -> float:
        """Tokens free to acquire right now."""
        return self._limiter.available_tokens

    @property
    def remaining_tokens(self) -> float:
        """Alias for available_tokens. Use available_tokens instead."""
        return self._limiter.available_tokens

    async def __aenter__(self) -> Self:
        await self._limiter.acquire()
        return self

    async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None:
        self._limiter.release()
176
+
177
+
178
@dataclass(slots=True)
class Queue(Generic[T]):
    """Bounded async FIFO queue built on a memory object stream pair.

    Create instances with the `with_maxsize()` factory method.

    Usage:
        queue: Queue[int] = Queue.with_maxsize(100)
        await queue.put(42)
        item = await queue.get()
    """

    _send: anyio.abc.ObjectSendStream[T]
    _recv: anyio.abc.ObjectReceiveStream[T]

    @classmethod
    def with_maxsize(cls, maxsize: int) -> Queue[T]:
        """Build a bounded queue.

        Args:
            maxsize: Maximum items buffered before put() blocks.

        Returns:
            New Queue instance.
        """
        streams = anyio.create_memory_object_stream(maxsize)
        return cls(*streams)

    async def put(self, item: T) -> None:
        """Enqueue item, waiting while the queue is full."""
        await self._send.send(item)

    def put_nowait(self, item: T) -> None:
        """Enqueue item immediately. Raises WouldBlock when full."""
        self._send.send_nowait(item)  # type: ignore[attr-defined]

    async def get(self) -> T:
        """Dequeue the oldest item, waiting while the queue is empty."""
        return await self._recv.receive()

    def get_nowait(self) -> T:
        """Dequeue immediately. Raises WouldBlock when empty."""
        return self._recv.receive_nowait()  # type: ignore[attr-defined]

    async def close(self) -> None:
        """Close the send side, then the receive side."""
        await self._send.aclose()
        await self._recv.aclose()

    @property
    def sender(self) -> anyio.abc.ObjectSendStream[T]:
        """Raw send stream, for advanced usage."""
        return self._send

    @property
    def receiver(self) -> anyio.abc.ObjectReceiveStream[T]:
        """Raw receive stream, for advanced usage."""
        return self._recv

    async def __aenter__(self) -> Self:
        return self

    async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None:
        await self.close()
242
+
243
+
244
class Event:
    """One-shot flag for coordinating async tasks.

    Once set, it stays set (there is no reset); every waiter — current and
    future — is released.

    Usage:
        event = Event()
        # Task A:
        await event.wait()  # blocks until set
        # Task B:
        event.set()  # wakes all waiters
    """

    __slots__ = ("_event",)

    def __init__(self) -> None:
        self._event = anyio.Event()

    async def wait(self) -> None:
        """Block until the flag is set; no-op if already set."""
        await self._event.wait()

    def set(self) -> None:
        """Raise the flag and wake every waiter. Safe to call repeatedly."""
        self._event.set()

    def is_set(self) -> bool:
        """Whether the flag has been raised."""
        return self._event.is_set()

    def statistics(self) -> anyio.EventStatistics:
        """Statistics about tasks currently waiting on this event."""
        return self._event.statistics()
277
+
278
+
279
class Condition:
    """Condition variable for wait/notify synchronization patterns.

    Args:
        lock: Optional Lock to coordinate on. An internal lock is created
            when omitted.

    Usage:
        cond = Condition()
        async with cond:
            while not ready:
                await cond.wait()
            # condition met, proceed
    """

    __slots__ = ("_cond",)

    def __init__(self, lock: Lock | None = None) -> None:
        # Unwrap our Lock facade to the underlying anyio lock, if given.
        inner = None if lock is None else lock._lock
        self._cond = anyio.Condition(inner)

    async def acquire(self) -> None:
        """Take the underlying lock."""
        await self._cond.acquire()

    def release(self) -> None:
        """Give back the underlying lock."""
        self._cond.release()

    async def wait(self) -> None:
        """Drop the lock, wait for a notify, re-take the lock. Caller must hold the lock."""
        await self._cond.wait()

    def notify(self, n: int = 1) -> None:
        """Wake at most n waiters. Caller must hold the lock."""
        self._cond.notify(n)

    def notify_all(self) -> None:
        """Wake every waiter. Caller must hold the lock."""
        self._cond.notify_all()

    def statistics(self) -> anyio.ConditionStatistics:
        """Statistics about the lock and tasks waiting on this condition."""
        return self._cond.statistics()

    async def __aenter__(self) -> Self:
        await self._cond.acquire()
        return self

    async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None:
        self._cond.release()