krons 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kronos/__init__.py +0 -0
- kronos/core/__init__.py +145 -0
- kronos/core/broadcaster.py +116 -0
- kronos/core/element.py +225 -0
- kronos/core/event.py +316 -0
- kronos/core/eventbus.py +116 -0
- kronos/core/flow.py +356 -0
- kronos/core/graph.py +442 -0
- kronos/core/node.py +982 -0
- kronos/core/pile.py +575 -0
- kronos/core/processor.py +494 -0
- kronos/core/progression.py +296 -0
- kronos/enforcement/__init__.py +57 -0
- kronos/enforcement/common/__init__.py +34 -0
- kronos/enforcement/common/boolean.py +85 -0
- kronos/enforcement/common/choice.py +97 -0
- kronos/enforcement/common/mapping.py +118 -0
- kronos/enforcement/common/model.py +102 -0
- kronos/enforcement/common/number.py +98 -0
- kronos/enforcement/common/string.py +140 -0
- kronos/enforcement/context.py +129 -0
- kronos/enforcement/policy.py +80 -0
- kronos/enforcement/registry.py +153 -0
- kronos/enforcement/rule.py +312 -0
- kronos/enforcement/service.py +370 -0
- kronos/enforcement/validator.py +198 -0
- kronos/errors.py +146 -0
- kronos/operations/__init__.py +32 -0
- kronos/operations/builder.py +228 -0
- kronos/operations/flow.py +398 -0
- kronos/operations/node.py +101 -0
- kronos/operations/registry.py +92 -0
- kronos/protocols.py +414 -0
- kronos/py.typed +0 -0
- kronos/services/__init__.py +81 -0
- kronos/services/backend.py +286 -0
- kronos/services/endpoint.py +608 -0
- kronos/services/hook.py +471 -0
- kronos/services/imodel.py +465 -0
- kronos/services/registry.py +115 -0
- kronos/services/utilities/__init__.py +36 -0
- kronos/services/utilities/header_factory.py +87 -0
- kronos/services/utilities/rate_limited_executor.py +271 -0
- kronos/services/utilities/rate_limiter.py +180 -0
- kronos/services/utilities/resilience.py +414 -0
- kronos/session/__init__.py +41 -0
- kronos/session/exchange.py +258 -0
- kronos/session/message.py +60 -0
- kronos/session/session.py +411 -0
- kronos/specs/__init__.py +25 -0
- kronos/specs/adapters/__init__.py +0 -0
- kronos/specs/adapters/_utils.py +45 -0
- kronos/specs/adapters/dataclass_field.py +246 -0
- kronos/specs/adapters/factory.py +56 -0
- kronos/specs/adapters/pydantic_adapter.py +309 -0
- kronos/specs/adapters/sql_ddl.py +946 -0
- kronos/specs/catalog/__init__.py +36 -0
- kronos/specs/catalog/_audit.py +39 -0
- kronos/specs/catalog/_common.py +43 -0
- kronos/specs/catalog/_content.py +59 -0
- kronos/specs/catalog/_enforcement.py +70 -0
- kronos/specs/factory.py +120 -0
- kronos/specs/operable.py +314 -0
- kronos/specs/phrase.py +405 -0
- kronos/specs/protocol.py +140 -0
- kronos/specs/spec.py +506 -0
- kronos/types/__init__.py +60 -0
- kronos/types/_sentinel.py +311 -0
- kronos/types/base.py +369 -0
- kronos/types/db_types.py +260 -0
- kronos/types/identity.py +66 -0
- kronos/utils/__init__.py +40 -0
- kronos/utils/_hash.py +234 -0
- kronos/utils/_json_dump.py +392 -0
- kronos/utils/_lazy_init.py +63 -0
- kronos/utils/_to_list.py +165 -0
- kronos/utils/_to_num.py +85 -0
- kronos/utils/_utils.py +375 -0
- kronos/utils/concurrency/__init__.py +205 -0
- kronos/utils/concurrency/_async_call.py +333 -0
- kronos/utils/concurrency/_cancel.py +122 -0
- kronos/utils/concurrency/_errors.py +96 -0
- kronos/utils/concurrency/_patterns.py +363 -0
- kronos/utils/concurrency/_primitives.py +328 -0
- kronos/utils/concurrency/_priority_queue.py +135 -0
- kronos/utils/concurrency/_resource_tracker.py +110 -0
- kronos/utils/concurrency/_run_async.py +67 -0
- kronos/utils/concurrency/_task.py +95 -0
- kronos/utils/concurrency/_utils.py +79 -0
- kronos/utils/fuzzy/__init__.py +14 -0
- kronos/utils/fuzzy/_extract_json.py +90 -0
- kronos/utils/fuzzy/_fuzzy_json.py +288 -0
- kronos/utils/fuzzy/_fuzzy_match.py +149 -0
- kronos/utils/fuzzy/_string_similarity.py +187 -0
- kronos/utils/fuzzy/_to_dict.py +396 -0
- kronos/utils/sql/__init__.py +13 -0
- kronos/utils/sql/_sql_validation.py +142 -0
- krons-0.1.0.dist-info/METADATA +70 -0
- krons-0.1.0.dist-info/RECORD +101 -0
- krons-0.1.0.dist-info/WHEEL +4 -0
- krons-0.1.0.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,333 @@
|
|
|
1
|
+
# Copyright (c) 2025 - 2026, HaiyangLi <quantocean.li at gmail dot com>
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
|
|
4
|
+
"""Async batch processing with retry, timeout, and concurrency control.
|
|
5
|
+
|
|
6
|
+
Primary exports:
|
|
7
|
+
alcall: Apply function to list elements concurrently with full control.
|
|
8
|
+
bcall: Batch processing wrapper yielding results per batch.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from collections.abc import AsyncGenerator, Callable
|
|
12
|
+
from typing import Any, ParamSpec, TypeVar
|
|
13
|
+
|
|
14
|
+
from kronos.types._sentinel import Unset, not_sentinel
|
|
15
|
+
from kronos.utils._lazy_init import LazyInit
|
|
16
|
+
from kronos.utils._to_list import to_list
|
|
17
|
+
|
|
18
|
+
from ._cancel import move_on_after
|
|
19
|
+
from ._errors import get_cancelled_exc_class
|
|
20
|
+
from ._patterns import non_cancel_subgroup
|
|
21
|
+
from ._primitives import Semaphore
|
|
22
|
+
from ._task import create_task_group
|
|
23
|
+
from ._utils import is_coro_func, run_sync, sleep
|
|
24
|
+
|
|
25
|
+
T = TypeVar("T")
|
|
26
|
+
P = ParamSpec("P")
|
|
27
|
+
|
|
28
|
+
_lazy = LazyInit()
|
|
29
|
+
_MODEL_LIKE = None
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
__all__ = (
|
|
33
|
+
"alcall",
|
|
34
|
+
"bcall",
|
|
35
|
+
)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def _do_init() -> None:
    """One-time setup: record Pydantic's BaseModel class for input normalization.

    Imported lazily (via the module-level LazyInit guard) so pydantic is only
    loaded once batch processing is actually used.
    """
    global _MODEL_LIKE

    from pydantic import BaseModel  # deferred import; see docstring

    _MODEL_LIKE = (BaseModel,)
|
+
|
|
46
|
+
def _ensure_initialized() -> None:
    """Ensure `_do_init` has run, deferring to `_lazy.ensure` for once-only semantics.

    Called at the top of `alcall` so the pydantic import happens on first use
    rather than at module import time.
    """
    _lazy.ensure(_do_init)
|
+
|
|
51
|
+
def _validate_func(func: Any) -> Callable:
|
|
52
|
+
"""Extract and validate a single callable.
|
|
53
|
+
|
|
54
|
+
Args:
|
|
55
|
+
func: Callable or single-element iterable containing a callable.
|
|
56
|
+
|
|
57
|
+
Returns:
|
|
58
|
+
The validated callable.
|
|
59
|
+
|
|
60
|
+
Raises:
|
|
61
|
+
ValueError: If not callable or iterable doesn't contain exactly one callable.
|
|
62
|
+
"""
|
|
63
|
+
if callable(func):
|
|
64
|
+
return func
|
|
65
|
+
|
|
66
|
+
try:
|
|
67
|
+
func_list = list(func)
|
|
68
|
+
except TypeError:
|
|
69
|
+
raise ValueError("func must be callable or an iterable containing one callable.")
|
|
70
|
+
|
|
71
|
+
if len(func_list) != 1 or not callable(func_list[0]):
|
|
72
|
+
raise ValueError("Only one callable function is allowed.")
|
|
73
|
+
|
|
74
|
+
return func_list[0]
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def _normalize_input(
|
|
78
|
+
input_: Any,
|
|
79
|
+
*,
|
|
80
|
+
flatten: bool,
|
|
81
|
+
dropna: bool,
|
|
82
|
+
unique: bool,
|
|
83
|
+
flatten_tuple_set: bool,
|
|
84
|
+
) -> list:
|
|
85
|
+
"""Convert input to a flat list for batch processing.
|
|
86
|
+
|
|
87
|
+
Handles iterables, Pydantic models (as single items), and scalars.
|
|
88
|
+
"""
|
|
89
|
+
if flatten or dropna:
|
|
90
|
+
return to_list(
|
|
91
|
+
input_,
|
|
92
|
+
flatten=flatten,
|
|
93
|
+
dropna=dropna,
|
|
94
|
+
unique=unique,
|
|
95
|
+
flatten_tuple_set=flatten_tuple_set,
|
|
96
|
+
)
|
|
97
|
+
|
|
98
|
+
if isinstance(input_, list):
|
|
99
|
+
return input_
|
|
100
|
+
|
|
101
|
+
if _MODEL_LIKE and isinstance(input_, _MODEL_LIKE):
|
|
102
|
+
return [input_]
|
|
103
|
+
|
|
104
|
+
try:
|
|
105
|
+
iter(input_)
|
|
106
|
+
return list(input_)
|
|
107
|
+
except TypeError:
|
|
108
|
+
return [input_]
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
async def _call_with_timeout(
    func: Callable,
    item: Any,
    is_coro: bool,
    timeout: float | None,
    **kwargs,
) -> Any:
    """Invoke ``func(item, **kwargs)`` with an optional timeout.

    Async functions are awaited directly; sync ones are dispatched through
    ``run_sync``. A ``timeout`` of None means no bound.
    """

    async def _invoke() -> Any:
        # Single dispatch point for the coroutine / sync distinction.
        if is_coro:
            return await func(item, **kwargs)
        return await run_sync(func, item, **kwargs)

    if timeout is None:
        return await _invoke()

    with move_on_after(timeout) as cancel_scope:
        result = await _invoke()
    if cancel_scope.cancelled_caught:
        raise TimeoutError(f"Function call timed out after {timeout}s")
    return result
|
|
137
|
+
async def _execute_with_retry(
    func: Callable,
    item: Any,
    index: int,
    *,
    is_coro: bool,
    timeout: float | None,
    initial_delay: float,
    backoff: float,
    max_attempts: int,
    default: Any,
    **kwargs,
) -> tuple[int, Any]:
    """Execute function with exponential backoff retry.

    Returns (index, result) so callers can restore ordering after concurrent
    execution. Cancellation exceptions are never retried or converted to the
    default (respects structured concurrency).
    """
    delay = initial_delay

    # One initial call plus up to max_attempts retries.
    for attempt in range(max(max_attempts, 0) + 1):
        try:
            return index, await _call_with_timeout(func, item, is_coro, timeout, **kwargs)

        except get_cancelled_exc_class():
            raise  # never swallow cancellation

        except Exception:
            if attempt >= max_attempts:
                # Retries exhausted: fall back to the default if one was given.
                if not_sentinel(default):
                    return index, default
                raise
            if delay:
                await sleep(delay)
            delay *= backoff
|
|
178
|
+
async def alcall(
    input_: list[Any],
    func: Callable[..., T],
    /,
    *,
    input_flatten: bool = False,
    input_dropna: bool = False,
    input_unique: bool = False,
    input_flatten_tuple_set: bool = False,
    output_flatten: bool = False,
    output_dropna: bool = False,
    output_unique: bool = False,
    output_flatten_tuple_set: bool = False,
    delay_before_start: float = 0,
    retry_initial_delay: float = 0,
    retry_backoff: float = 1,
    retry_default: Any = Unset,
    retry_timeout: float | None = None,
    retry_attempts: int = 0,
    max_concurrent: int | None = None,
    throttle_period: float | None = None,
    return_exceptions: bool = False,
    **kwargs: Any,
) -> list[T | BaseException]:
    """Apply function to each list element asynchronously with retry and concurrency control.

    Args:
        input_: List of items to process (or iterable that will be converted)
        func: Callable to apply (sync or async)
        input_flatten: Flatten nested input structures
        input_dropna: Remove None/undefined from input
        input_unique: Remove duplicate inputs (requires flatten)
        input_flatten_tuple_set: Include tuples/sets in flattening
        output_flatten: Flatten nested output structures
        output_dropna: Remove None/undefined from output
        output_unique: Remove duplicate outputs (requires flatten)
        output_flatten_tuple_set: Include tuples/sets in output flattening
        delay_before_start: Initial delay before processing (seconds)
        retry_initial_delay: Initial retry delay (seconds)
        retry_backoff: Backoff multiplier for retry delays
        retry_default: Default value on retry exhaustion (Unset = raise)
        retry_timeout: Timeout per function call (seconds)
        retry_attempts: Maximum retry attempts (0 = no retry)
        max_concurrent: Max concurrent executions (None = unlimited)
        throttle_period: Delay between starting tasks (seconds)
        return_exceptions: Return exceptions instead of raising
        **kwargs: Additional arguments passed to func

    Returns:
        List of results (preserves input order, may include exceptions if return_exceptions=True)

    Raises:
        ValueError: If func is not callable
        TimeoutError: If retry_timeout exceeded
        ExceptionGroup: If return_exceptions=False and tasks raise
    """
    _ensure_initialized()

    func = _validate_func(func)
    input_ = _normalize_input(
        input_,
        flatten=input_flatten,
        dropna=input_dropna,
        unique=input_unique,
        flatten_tuple_set=input_flatten_tuple_set,
    )

    if delay_before_start:
        await sleep(delay_before_start)

    semaphore = Semaphore(max_concurrent) if max_concurrent else None
    throttle_delay = throttle_period or 0
    is_coro = is_coro_func(func)
    n_items = len(input_)
    # Pre-sized slot list: each task writes its own index, preserving order.
    out: list[Any] = [None] * n_items

    async def run_one(item: Any, idx: int) -> Any:
        # Single point of invocation so the retry configuration is spelled
        # out once (previously duplicated in both semaphore branches).
        _, result = await _execute_with_retry(
            func,
            item,
            idx,
            is_coro=is_coro,
            timeout=retry_timeout,
            initial_delay=retry_initial_delay,
            backoff=retry_backoff,
            max_attempts=retry_attempts,
            default=retry_default,
            **kwargs,
        )
        return result

    async def task_wrapper(item: Any, idx: int) -> None:
        try:
            if semaphore:
                async with semaphore:
                    out[idx] = await run_one(item, idx)
            else:
                out[idx] = await run_one(item, idx)
        except BaseException as exc:
            # Record the failure in its slot; re-raise unless the caller
            # opted in to collecting exceptions as results.
            out[idx] = exc
            if not return_exceptions:
                raise

    try:
        async with create_task_group() as tg:
            for idx, item in enumerate(input_):
                tg.start_soon(task_wrapper, item, idx)
                # Optional stagger between task starts (skipped after last item).
                if throttle_delay and idx < n_items - 1:
                    await sleep(throttle_delay)
    except ExceptionGroup as eg:
        if not return_exceptions:
            # Surface only real errors; a cancellation-only group falls
            # through to the bare re-raise below.
            rest = non_cancel_subgroup(eg)
            if rest is not None:
                raise rest
        raise

    return to_list(
        out,
        flatten=output_flatten,
        dropna=output_dropna,
        unique=output_unique,
        flatten_tuple_set=output_flatten_tuple_set,
    )
|
+
|
|
311
|
+
async def bcall(
    input_: list[Any],
    func: Callable[..., T],
    /,
    batch_size: int,
    **kwargs: Any,
) -> AsyncGenerator[list[T | BaseException], None]:
    """Process input in fixed-size batches via alcall, yielding per batch.

    Args:
        input_: Items to process (flattened, with None values dropped)
        func: Callable to apply (sync or async)
        batch_size: Maximum number of items per batch
        **kwargs: Arguments forwarded to alcall (see alcall for details)

    Yields:
        alcall's result list for each successive batch
    """
    items = to_list(input_, flatten=True, dropna=True)

    for start in range(0, len(items), batch_size):
        batch = items[start : start + batch_size]
        yield await alcall(batch, func, **kwargs)
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
# Copyright (c) 2025 - 2026, HaiyangLi <quantocean.li at gmail dot com>
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
|
|
4
|
+
"""Cancellation scope utilities wrapping anyio with None-safe timeouts.
|
|
5
|
+
|
|
6
|
+
Provides context managers for timeout-based cancellation:
|
|
7
|
+
- `fail_after`/`fail_at`: Raise TimeoutError on expiry
|
|
8
|
+
- `move_on_after`/`move_on_at`: Silent cancellation on expiry
|
|
9
|
+
|
|
10
|
+
All accept None to disable timeout while preserving outer scope cancellability.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
from collections.abc import Iterator
|
|
16
|
+
from contextlib import contextmanager
|
|
17
|
+
|
|
18
|
+
import anyio
|
|
19
|
+
|
|
20
|
+
from ._utils import current_time
|
|
21
|
+
|
|
22
|
+
CancelScope = anyio.CancelScope
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
__all__ = (
|
|
26
|
+
"CancelScope",
|
|
27
|
+
"effective_deadline",
|
|
28
|
+
"fail_after",
|
|
29
|
+
"fail_at",
|
|
30
|
+
"move_on_after",
|
|
31
|
+
"move_on_at",
|
|
32
|
+
)
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@contextmanager
def fail_after(seconds: float | None) -> Iterator[CancelScope]:
    """Raise TimeoutError if the body runs longer than ``seconds``.

    Args:
        seconds: Timeout duration; None disables the timeout while still
            yielding a scope, so enclosing cancellation keeps working.

    Yields:
        The active CancelScope (inspect ``scope.cancelled_caught``).
    """
    # Pick the context manager once: a plain scope for "no timeout",
    # otherwise anyio's raising variant.
    cm = CancelScope() if seconds is None else anyio.fail_after(seconds)
    with cm as scope:
        yield scope
|
+
|
|
53
|
+
@contextmanager
def move_on_after(seconds: float | None) -> Iterator[CancelScope]:
    """Silently cancel the body after ``seconds`` elapse.

    Args:
        seconds: Timeout duration; None disables the timeout while still
            yielding a scope, so enclosing cancellation keeps working.

    Yields:
        The active CancelScope; ``scope.cancelled_caught`` reports expiry.
    """
    # Pick the context manager once: a plain scope for "no timeout",
    # otherwise anyio's silent-cancel variant.
    cm = CancelScope() if seconds is None else anyio.move_on_after(seconds)
    with cm as scope:
        yield scope
+
|
|
71
|
+
@contextmanager
def fail_at(deadline: float | None) -> Iterator[CancelScope]:
    """Raise TimeoutError once the absolute ``deadline`` passes.

    Args:
        deadline: Absolute time on the ``current_time()`` clock, or None
            to disable the timeout.

    Yields:
        The active CancelScope (inspect ``scope.cancelled_caught``).
    """
    if deadline is None:
        with CancelScope() as scope:
            yield scope
        return

    # Convert to a relative timeout, clamped so past deadlines fire at once,
    # and delegate to the relative-time wrapper.
    remaining = max(0.0, deadline - current_time())
    with fail_after(remaining) as scope:
        yield scope
|
+
|
|
91
|
+
@contextmanager
def move_on_at(deadline: float | None) -> Iterator[CancelScope]:
    """Silently cancel the body once the absolute ``deadline`` passes.

    Args:
        deadline: Absolute time on the ``current_time()`` clock, or None
            to disable the timeout.

    Yields:
        The active CancelScope; ``scope.cancelled_caught`` reports expiry.
    """
    if deadline is None:
        with CancelScope() as scope:
            yield scope
        return

    # Mirror fail_at: clamp to a non-negative relative timeout and delegate
    # to this module's relative-time wrapper (previously this bypassed it
    # and called anyio.move_on_after directly — behavior is identical since
    # the computed value is never None, but the siblings now match).
    remaining = max(0.0, deadline - current_time())
    with move_on_after(remaining) as scope:
        yield scope
|
|
111
|
+
def effective_deadline() -> float | None:
    """Return the combined deadline imposed by all enclosing cancel scopes.

    Returns:
        Absolute deadline time, -inf if already cancelled, or None when no
        deadline applies.

    Note:
        AnyIO reports +inf for "no deadline" and -inf for "already
        cancelled"; +inf is translated to None, -inf is passed through so
        callers can detect it.
    """
    deadline = anyio.current_effective_deadline()
    if deadline == float("inf"):
        return None
    return deadline
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
# Copyright (c) 2025 - 2026, HaiyangLi <quantocean.li at gmail dot com>
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
|
|
4
|
+
"""Cancellation error handling utilities.
|
|
5
|
+
|
|
6
|
+
Provides backend-agnostic cancellation detection and ExceptionGroup helpers
|
|
7
|
+
for cleanly separating cancellation from application errors.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
from collections.abc import Awaitable, Callable
|
|
13
|
+
from typing import ParamSpec, TypeVar
|
|
14
|
+
|
|
15
|
+
import anyio
|
|
16
|
+
|
|
17
|
+
T = TypeVar("T")
|
|
18
|
+
P = ParamSpec("P")
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
__all__ = (
|
|
22
|
+
"get_cancelled_exc_class",
|
|
23
|
+
"is_cancelled",
|
|
24
|
+
"non_cancel_subgroup",
|
|
25
|
+
"shield",
|
|
26
|
+
"split_cancellation",
|
|
27
|
+
)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def get_cancelled_exc_class() -> type[BaseException]:
    """Return the running backend's cancellation exception type.

    Returns:
        ``asyncio.CancelledError`` under asyncio, ``trio.Cancelled`` under trio.

    Note:
        Catch this type only to re-raise it; swallowing cancellation breaks
        structured concurrency.
    """
    return anyio.get_cancelled_exc_class()
|
+
|
|
39
|
+
def is_cancelled(exc: BaseException) -> bool:
    """Report whether ``exc`` is the backend's cancellation exception.

    Args:
        exc: Exception instance to test.

    Returns:
        True when ``exc`` is a cancellation (``asyncio.CancelledError`` under
        asyncio, ``trio.Cancelled`` under trio), otherwise False.
    """
    cancelled_cls = anyio.get_cancelled_exc_class()
    return isinstance(exc, cancelled_cls)
|
|
51
|
+
async def shield(func: Callable[P, Awaitable[T]], *args: P.args, **kwargs: P.kwargs) -> T:
    """Run an async function inside a shielded cancel scope.

    While the scope is active, cancellation from outside cannot interrupt
    the call, so keep shielded sections short (e.g. commit/cleanup steps);
    long shields can delay shutdown.

    Args:
        func: Async callable to protect.
        *args: Positional arguments for func.
        **kwargs: Keyword arguments for func.

    Returns:
        Whatever ``func(*args, **kwargs)`` returns.
    """
    with anyio.CancelScope(shield=True):
        return await func(*args, **kwargs)  # type: ignore[return-value]
|
|
71
|
+
def split_cancellation(
    eg: BaseExceptionGroup,
) -> tuple[BaseExceptionGroup | None, BaseExceptionGroup | None]:
    """Separate cancellation exceptions from application errors.

    Args:
        eg: Exception group to partition (Python 3.11+).

    Returns:
        ``(cancellations, others)`` — each a subgroup, or None when that
        side has no matching exceptions.
    """
    cancelled_cls = anyio.get_cancelled_exc_class()
    return eg.split(cancelled_cls)
+
|
|
86
|
+
def non_cancel_subgroup(eg: BaseExceptionGroup) -> BaseExceptionGroup | None:
    """Drop cancellation exceptions, keeping only application errors.

    Args:
        eg: Exception group to filter.

    Returns:
        Subgroup of non-cancellation errors, or None when every member was
        a cancellation.
    """
    # split() returns (matching, rest); only the non-matching side matters.
    return eg.split(anyio.get_cancelled_exc_class())[1]