dycw-utilities 0.135.0__py3-none-any.whl → 0.178.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of dycw-utilities might be problematic.
- dycw_utilities-0.178.1.dist-info/METADATA +34 -0
- dycw_utilities-0.178.1.dist-info/RECORD +105 -0
- dycw_utilities-0.178.1.dist-info/WHEEL +4 -0
- dycw_utilities-0.178.1.dist-info/entry_points.txt +4 -0
- utilities/__init__.py +1 -1
- utilities/altair.py +13 -10
- utilities/asyncio.py +312 -787
- utilities/atomicwrites.py +18 -6
- utilities/atools.py +64 -4
- utilities/cachetools.py +9 -6
- utilities/click.py +195 -77
- utilities/concurrent.py +1 -1
- utilities/contextlib.py +216 -17
- utilities/contextvars.py +20 -1
- utilities/cryptography.py +3 -3
- utilities/dataclasses.py +15 -28
- utilities/docker.py +387 -0
- utilities/enum.py +2 -2
- utilities/errors.py +17 -3
- utilities/fastapi.py +28 -59
- utilities/fpdf2.py +2 -2
- utilities/functions.py +24 -269
- utilities/git.py +9 -30
- utilities/grp.py +28 -0
- utilities/gzip.py +31 -0
- utilities/http.py +3 -2
- utilities/hypothesis.py +513 -159
- utilities/importlib.py +17 -1
- utilities/inflect.py +12 -4
- utilities/iterables.py +33 -58
- utilities/jinja2.py +148 -0
- utilities/json.py +70 -0
- utilities/libcst.py +38 -17
- utilities/lightweight_charts.py +4 -7
- utilities/logging.py +136 -93
- utilities/math.py +8 -4
- utilities/more_itertools.py +43 -45
- utilities/operator.py +27 -27
- utilities/orjson.py +189 -36
- utilities/os.py +61 -4
- utilities/packaging.py +115 -0
- utilities/parse.py +8 -5
- utilities/pathlib.py +269 -40
- utilities/permissions.py +298 -0
- utilities/platform.py +7 -6
- utilities/polars.py +1205 -413
- utilities/polars_ols.py +1 -1
- utilities/postgres.py +408 -0
- utilities/pottery.py +43 -19
- utilities/pqdm.py +3 -3
- utilities/psutil.py +5 -57
- utilities/pwd.py +28 -0
- utilities/pydantic.py +4 -52
- utilities/pydantic_settings.py +240 -0
- utilities/pydantic_settings_sops.py +76 -0
- utilities/pyinstrument.py +7 -7
- utilities/pytest.py +104 -143
- utilities/pytest_plugins/__init__.py +1 -0
- utilities/pytest_plugins/pytest_randomly.py +23 -0
- utilities/pytest_plugins/pytest_regressions.py +56 -0
- utilities/pytest_regressions.py +26 -46
- utilities/random.py +11 -6
- utilities/re.py +1 -1
- utilities/redis.py +220 -343
- utilities/sentinel.py +10 -0
- utilities/shelve.py +4 -1
- utilities/shutil.py +25 -0
- utilities/slack_sdk.py +35 -104
- utilities/sqlalchemy.py +496 -471
- utilities/sqlalchemy_polars.py +29 -54
- utilities/string.py +2 -3
- utilities/subprocess.py +1977 -0
- utilities/tempfile.py +112 -4
- utilities/testbook.py +50 -0
- utilities/text.py +174 -42
- utilities/throttle.py +158 -0
- utilities/timer.py +2 -2
- utilities/traceback.py +70 -35
- utilities/types.py +102 -30
- utilities/typing.py +479 -19
- utilities/uuid.py +42 -5
- utilities/version.py +27 -26
- utilities/whenever.py +1559 -361
- utilities/zoneinfo.py +80 -22
- dycw_utilities-0.135.0.dist-info/METADATA +0 -39
- dycw_utilities-0.135.0.dist-info/RECORD +0 -96
- dycw_utilities-0.135.0.dist-info/WHEEL +0 -4
- dycw_utilities-0.135.0.dist-info/licenses/LICENSE +0 -21
- utilities/aiolimiter.py +0 -25
- utilities/arq.py +0 -216
- utilities/eventkit.py +0 -388
- utilities/luigi.py +0 -183
- utilities/period.py +0 -152
- utilities/pudb.py +0 -62
- utilities/python_dotenv.py +0 -101
- utilities/streamlit.py +0 -105
- utilities/typed_settings.py +0 -123
utilities/asyncio.py
CHANGED
@@ -2,21 +2,16 @@ from __future__ import annotations
 
 import asyncio
 from asyncio import (
-    Event,
     Lock,
-    PriorityQueue,
     Queue,
     QueueEmpty,
-    QueueFull,
     Semaphore,
     StreamReader,
     Task,
     TaskGroup,
     create_subprocess_shell,
-    create_task,
     sleep,
 )
-from collections.abc import Callable, Hashable, Iterable, Iterator
 from contextlib import (
     AbstractAsyncContextManager,
     AsyncExitStack,
@@ -24,194 +19,206 @@ from contextlib import (
     asynccontextmanager,
     suppress,
 )
-from dataclasses import dataclass
+from dataclasses import dataclass
 from io import StringIO
-from logging import DEBUG, Logger, getLogger
+from pathlib import Path
 from subprocess import PIPE
 from sys import stderr, stdout
-from typing import
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    ClassVar,
+    Self,
+    TextIO,
+    assert_never,
+    cast,
+    overload,
+    override,
+)
 
-from utilities.dataclasses import replace_non_sentinel
-from utilities.errors import repr_error
 from utilities.functions import ensure_int, ensure_not_none
+from utilities.os import is_pytest
 from utilities.random import SYSTEM_RANDOM
+from utilities.reprlib import get_repr
 from utilities.sentinel import Sentinel, sentinel
-from utilities.
-from utilities.
+from utilities.shelve import yield_shelf
+from utilities.text import to_bool
+from utilities.warnings import suppress_warnings
+from utilities.whenever import get_now, round_date_or_date_time, to_nanoseconds
 
 if TYPE_CHECKING:
     from asyncio import _CoroutineLike
     from asyncio.subprocess import Process
-    from collections import
+    from collections.abc import (
+        AsyncIterable,
+        AsyncIterator,
+        Callable,
+        ItemsView,
+        Iterable,
+        Iterator,
+        KeysView,
+        Sequence,
+        ValuesView,
+    )
     from contextvars import Context
     from random import Random
+    from shelve import Shelf
     from types import TracebackType
 
-    from whenever import
-    from utilities.types import DateTimeRoundUnit, MaybeCallableEvent, MaybeType
+    from whenever import ZonedDateTime
+
+    from utilities.shelve import _Flag
+    from utilities.types import (
+        Coro,
+        Delta,
+        MaybeCallableBoolLike,
+        MaybeType,
+        PathLike,
+        SupportsKeysAndGetItem,
+    )
 
-class EnhancedQueue[T](Queue[T]):
-    """An asynchronous deque."""
-    [... roughly 150 further lines removed: the deque-backed get_left/get_right,
-    get_all, put_left/put_right methods, their *_nowait variants and the internal
-    getter/putter wake-up machinery ...]
 
+class AsyncDict[K, V]:
+    @overload
+    def __init__(self) -> None: ...
+    @overload
+    def __init__(self, map: SupportsKeysAndGetItem[K, V], /) -> None: ...
+    @overload
+    def __init__(self, iterable: Iterable[tuple[K, V]], /) -> None: ...
     @override
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__()
+        self._dict = dict[K, V](*args, **kwargs)
+        self._lock = Lock()
+
+    async def __aenter__(self) -> dict[K, V]:
+        await self._lock.__aenter__()
+        return self._dict
+
+    async def __aexit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        tb: TracebackType | None,
+        /,
+    ) -> None:
+        await self._lock.__aexit__(exc_type, exc, tb)
+
+    [... further additions: __contains__, __eq__, __hash__ (disabled), __getitem__,
+    __iter__, __len__, __repr__, __reversed__, __str__, the `empty` property,
+    `fromkeys`, and the lock-guarded clear, copy, del_, get, keys, items, pop,
+    popitem, set, setdefault, update and values methods ...]
 
 
 ##
@@ -220,10 +227,11 @@ class EnhancedQueue[T](Queue[T]):
 class EnhancedTaskGroup(TaskGroup):
     """Task group with enhanced features."""
 
+    _max_tasks: int | None
     _semaphore: Semaphore | None
-    _timeout:
+    _timeout: Delta | None
     _error: MaybeType[BaseException]
-    _debug:
+    _debug: MaybeCallableBoolLike
     _stack: AsyncExitStack
     _timeout_cm: _AsyncGeneratorContextManager[None] | None
 
@@ -232,12 +240,16 @@ class EnhancedTaskGroup(TaskGroup):
         self,
         *,
         max_tasks: int | None = None,
-        timeout:
+        timeout: Delta | None = None,
         error: MaybeType[BaseException] = TimeoutError,
-        debug:
+        debug: MaybeCallableBoolLike = False,
     ) -> None:
         super().__init__()
-        self.
+        self._max_tasks = max_tasks
+        if (max_tasks is None) or (max_tasks <= 0):
+            self._semaphore = None
+        else:
+            self._semaphore = Semaphore(max_tasks)
         self._timeout = timeout
         self._error = error
         self._debug = debug
@@ -257,13 +269,13 @@ class EnhancedTaskGroup(TaskGroup):
         tb: TracebackType | None,
     ) -> None:
         _ = await self._stack.__aexit__(et, exc, tb)
-        match self.
+        match self._is_debug():
             case True:
                 with suppress(Exception):
                     _ = await super().__aexit__(et, exc, tb)
             case False:
                 _ = await super().__aexit__(et, exc, tb)
-            case
+            case never:
                 assert_never(never)
 
     @override
@@ -286,6 +298,23 @@ class EnhancedTaskGroup(TaskGroup):
         _ = self._stack.push_async_callback(cm.__aexit__, None, None, None)
         return self.create_task(cm.__aenter__())
 
+    async def run_or_create_many_tasks[**P, T](
+        self,
+        make_coro: Callable[P, _CoroutineLike[T]],
+        *args: P.args,
+        **kwargs: P.kwargs,
+    ) -> T | Sequence[Task[T]]:
+        match self._is_debug(), self._max_tasks:
+            case (True, _) | (False, None):
+                return await make_coro(*args, **kwargs)
+            case False, int():
+                return [
+                    self.create_task(make_coro(*args, **kwargs))
+                    for _ in range(self._max_tasks)
+                ]
+            case never:
+                assert_never(never)
+
     async def run_or_create_task[T](
         self,
         coro: _CoroutineLike[T],
@@ -293,14 +322,19 @@ class EnhancedTaskGroup(TaskGroup):
         name: str | None = None,
         context: Context | None = None,
     ) -> T | Task[T]:
-        match self.
+        match self._is_debug():
             case True:
                 return await coro
             case False:
                 return self.create_task(coro, name=name, context=context)
-            case
+            case never:
                 assert_never(never)
 
+    def _is_debug(self) -> bool:
+        return to_bool(self._debug) or (
+            (self._max_tasks is not None) and (self._max_tasks <= 0)
+        )
+
     async def _wrap_with_semaphore[T](
         self, semaphore: Semaphore, coroutine: _CoroutineLike[T], /
     ) -> T:
@@ -315,599 +349,33 @@ class EnhancedTaskGroup(TaskGroup):
     ##
 
 
-@dataclass(kw_only=True, slots=True)
-class _LooperNoTaskError(LooperError):
-    looper: Looper
-
-    @override
-    def __str__(self) -> str:
-        return f"{self.looper} has no running task"
-
-
-@dataclass(kw_only=True, unsafe_hash=True)
-class Looper[T]:
-    """A looper of a core coroutine, handling errors."""
-    [... roughly 480 further lines removed: the Looper fields (auto_start, freq,
-    backoff, empty_upon_exit, logger, timeout, counters and flag Events), its
-    __aenter__/__aexit__, __await__, initialize, restart, run_looper,
-    run_until_empty, stats, stop, tear_down and replace machinery, the
-    _LooperStats dataclass, and the UniquePriorityQueue and UniqueQueue classes ...]
+def chain_async[T](*iterables: Iterable[T] | AsyncIterable[T]) -> AsyncIterator[T]:
+    """Asynchronous version of `chain`."""
+
+    async def iterator() -> AsyncIterator[T]:
+        for it in iterables:
+            try:
+                async for item in cast("AsyncIterable[T]", it):
+                    yield item
+            except TypeError:
+                for item in cast("Iterable[T]", it):
+                    yield item
+
+    return iterator()
 
 
 ##
 
 
-def get_event(
-    *, event: MaybeCallableEvent | None | Sentinel = sentinel
-) -> Event | None | Sentinel: ...
-def get_event(
-    *, event: MaybeCallableEvent | None | Sentinel = sentinel
-) -> Event | None | Sentinel:
-    """Get the event."""
-    match event:
-        case Event() | None | Sentinel():
-            return event
-        case Callable() as func:
-            return get_event(event=func())
-        case _ as never:
-            assert_never(never)
+def get_coroutine_name(func: Callable[[], Coro[Any]], /) -> str:
+    """Get the name of a coroutine, and then dispose of it gracefully."""
+    coro = func()
+    name = coro.__name__
+    with suppress_warnings(
+        message="coroutine '.*' was never awaited", category=RuntimeWarning
+    ):
+        del coro
+    return name
 
 
 ##
@@ -918,9 +386,9 @@ async def get_items[T](queue: Queue[T], /, *, max_size: int | None = None) -> li
     try:
         items = [await queue.get()]
     except RuntimeError as error:  # pragma: no cover
-        if error.args[0]
-
-
+        if (not is_pytest()) or (error.args[0] != "Event loop is closed"):
+            raise
+        return []
     max_size_use = None if max_size is None else (max_size - 1)
     items.extend(get_items_nowait(queue, max_size=max_size_use))
     return items
@@ -947,6 +415,43 @@ def get_items_nowait[T](queue: Queue[T], /, *, max_size: int | None = None) -> l
 ##
 
 
+async def one_async[T](*iterables: Iterable[T] | AsyncIterable[T]) -> T:
+    """Asynchronous version of `one`."""
+    result: T | Sentinel = sentinel
+    async for item in chain_async(*iterables):
+        if not isinstance(result, Sentinel):
+            raise OneAsyncNonUniqueError(iterables=iterables, first=result, second=item)
+        result = item
+    if isinstance(result, Sentinel):
+        raise OneAsyncEmptyError(iterables=iterables)
+    return result
+
+
+@dataclass(kw_only=True, slots=True)
+class OneAsyncError[T](Exception):
+    iterables: tuple[Iterable[T] | AsyncIterable[T], ...]
+
+
+@dataclass(kw_only=True, slots=True)
+class OneAsyncEmptyError[T](OneAsyncError[T]):
+    @override
+    def __str__(self) -> str:
+        return f"Iterable(s) {get_repr(self.iterables)} must not be empty"
+
+
+@dataclass(kw_only=True, slots=True)
+class OneAsyncNonUniqueError[T](OneAsyncError):
+    first: T
+    second: T
+
+    @override
+    def __str__(self) -> str:
+        return f"Iterable(s) {get_repr(self.iterables)} must contain exactly one item; got {self.first}, {self.second} and perhaps more"
+
+
+##
+
+
 async def put_items[T](items: Iterable[T], queue: Queue[T], /) -> None:
     """Put items into a queue; if full then wait."""
     for item in items:
@@ -963,32 +468,30 @@ def put_items_nowait[T](items: Iterable[T], queue: Queue[T], /) -> None:
 
 
 async def sleep_max(
-    sleep:
+    sleep: Delta | None = None, /, *, random: Random = SYSTEM_RANDOM
 ) -> None:
     """Sleep which accepts deltas."""
     if sleep is None:
         return
-    await asyncio.sleep(random.uniform(0.0, sleep
+    await asyncio.sleep(random.uniform(0.0, to_nanoseconds(sleep) / 1e9))
 
 
 ##
 
 
-async def sleep_rounded(
-    *, unit: DateTimeRoundUnit = "second", increment: int = 1
-) -> None:
+async def sleep_rounded(delta: Delta, /) -> None:
     """Sleep until a rounded time."""
-    await sleep_until(get_now()
+    await sleep_until(round_date_or_date_time(get_now(), delta, mode="ceil"))
 
 
 ##
 
 
-async def sleep_td(delta:
+async def sleep_td(delta: Delta | None = None, /) -> None:
     """Sleep which accepts deltas."""
     if delta is None:
         return
-    await sleep(delta
+    await sleep(to_nanoseconds(delta) / 1e9)
 
 
 ##
@@ -1010,27 +513,21 @@ class StreamCommandOutput:
 
     @property
     def return_code(self) -> int:
-        return ensure_int(self.process.returncode)
+        return ensure_int(self.process.returncode)
 
 
 async def stream_command(cmd: str, /) -> StreamCommandOutput:
     """Run a shell command asynchronously and stream its output in real time."""
-    process = await create_subprocess_shell(
-
-    )
-
-
-    )
-    proc_stderr = ensure_not_none(  # skipif-not-windows
-        process.stderr, desc="process.stderr"
-    )
-    ret_stdout = StringIO()  # skipif-not-windows
-    ret_stderr = StringIO()  # skipif-not-windows
-    async with TaskGroup() as tg:  # skipif-not-windows
+    process = await create_subprocess_shell(cmd, stdout=PIPE, stderr=PIPE)
+    proc_stdout = ensure_not_none(process.stdout, desc="process.stdout")
+    proc_stderr = ensure_not_none(process.stderr, desc="process.stderr")
+    ret_stdout = StringIO()
+    ret_stderr = StringIO()
+    async with TaskGroup() as tg:
         _ = tg.create_task(_stream_one(proc_stdout, stdout, ret_stdout))
         _ = tg.create_task(_stream_one(proc_stderr, stderr, ret_stderr))
-        _ = await process.wait()
-    return StreamCommandOutput(
+        _ = await process.wait()
+    return StreamCommandOutput(
         process=process, stdout=ret_stdout.getvalue(), stderr=ret_stderr.getvalue()
     )
 
@@ -1039,7 +536,7 @@ async def _stream_one(
     input_: StreamReader, out_stream: TextIO, ret_stream: StringIO, /
 ) -> None:
     """Asynchronously read from a stream and write to the target output stream."""
-    while True:
+    while True:
         line = await input_.readline()
         if not line:
             break
@@ -1054,13 +551,10 @@ async def _stream_one(
 
 @asynccontextmanager
 async def timeout_td(
-    timeout:
-    /,
-    *,
-    error: MaybeType[BaseException] = TimeoutError,
+    timeout: Delta | None = None, /, *, error: MaybeType[BaseException] = TimeoutError
 ) -> AsyncIterator[None]:
     """Timeout context manager which accepts deltas."""
-    timeout_use = None if timeout is None else timeout
+    timeout_use = None if timeout is None else (to_nanoseconds(timeout) / 1e9)
     try:
         async with asyncio.timeout(timeout_use):
             yield
@@ -1068,17 +562,47 @@ async def timeout_td(
         raise error from None
 
 
+##
+
+
+_LOCKS: AsyncDict[Path, Lock] = AsyncDict()
+
+
+@asynccontextmanager
+async def yield_locked_shelf(
+    path: PathLike,
+    /,
+    *,
+    flag: _Flag = "c",
+    protocol: int | None = None,
+    writeback: bool = False,
+) -> AsyncIterator[Shelf[Any]]:
+    """Yield a shelf, behind a lock."""
+    path = Path(path)
+    try:
+        lock = _LOCKS[path]
+    except KeyError:
+        lock = Lock()
+        await _LOCKS.set(path, lock)
+    async with lock:
+        with yield_shelf(
+            path, flag=flag, protocol=protocol, writeback=writeback
+        ) as shelf:
+            yield shelf
+
+
 __all__ = [
-    "
+    "AsyncDict",
     "EnhancedTaskGroup",
-    "
-    "
+    "OneAsyncEmptyError",
+    "OneAsyncError",
+    "OneAsyncNonUniqueError",
     "StreamCommandOutput",
-    "
-    "
-    "get_event",
+    "chain_async",
+    "get_coroutine_name",
     "get_items",
     "get_items_nowait",
+    "one_async",
     "put_items",
     "put_items_nowait",
     "sleep_max",
@@ -1087,4 +611,5 @@ __all__ = [
     "sleep_until",
     "stream_command",
     "timeout_td",
+    "yield_locked_shelf",
 ]
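
For orientation, here is a minimal usage sketch of the new utilities.asyncio helpers added in this diff (AsyncDict, EnhancedTaskGroup's max_tasks cap, timeout_td and one_async). It is not taken from the package's documentation: the demo coroutine is hypothetical, and the assumption that the Delta parameters accept a whenever.TimeDelta is inferred from the imports shown above.

from whenever import TimeDelta  # assumed delta type for timeout_td/sleep_td

from utilities.asyncio import AsyncDict, EnhancedTaskGroup, one_async, timeout_td


async def demo() -> None:
    # AsyncDict: a dict guarded by an asyncio.Lock; mutators are awaitable.
    cache: AsyncDict[str, int] = AsyncDict({"a": 1})
    await cache.set("b", 2)
    async with cache as mapping:  # hold the lock across a compound read/write
        mapping["c"] = mapping["a"] + mapping["b"]

    # timeout_td takes a delta; EnhancedTaskGroup(max_tasks=...) caps concurrency.
    async with timeout_td(TimeDelta(seconds=5)):
        async with EnhancedTaskGroup(max_tasks=2) as tg:
            _ = tg.create_task(cache.set("d", 4))

    # one_async raises OneAsyncEmptyError / OneAsyncNonUniqueError on bad cardinality.
    only = await one_async(["x"])
    assert only == "x"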