glow 0.15.4.tar.gz → 0.15.5.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {glow-0.15.4 → glow-0.15.5}/PKG-INFO +1 -1
- {glow-0.15.4 → glow-0.15.5}/pyproject.toml +1 -1
- {glow-0.15.4 → glow-0.15.5}/src/glow/_async.py +3 -30
- {glow-0.15.4 → glow-0.15.5}/src/glow/_cache.py +135 -187
- {glow-0.15.4 → glow-0.15.5}/src/glow/_concurrency.py +14 -65
- glow-0.15.5/src/glow/_dev.py +29 -0
- glow-0.15.5/src/glow/_futures.py +104 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_types.py +2 -1
- glow-0.15.4/src/glow/_dev.py +0 -18
- {glow-0.15.4 → glow-0.15.5}/.gitignore +0 -0
- {glow-0.15.4 → glow-0.15.5}/LICENSE +0 -0
- {glow-0.15.4 → glow-0.15.5}/README.md +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/__init__.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_array.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_async.pyi +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_cache.pyi +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_concurrency.pyi +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_coro.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_debug.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_ic.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_import_hook.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_imutil.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_keys.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_logging.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_more.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_parallel.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_parallel.pyi +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_patch_len.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_patch_print.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_patch_scipy.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_profile.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_profile.pyi +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_reduction.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_repr.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_reusable.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_sizeof.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_streams.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_thread_quota.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_uuid.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/_wrap.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/api/__init__.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/api/config.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/api/exporting.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/cli.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/cli.pyi +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/io/__init__.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/io/_sound.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/io/_svg.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/src/glow/py.typed +0 -0
- {glow-0.15.4 → glow-0.15.5}/test/__init__.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/test/test_api.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/test/test_batch.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/test/test_buffered.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/test/test_cli.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/test/test_iter.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/test/test_shm.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/test/test_thread_pool.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/test/test_timed.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/test/test_timer.py +0 -0
- {glow-0.15.4 → glow-0.15.5}/test/test_uuid.py +0 -0
{glow-0.15.4 → glow-0.15.5}/src/glow/_async.py

@@ -17,6 +17,7 @@ from functools import partial
 from typing import TypeGuard, cast, overload
 
 from ._dev import hide_frame
+from ._futures import adispatch
 from ._types import (
     ABatchDecorator,
     ABatchFn,
@@ -24,7 +25,6 @@ from ._types import (
     AnyIterable,
     AnyIterator,
     Coro,
-    Some,
 )
 
 type _Job[T, R] = tuple[T, AnyFuture[R]]
@@ -293,7 +293,7 @@ def astreaming[T, R](
             if batch_size is not None and len(buf) == batch_size:
                 batch, buf[:] = buf[:], []
                 async with lock:
-                    await _adispatch(fn, *batch)
+                    await adispatch(fn, *batch)
         finally:
             ncalls -= 1
 
@@ -308,36 +308,9 @@ def astreaming[T, R](
         if not notified:
             batch, buf[:] = buf[:], []
             async with lock:
-                await _adispatch(fn, *batch)
+                await adispatch(fn, *batch)
 
         with hide_frame:
             return await asyncio.gather(*fs)
 
     return wrapper
-
-
-async def _adispatch[T, R](fn: ABatchFn[T, R], *xs: _Job[T, R]) -> None:
-    if not xs:
-        return
-    obj: Some[Sequence[R]] | BaseException
-    try:
-        with hide_frame:
-            obj = Some(await fn([x for x, _ in xs]))
-        if not isinstance(obj.x, Sequence):
-            obj = TypeError(
-                f'Call returned non-sequence. Got {type(obj.x).__name__}'
-            )
-        elif len(obj.x) != len(xs):
-            obj = RuntimeError(
-                f'Call with {len(xs)} arguments '
-                f'incorrectly returned {len(obj.x)} results'
-            )
-    except BaseException as exc:  # noqa: BLE001
-        obj = exc
-
-    if isinstance(obj, Some):
-        for (_, f), res in zip(xs, obj.x):
-            f.set_result(res)
-    else:
-        for _, f in xs:
-            f.set_exception(obj)
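Both `astreaming` call sites now share one helper: the per-future bookkeeping that the local `_adispatch` used to do moved to `adispatch` in the new `glow._futures` module (shown later in this diff). A minimal sketch of the contract, assuming glow 0.15.5 is installed and using the private `_futures` API only for illustration:

import asyncio

from glow._futures import adispatch  # private module, new in 0.15.5

async def double(xs):  # an ABatchFn[int, int]: one result per input
    return [x * 2 for x in xs]

async def main() -> None:
    # Each job pairs an argument with the future awaiting its result;
    # one batched call resolves all of them.
    jobs = [(1, asyncio.Future()), (2, asyncio.Future())]
    await adispatch(double, *jobs)
    print([f.result() for _, f in jobs])  # [2, 4]

asyncio.run(main())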
{glow-0.15.4 → glow-0.15.5}/src/glow/_cache.py

@@ -11,16 +11,17 @@ from collections.abc import (
     Iterable,
     Iterator,
     KeysView,
+    Mapping,
     MutableMapping,
-    Sequence,
 )
 from dataclasses import dataclass, field
 from inspect import iscoroutinefunction
 from threading import RLock
-from typing import Final, Protocol, SupportsInt, cast
+from typing import Any, Final, Protocol, SupportsInt, cast
 from weakref import WeakValueDictionary
 
-from ._dev import hide_frame
+from ._dev import clone_exc, hide_frame
+from ._futures import adispatch, dispatch, gather_fs
 from ._keys import make_key
 from ._repr import si_bin
 from ._sizeof import sizeof
@@ -30,6 +31,7 @@ from ._types import (
     BatchFn,
     CachePolicy,
     Decorator,
+    Job,
     KeyFn,
     Some,
 )
@@ -85,23 +87,23 @@ def cache_status() -> str:
 _REFS: MutableMapping[int, '_Cache'] = WeakValueDictionary()
 
 
-class _AbstractCache[T](Protocol):
-    def __getitem__(self, key: Hashable, /) -> T | _Empty: ...
-    def __setitem__(self, key: Hashable, value: T, /) -> None: ...
+class _AbstractCache[K: Hashable, T](Protocol):
+    def __getitem__(self, key: K, /) -> T | _Empty: ...
+    def __setitem__(self, key: K, value: T, /) -> None: ...
 
 
-class _CacheMaker[T](Protocol):
+class _CacheMaker[K, T](Protocol):
     def __call__(
         self, capacity: int, make_node: Callable[[T], _Node[T]]
-    ) -> '_AbstractCache[T]': ...
+    ) -> '_AbstractCache[K, T]': ...
 
 
 @dataclass(repr=False, slots=True, weakref_slot=True)
-class _Cache[T]:
+class _Cache[K: Hashable, T]:
     capacity: int
     make_node: Callable[[T], _Node[T]] = field(repr=False)
     size: int = 0
-    store: dict[Hashable, _Node[T]] = field(default_factory=dict)
+    store: dict[K, _Node[T]] = field(default_factory=dict)
     stats: Stats = field(default_factory=Stats)
 
     def __post_init__(self) -> None:
@@ -132,8 +134,8 @@ class _Cache[T]:
         return f'{type(self).__name__}({", ".join(args)})'
 
 
-class _Heap[T](_Cache[T]):
-    def __getitem__(self, key: Hashable, /) -> T | _Empty:
+class _Heap[K: Hashable, T](_Cache[K, T]):
+    def __getitem__(self, key: K, /) -> T | _Empty:
         if node := self.store.get(key):
             self.stats.hits += 1
             return node.value
@@ -141,7 +143,7 @@ class _Heap[T](_Cache[T]):
         self.stats.misses += 1
         return _empty
 
-    def __setitem__(self, key: Hashable, value: T, /) -> None:
+    def __setitem__(self, key: K, value: T, /) -> None:
         if key in self.store:
             return
         node = self.make_node(value)
@@ -154,8 +156,8 @@ class _Heap[T](_Cache[T]):
         self.size += node.size
 
 
-class _LruMruCache[T](_Cache[T]):
-    def __getitem__(self, key: Hashable, /) -> T | _Empty:
+class _LruMruCache[K: Hashable, T](_Cache[K, T]):
+    def __getitem__(self, key: K, /) -> T | _Empty:
         if node := self.store.pop(key, None):
             self.stats.hits += 1
             self.store[key] = node
@@ -164,7 +166,7 @@ class _LruMruCache[T](_Cache[T]):
         self.stats.misses += 1
         return _empty
 
-    def __setitem__(self, key: Hashable, value: T, /) -> None:
+    def __setitem__(self, key: K, value: T, /) -> None:
         if key in self.store:
             return
         node = self.make_node(value)
@@ -184,13 +186,13 @@ class _LruMruCache[T](_Cache[T]):
         raise NotImplementedError
 
 
-class _LruCache[T](_LruMruCache[T]):
+class _LruCache[K: Hashable, T](_LruMruCache[K, T]):
     def pop(self) -> _Node:
         """Drop oldest node."""
         return self.store.pop(next(iter(self.store)))
 
 
-class _MruCache[T](_LruMruCache[T]):
+class _MruCache[K: Hashable, T](_LruMruCache[K, T]):
     def pop(self) -> _Node:
         """Drop most recently added node."""
         return self.store.popitem()[1]
@@ -200,26 +202,26 @@ class _MruCache[T](_LruMruCache[T]):
 
 
 @dataclass(frozen=True, kw_only=True)
-class _WeakCache[T]:
+class _WeakCache[K: Hashable, T]:
     """Retrieve items via weak references from everywhere."""
 
-    alive: WeakValueDictionary[Hashable, T] = field(
+    alive: WeakValueDictionary[K, T] = field(
         default_factory=WeakValueDictionary
     )
 
-    def __getitem__(self, key: Hashable, /) -> T | _Empty:
+    def __getitem__(self, key: K, /) -> T | _Empty:
         return self.alive.get(key, _empty)
 
-    def __setitem__(self, key: Hashable, value: T, /) -> None:
+    def __setitem__(self, key: K, value: T, /) -> None:
         if type(value).__weakrefoffset__:  # Support weak reference.
             self.alive[key] = value
 
 
 @dataclass(frozen=True, kw_only=True)
-class _StrongCache[R](_WeakCache[R]):
-    cache: _AbstractCache[R]
+class _StrongCache[K: Hashable, T](_WeakCache[K, T]):
+    cache: _AbstractCache[K, T]
 
-    def __getitem__(self, key: Hashable, /) -> R | _Empty:
+    def __getitem__(self, key: K, /) -> T | _Empty:
         # Alive and stored items.
         # Called first to update cache stats (i.e. MRU/LRU if any).
         # `cache` has subset of objects from `alive`.
@@ -228,16 +230,16 @@ class _StrongCache[R](_WeakCache[R]):
         # Item could still exist, try reference ...
         return super().__getitem__(key)
 
-    def __setitem__(self, key: Hashable, value: R, /) -> None:
+    def __setitem__(self, key: K, value: T, /) -> None:
         self.cache[key] = value
         super().__setitem__(key, value)
 
 
 @dataclass(frozen=True, slots=True)
-class _CacheState[R]:
-    cache: _AbstractCache[R]
-    key_fn: KeyFn
-    futures: WeakValueDictionary[Hashable, AnyFuture[R]] = field(
+class _CacheState[K: Hashable, R]:
+    cache: _AbstractCache[K, R]
+    key_fn: KeyFn[K]
+    futures: WeakValueDictionary[K, AnyFuture[R]] = field(
         default_factory=WeakValueDictionary
     )
 
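Every cache class above gains a second type parameter for the key, using PEP 695 syntax with an upper bound, so key types flow from `key_fn` through the stores instead of collapsing to bare `Hashable`. A small illustration of what the bound buys, outside glow (Python 3.12+, names here are illustrative):

from collections.abc import Hashable

class Cache[K: Hashable, T]:  # K must be hashable, as in _Cache above
    def __init__(self) -> None:
        self.store: dict[K, T] = {}

c = Cache[str, bytes]()       # a type checker now knows keys are str
c.store['a'] = b'payload'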
@@ -245,8 +247,19 @@ class _CacheState[R]:
 # --------------------------------- wrapping ---------------------------------
 
 
-def _sync_memoize[**P, R](
-    fn: Callable[P, R], cs: _CacheState[R]
+def _result[T](f: cf.Future[T]) -> T:
+    if f.cancelled():
+        with hide_frame:
+            raise cf.CancelledError
+    if exc := f.exception():
+        with hide_frame:
+            raise exc
+    return f.result()
+
+
+def _sync_memoize[K: Hashable, **P, R](
+    fn: Callable[P, R],
+    cs: _CacheState[K, R],
 ) -> Callable[P, R]:
     lock = RLock()
 
@@ -260,35 +273,39 @@ def _sync_memoize[**P, R](
 
         # ... or it could be computed somewhere else, join there.
         f = cs.futures.get(key)
-        if f is None:
+        if f:
+            assert isinstance(f, cf.Future)
+        else:
             cs.futures[key] = f = cf.Future[R]()
             is_owner = True
 
         # Release lock to allow function to run
         if not is_owner:
-            …
+            with hide_frame:
+                return _result(f)
 
-…
+        try:
+            with hide_frame:
                 ret = fn(*args, **kwargs)
-…
+        except BaseException as exc:
+            exc = clone_exc(exc)  # Protect from mutation by outer frame
+            f.set_exception(exc)
+            with lock:
+                cs.futures.pop(key)
+            raise
+        else:
+            f.set_result(ret)
+            with lock:
+                cs.cache[key] = ret
+                cs.futures.pop(key)
+            return ret
 
     return wrapper
 
 
-def _async_memoize[**P, R](
+def _async_memoize[K: Hashable, **P, R](
     fn: Callable[P, Awaitable[R]],
-    cs: _CacheState[R],
+    cs: _CacheState[K, R],
 ) -> Callable[P, Awaitable[R]]:
     async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
         key = cs.key_fn(*args, **kwargs)
@@ -299,23 +316,25 @@ def _async_memoize[**P, R](
 
         # ... or it could be computed somewhere else, join there.
        if f := cs.futures.get(key):
             assert isinstance(f, asyncio.Future)
-            return await f
+            with hide_frame:
+                return await f
         cs.futures[key] = f = asyncio.Future[R]()
 
         # NOTE: fn() is not within threading.Lock, thus it's not thread safe
         # NOTE: but it's async-safe because this `await` is only one here.
-…
+        try:
+            with hide_frame:
                 ret = await fn(*args, **kwargs)
-…
+        except BaseException as exc:
+            exc = clone_exc(exc)
+            f.set_exception(exc)
+            cs.futures.pop(key)
+            raise
+        else:
+            f.set_result(ret)
+            cs.cache[key] = ret
+            cs.futures.pop(key)
+            return ret
 
     return wrapper
 
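Both wrappers now pass the exception through `clone_exc` before storing it in the shared future (see the new `_dev.py` later in this diff). The reason: re-raising a single exception object mutates its `__traceback__` in place, so concurrent callers joined on one cached future would contaminate each other's tracebacks. A standalone sketch of the effect being guarded against:

err = ValueError('boom')

def reraise() -> None:
    raise err  # re-raises the *same* exception object

for attempt in (1, 2):
    try:
        reraise()
    except ValueError as e:
        depth = 0
        tb = e.__traceback__
        while tb:  # count frames in the traceback chain
            depth += 1
            tb = tb.tb_next
        print(attempt, depth)  # the chain grows on every re-raise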
@@ -323,26 +342,20 @@ def _async_memoize[**P, R](
 # ----------------------- wrapper with batching support ----------------------
 
 
-…
-class _Arg[T]:
-    arg: T
-
-
-class _BatchedQuery[T, R]:
+class _BatchedQuery[K: Hashable, T, R]:
     def __init__(
-        self, cs: _CacheState[R], *tokens: T, aio: bool = False
+        self, cs: _CacheState[K, R], *tokens: T, aio: bool = False
     ) -> None:
         self._cs = cs
         self._keys = [cs.key_fn(t) for t in tokens]  # All keys with duplicates
 
-        self._jobs: …
-        self._stash: …
-        self._done: dict[Hashable, R] = {}
+        self.jobs: list[tuple[K, Some[T] | None, AnyFuture[R]]] = []
+        self._done: dict[K, R] = {}
 
         for k, t in dict(zip(self._keys, tokens)).items():
             # If this key is processing right now, wait till its done ...
             if f := cs.futures.get(k):  # ! Requires sync
-                self._jobs.append((k, None, f))
+                self.jobs.append((k, None, f))  # Wait for this
 
             # ... else check if it's done ...
             elif (r := cs.cache[k]) is not _empty:  # ! Requires sync
@@ -351,88 +364,30 @@ class _BatchedQuery[T, R]:
             # ... otherwise schedule a new job.
             else:
                 f = asyncio.Future[R]() if aio else cf.Future[R]()
-                self._jobs.append((k, _Arg(t), f))
+                self.jobs.append((k, Some(t), f))  # Resolve this manually
                 cs.futures[k] = f  # ! Requires sync
 
-        self._errors: dict[BaseException, None] = {}
-        self._default_tp: type[BaseException] | None = None
-
-    def __bool__(self) -> bool:
-        return bool(self._jobs)
-
     @property
-    def result(self) -> …:
-        match list(self._errors):
-            case []:
-                if self._default_tp:
-                    return self._default_tp()
-                return Some([self._done[k] for k in self._keys])
-            case [e]:
-                return e
-            case excs:
-                msg = 'Got multiple exceptions'
-                if all(isinstance(e, Exception) for e in excs):
-                    return ExceptionGroup(msg, excs)  # type: ignore[type-var]
-                return BaseExceptionGroup(msg, excs)
-
-    @result.setter
-    def result(self, obj: Some[Sequence[R]] | BaseException) -> None:
-        done_jobs = [(k, f) for k, a, f in self._jobs if a]
-
-        if isinstance(obj, Some):
-            if isinstance(obj.x, Sequence):
-                if len(obj.x) == len(done_jobs):
-                    for (k, f), value in zip(done_jobs, obj.x):
-                        f.set_result(value)
-                        self._stash.append((k, value))
-                    return
-
-                obj = RuntimeError(
-                    f'Call with {len(done_jobs)} arguments '
-                    f'incorrectly returned {len(obj.x)} results'
-                )
-            else:
-                obj = TypeError(
-                    f'Call returned non-sequence. Got {type(obj.x).__name__}'
-                )
+    def pending_jobs(self) -> list[Job[T, R]]:
+        return [(a.x, f) for _, a, f in self.jobs if a]
 
-…
-            if isinstance(f, asyncio.Future):
-                f.exception()  # Mark exception as retrieved
-        self._errors[obj] = None
+    def running_as[F: AnyFuture](self, tp: type[F]) -> set[F]:
+        return {f for _, a, f in self.jobs if not a and isinstance(f, tp)}
 
-…
-        return [a.arg for _, a, _ in self._jobs if a]
-
-    def fs_as[F: AnyFuture](self, tp: type[F]) -> set[F]:
-        return {f for _, a, f in self._jobs if not a and isinstance(f, tp)}
-
-    def finalize_fs(self) -> None:
-        cerr = cf.CancelledError
-        aerr = asyncio.CancelledError
-        for k, a, f in self._jobs:
-            if a:
-                continue  # Our task, not "borrowed" one
-            if f.cancelled():
-                self._default_tp = cerr if isinstance(f, cf.Future) else aerr
-            elif e := f.exception():
-                self._errors[e] = None
-            else:
-                self._stash.append((k, f.result()))
-
-    def sync(self) -> None:
-        for k, r in self._stash:
+    def sync(self, stash: Mapping[K, R]) -> None:
+        for k, r in stash.items():
             self._done[k] = self._cs.cache[k] = r
 
         # Force next callers to use cache  # ! optional
-        for k in self._keys:
+        for k, _, _ in self.jobs:
            self._cs.futures.pop(k, None)
 
+    def result(self) -> list[R]:
+        return [self._done[k] for k in self._keys]
+
 
-def _sync_memoize_batched[T, R](
-    fn: BatchFn[T, R], cs: _CacheState[R]
+def _sync_memoize_batched[K: Hashable, T, R](
+    fn: BatchFn[T, R], cs: _CacheState[K, R]
 ) -> BatchFn[T, R]:
     lock = RLock()
 
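The query object above splits work three ways: keys already cached, keys another call is already computing (borrowed futures), and keys this call owns. A simplified, single-threaded sketch of that flow with illustrative names (glow's real implementation adds locking, weak references, and asyncio support):

import concurrent.futures as cf

cache: dict[str, int] = {}
in_flight: dict[str, cf.Future] = {}

def query(tokens, fn):
    owned, borrowed = [], []
    for t in dict.fromkeys(tokens):      # deduplicate, keep order
        if t in cache:
            continue
        if f := in_flight.get(t):
            borrowed.append((t, f))      # a concurrent call computes it
        else:
            in_flight[t] = f = cf.Future()
            owned.append((t, f))         # this call computes it
    if owned:
        for (t, f), r in zip(owned, fn([t for t, _ in owned])):
            f.set_result(r)
    cf.wait([f for _, f in borrowed])    # join neighbours' work
    for t, f in borrowed + owned:
        cache[t] = f.result()
        in_flight.pop(t, None)
    return [cache[t] for t in tokens]

print(query(['a', 'bb', 'a'], lambda xs: [len(x) for x in xs]))  # [1, 2, 1]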
@@ -440,58 +395,50 @@ def _sync_memoize_batched[T, R](
         with lock:
             q = _BatchedQuery(cs, *tokens)
 
+        stash: dict[K, R] = {}
         try:
-…
-                        q.result = Some(fn(args))
-                except BaseException as exc:  # noqa: BLE001
-                    q.result = exc
-
-            # Wait for completion of tasks scheduled by neighbour calls
-            if fs := q.fs_as(cf.Future):
+            if jobs := q.pending_jobs:
+                dispatch(fn, *jobs)
+
+            if fs := q.running_as(cf.Future):
                 cf.wait(fs)
-            q.finalize_fs()
+
+            stash, err = gather_fs((k, f) for k, _, f in q.jobs)
         finally:
-            if q:
+            if q.jobs:
                 with lock:
-                    q.sync()
+                    q.sync(stash)
 
-        if …
-            return …
+        if err is None:
+            return q.result()
         with hide_frame:
-            raise …
+            raise err
 
     return wrapper
 
 
-def _async_memoize_batched[T, R](
-    fn: ABatchFn[T, R], cs: _CacheState[R]
+def _async_memoize_batched[K: Hashable, T, R](
+    fn: ABatchFn[T, R], cs: _CacheState[K, R]
 ) -> ABatchFn[T, R]:
     async def wrapper(tokens: Iterable[T]) -> list[R]:
         q = _BatchedQuery(cs, *tokens, aio=True)
 
+        stash: dict[K, R] = {}
         try:
-…
-                        q.result = Some(await fn(args))
-                except BaseException as exc:  # noqa: BLE001
-                    q.result = exc  # Raise later in `q.exception()`
-
-            # Wait for completion of tasks scheduled by neighbour calls
-            if fs := q.fs_as(asyncio.Future):
+            if jobs := q.pending_jobs:
+                await adispatch(fn, *jobs)
+
+            if fs := q.running_as(asyncio.Future):
                 await asyncio.wait(fs)
-            q.finalize_fs()
+
+            stash, err = gather_fs((k, f) for k, _, f in q.jobs)
         finally:
-            q.sync()
+            q.sync(stash)
 
-        if …
-            return …
+        if err is None:
+            return q.result()
         with hide_frame:
-            raise …
+            raise err
 
     return wrapper
 
@@ -499,15 +446,12 @@ def _async_memoize_batched[T, R](
 # ------------------------------- decorations --------------------------------
 
 
-def _memoize[**P, R](
+def _memoize[K: Hashable, **P, R](
     fn: Callable[P, R],
     *,
-    cache: …,
-    key_fn: KeyFn,
+    cs: _CacheState[K, Any],
     batched: bool,
 ) -> Callable[P, R]:
-    cs = _CacheState(cache, key_fn)
-
     if batched and iscoroutinefunction(fn):
         w = cast(
             'Callable[P, R]',
@@ -523,9 +467,12 @@ def _memoize[**P, R](
     else:
         w = _sync_memoize(fn, cs=cs)
 
-…
+    w.running = cs.futures  # type: ignore[attr-defined]
+    if isinstance(cs.cache, _WeakCache):
+        w.wrefs = cs.cache.alive  # type: ignore[attr-defined]
+    if isinstance(cs.cache, _StrongCache):
+        w.cache = cs.cache.cache  # type: ignore[attr-defined]
+
     return functools.update_wrapper(w, fn)
 
 
@@ -559,7 +506,9 @@ def memoize(
     capacity = max(count, nbytes)
     if int(capacity) == 0:
         return functools.partial(  # type: ignore[return-value]
-            _memoize, …
+            _memoize,
+            cs=_CacheState(_WeakCache(), key_fn),
+            batched=batched,
         )
 
     if cache_cls := _CACHES.get(policy):
@@ -574,8 +523,7 @@ def memoize(
         cache = cache_cls(capacity, make_node)
         return functools.partial(  # type: ignore[return-value]
             _memoize,
-            cache=cache,
-            key_fn=key_fn,
+            cs=_CacheState(_StrongCache(cache=cache), key_fn),
             batched=batched,
         )
 
{glow-0.15.4 → glow-0.15.5}/src/glow/_concurrency.py

@@ -8,8 +8,8 @@ __all__ = [
 
 import sys
 import threading
-from collections.abc import Callable, Iterable, Sequence
-from concurrent.futures import CancelledError, Future, wait
+from collections.abc import Callable, Sequence
+from concurrent.futures import Future, wait
 from functools import partial, update_wrapper
 from queue import Empty, SimpleQueue
 from threading import Lock, Thread
@@ -19,7 +19,8 @@ from warnings import warn
 
 from ._cache import memoize
 from ._dev import hide_frame
-from ._types import …
+from ._futures import dispatch, gather_fs
+from ._types import BatchDecorator, BatchFn
 
 _PATIENCE = 0.01
 
@@ -114,37 +115,6 @@ def _fetch_batch[T](
     return batch
 
 
-def _batch_invoke[T, R](
-    func: BatchFn[T, R], batch: Sequence[_Job[T, R]]
-) -> None:
-    batch = [(x, f) for x, f in batch if f.set_running_or_notify_cancel()]
-    if not batch:
-        return
-
-    obj: Some[Sequence[R]] | BaseException
-    try:
-        with hide_frame:
-            obj = Some(func([x for x, _ in batch]))
-        if not isinstance(obj.x, Sequence):
-            obj = TypeError(
-                f'Call returned non-sequence. Got {type(obj.x).__name__}'
-            )
-        elif len(obj.x) != len(batch):
-            obj = RuntimeError(
-                f'Call with {len(batch)} arguments '
-                f'incorrectly returned {len(obj.x)} results'
-            )
-    except BaseException as exc:  # noqa: BLE001
-        obj = exc
-
-    if isinstance(obj, Some):
-        for (_, f), r in zip(batch, obj.x):
-            f.set_result(r)
-    else:
-        for _, f in batch:
-            f.set_exception(obj)
-
-
 def _start_fetch_compute[T, R](
     func: BatchFn[T, R],
     workers: int,
@@ -162,8 +132,10 @@ def _start_fetch_compute[T, R](
         # TODO: implement above
         with lock:  # Ensurance that none worker steals tasks from other
             batch = _fetch_batch(q, batch_size, timeout)
-        if batch:
-            _batch_invoke(func, batch)
+        if batch := [
+            (x, f) for x, f in batch if f.set_running_or_notify_cancel()
+        ]:
+            dispatch(func, *batch)
         else:
             sleep(0.001)
 
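The filtering that `_batch_invoke` used to do now happens at the fetch site: `set_running_or_notify_cancel()` atomically claims each pending future and weeds out those cancelled while queued, so `dispatch` only ever sees live jobs. A standalone sketch of that stdlib behavior:

import concurrent.futures as cf

f1, f2 = cf.Future(), cf.Future()
f2.cancel()  # e.g. the caller timed out while the job sat in the queue

batch = [('a', f1), ('b', f2)]
live = [(x, f) for x, f in batch if f.set_running_or_notify_cancel()]
print([x for x, _ in live])  # ['a'] — the cancelled job is dropped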
@@ -217,7 +189,7 @@ def streaming[T, R](
     q = _start_fetch_compute(func, workers, batch_size, timeout)
 
     def wrapper(items: Sequence[T]) -> Sequence[R]:
-        fs = {Future(): item for item in items}
+        fs = {Future[R](): item for item in items}
         try:
             for f, x in fs.items():
                 q.put((x, f))  # Schedule task
@@ -232,37 +204,14 @@ def streaming[T, R](
                 raise TimeoutError
 
         # Cannot time out - all are done
-…
+        rs, err = gather_fs(enumerate(fs))
+        if err is None:
+            return list(rs.values())
+        with hide_frame:
+            raise err
 
     # TODO: if func is instance method - recreate wrapper per instance
     # TODO: find how to distinguish between
     # TODO: not yet bound method and plain function
     # TODO: maybe implement __get__ on wrapper
     return update_wrapper(wrapper, func)
-
-
-def _gather[R](fs: Iterable[AnyFuture[R]]) -> list[R] | BaseException:
-    cancel: CancelledError | None = None
-    errors: dict[BaseException, None] = {}
-    results: list[R] = []
-    for f in fs:
-        if f.cancelled():
-            cancel = CancelledError()
-        elif exc := f.exception():
-            errors[exc] = None
-        else:
-            results.append(f.result())
-
-    match list(errors):
-        case []:
-            return cancel or results
-        case [err]:
-            return err
-        case errs:
-            msg = 'Got multiple exceptions'
-            if all(isinstance(e, Exception) for e in errs):
-                return ExceptionGroup(msg, errs)  # type: ignore[type-var]
-            return BaseExceptionGroup(msg, errs)
glow-0.15.5/src/glow/_dev.py NEW

@@ -0,0 +1,29 @@
+__all__ = ['clone_exc', 'hide_frame']
+
+from types import TracebackType
+
+
+class _HideFrame:
+    """Context manager to hide current frame in traceback"""
+
+    def __enter__(self):
+        return self
+
+    def __exit__(
+        self, tp, val: BaseException | None, tb: TracebackType | None
+    ):
+        if val is not None:
+            tb = val.__traceback__ or tb
+            if tb:
+                val.__traceback__ = tb.tb_next  # Drop outer traceback frame
+
+
+def clone_exc[E: BaseException](exc: E) -> E:
+    new_exc = type(exc)(*exc.args)
+    new_exc.__cause__ = exc.__cause__
+    new_exc.__context__ = exc.__context__
+    new_exc.__traceback__ = exc.__traceback__
+    return new_exc
+
+
+hide_frame = _HideFrame()
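A sketch of what `hide_frame` does to a traceback, assuming glow 0.15.5 is installed (`glow._dev` is private API, imported here only for illustration):

import traceback

from glow._dev import hide_frame  # private helper from the file above

def inner() -> None:
    raise ValueError('boom')

def outer() -> None:
    with hide_frame:  # drops this frame from the resulting traceback
        inner()

try:
    outer()
except ValueError as e:
    print(''.join(traceback.format_tb(e.__traceback__)))
    # Frames shown: this module and inner() — the outer() frame is hidden.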
glow-0.15.5/src/glow/_futures.py NEW

@@ -0,0 +1,104 @@
+import asyncio
+import concurrent.futures as cf
+from collections.abc import Hashable, Iterable, Sequence
+
+from ._dev import hide_frame
+from ._types import ABatchFn, AnyFuture, BatchFn, Job, Some
+
+
+def dispatch[T, R](fn: BatchFn[T, R], *xs: Job[T, R]) -> None:
+    if not xs:
+        return
+
+    obj: Some[Sequence[R]] | BaseException
+    try:
+        with hide_frame:
+            ret = fn([x for x, _ in xs])
+    except BaseException as exc:  # noqa: BLE001
+        obj = exc
+    else:
+        obj = _check_protocol(ret, len(xs))
+
+    if isinstance(obj, Some):
+        for (_, f), res in zip(xs, obj.x):
+            f.set_result(res)
+    else:
+        for _, f in xs:
+            f.set_exception(obj)
+
+
+async def adispatch[T, R](fn: ABatchFn[T, R], *xs: Job[T, R]) -> None:
+    if not xs:
+        return
+
+    obj: Some[Sequence[R]] | BaseException
+    try:
+        with hide_frame:
+            ret = await fn([x for x, _ in xs])
+    except asyncio.CancelledError:
+        for _, f in xs:
+            f.cancel()
+        raise
+    except BaseException as exc:  # noqa: BLE001
+        obj = exc
+    else:
+        obj = _check_protocol(ret, len(xs))
+
+    if isinstance(obj, Some):
+        for (_, f), res in zip(xs, obj.x):
+            f.set_result(res)
+    else:
+        for _, f in xs:
+            f.set_exception(obj)
+            if isinstance(f, asyncio.Future):
+                f.exception()  # Mark exception as retrieved
+
+
+def _check_protocol[S: Sequence](ret: S, n: int) -> Some[S] | BaseException:
+    if not isinstance(ret, Sequence):
+        return TypeError(
+            f'Call returned non-sequence. Got {type(ret).__name__}'
+        )
+    if len(ret) != n:
+        return RuntimeError(
+            f'Call with {n} arguments '
+            f'incorrectly returned {len(ret)} results'
+        )
+    return Some(ret)
+
+
+def gather_fs[K: Hashable, R](
+    fs: Iterable[tuple[K, AnyFuture[R]]],
+) -> tuple[dict[K, R], BaseException | None]:
+    results: dict[K, R] = {}
+    errors = set[BaseException]()
+    default: BaseException | None = None
+    for k, f in fs:
+        if f.cancelled():
+            exc_tp = _fut_tp_to_cancel_tp.get(type(f))
+            assert exc_tp, f'Unknown future type: {type(f).__qualname__}'
+            assert default is None or isinstance(default, exc_tp)
+            default = exc_tp()
+        elif e := f.exception():
+            errors.add(e)
+        else:
+            results[k] = f.result()
+
+    match list(errors):
+        case []:
+            return (results, default)
+        case [err]:
+            return (results, err)
+        case errs:
+            msg = 'Got multiple exceptions'
+            if all(isinstance(e, Exception) for e in errs):
+                err = ExceptionGroup(msg, errs)  # type: ignore[type-var]
+            else:
+                err = BaseExceptionGroup(msg, errs)
+            return (results, err)
+
+
+_fut_tp_to_cancel_tp: dict[type[AnyFuture], type[BaseException]] = {
+    cf.Future: cf.CancelledError,
+    asyncio.Future: asyncio.CancelledError,
+}
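Both dispatch helpers enforce the same batch protocol via `_check_protocol`: the callable must return a sequence with exactly one result per input. A sketch exercising the new module (private API, used here only to illustrate the contract; assumes glow 0.15.5):

import concurrent.futures as cf

from glow._futures import dispatch, gather_fs  # private, new in 0.15.5

def double(xs):  # a BatchFn[int, int]
    return [x * 2 for x in xs]

jobs = [(x, cf.Future()) for x in (1, 2, 3)]
dispatch(double, *jobs)
results, err = gather_fs(jobs)
assert err is None and results == {1: 2, 2: 4, 3: 6}

def bad(xs):  # violates the protocol: wrong result count
    return [0]

jobs = [(x, cf.Future()) for x in (1, 2)]
dispatch(bad, *jobs)
_, err = gather_fs(jobs)
print(err)  # RuntimeError: Call with 2 arguments incorrectly returned 1 results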
{glow-0.15.4 → glow-0.15.5}/src/glow/_types.py

@@ -13,7 +13,7 @@ from collections.abc import (
 from dataclasses import dataclass
 from typing import Any, Literal, Protocol, overload
 
-type KeyFn = Callable[..., Hashable]
+type KeyFn[H: Hashable] = Callable[..., H]
 
 type Coro[T] = Coroutine[Any, Any, T]
 type AnyIterable[T] = AsyncIterable[T] | Iterable[T]
@@ -23,6 +23,7 @@ type BatchFn[T, R] = Callable[[Sequence[T]], Sequence[R]]
 type ABatchFn[T, R] = Callable[[Sequence[T]], Coro[Sequence[R]]]
 
 type AnyFuture[R] = cf.Future[R] | asyncio.Future[R]
+type Job[T, R] = tuple[T, AnyFuture[R]]
 
 type Get[T] = Callable[[], T]
 type Callback[T] = Callable[[T], object]
glow-0.15.4/src/glow/_dev.py DELETED

@@ -1,18 +0,0 @@
-__all__ = ['hide_frame']
-
-
-class _HideFrame:
-    """Context manager to hide current frame in traceback"""
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, tp, val, tb):
-        if tp is None:
-            return True
-        if tb := val.__traceback__:
-            val.__traceback__ = tb.tb_next
-        return False
-
-
-hide_frame = _HideFrame()