glow 0.15.4__tar.gz → 0.15.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. {glow-0.15.4 → glow-0.15.6}/PKG-INFO +1 -1
  2. {glow-0.15.4 → glow-0.15.6}/pyproject.toml +1 -1
  3. {glow-0.15.4 → glow-0.15.6}/src/glow/_async.py +5 -41
  4. {glow-0.15.4 → glow-0.15.6}/src/glow/_async.pyi +2 -1
  5. {glow-0.15.4 → glow-0.15.6}/src/glow/_cache.py +142 -195
  6. {glow-0.15.4 → glow-0.15.6}/src/glow/_cache.pyi +2 -1
  7. {glow-0.15.4 → glow-0.15.6}/src/glow/_concurrency.py +19 -72
  8. {glow-0.15.4 → glow-0.15.6}/src/glow/_concurrency.pyi +5 -4
  9. {glow-0.15.4 → glow-0.15.6}/src/glow/_coro.py +1 -1
  10. {glow-0.15.4 → glow-0.15.6}/src/glow/_debug.py +13 -6
  11. glow-0.15.6/src/glow/_dev.py +29 -0
  12. glow-0.15.6/src/glow/_futures.py +124 -0
  13. {glow-0.15.4 → glow-0.15.6}/src/glow/_import_hook.py +4 -4
  14. {glow-0.15.4 → glow-0.15.6}/src/glow/_parallel.py +10 -6
  15. {glow-0.15.4 → glow-0.15.6}/src/glow/_profile.py +8 -8
  16. {glow-0.15.4 → glow-0.15.6}/src/glow/_profile.pyi +7 -7
  17. {glow-0.15.4 → glow-0.15.6}/src/glow/_reduction.py +2 -1
  18. {glow-0.15.4 → glow-0.15.6}/src/glow/_streams.py +4 -3
  19. glow-0.15.6/src/glow/_types.py +31 -0
  20. {glow-0.15.4 → glow-0.15.6}/src/glow/_uuid.py +7 -1
  21. {glow-0.15.4 → glow-0.15.6}/src/glow/_wrap.py +3 -1
  22. {glow-0.15.4 → glow-0.15.6}/src/glow/cli.py +10 -11
  23. {glow-0.15.4 → glow-0.15.6}/src/glow/cli.pyi +3 -1
  24. {glow-0.15.4 → glow-0.15.6}/src/glow/io/_sound.py +20 -8
  25. {glow-0.15.4 → glow-0.15.6}/test/test_cli.py +7 -0
  26. glow-0.15.4/src/glow/_dev.py +0 -18
  27. glow-0.15.4/src/glow/_types.py +0 -52
  28. {glow-0.15.4 → glow-0.15.6}/.gitignore +0 -0
  29. {glow-0.15.4 → glow-0.15.6}/LICENSE +0 -0
  30. {glow-0.15.4 → glow-0.15.6}/README.md +0 -0
  31. {glow-0.15.4 → glow-0.15.6}/src/glow/__init__.py +0 -0
  32. {glow-0.15.4 → glow-0.15.6}/src/glow/_array.py +0 -0
  33. {glow-0.15.4 → glow-0.15.6}/src/glow/_ic.py +0 -0
  34. {glow-0.15.4 → glow-0.15.6}/src/glow/_imutil.py +0 -0
  35. {glow-0.15.4 → glow-0.15.6}/src/glow/_keys.py +0 -0
  36. {glow-0.15.4 → glow-0.15.6}/src/glow/_logging.py +0 -0
  37. {glow-0.15.4 → glow-0.15.6}/src/glow/_more.py +0 -0
  38. {glow-0.15.4 → glow-0.15.6}/src/glow/_parallel.pyi +0 -0
  39. {glow-0.15.4 → glow-0.15.6}/src/glow/_patch_len.py +0 -0
  40. {glow-0.15.4 → glow-0.15.6}/src/glow/_patch_print.py +0 -0
  41. {glow-0.15.4 → glow-0.15.6}/src/glow/_patch_scipy.py +0 -0
  42. {glow-0.15.4 → glow-0.15.6}/src/glow/_repr.py +0 -0
  43. {glow-0.15.4 → glow-0.15.6}/src/glow/_reusable.py +0 -0
  44. {glow-0.15.4 → glow-0.15.6}/src/glow/_sizeof.py +0 -0
  45. {glow-0.15.4 → glow-0.15.6}/src/glow/_thread_quota.py +0 -0
  46. {glow-0.15.4 → glow-0.15.6}/src/glow/api/__init__.py +0 -0
  47. {glow-0.15.4 → glow-0.15.6}/src/glow/api/config.py +0 -0
  48. {glow-0.15.4 → glow-0.15.6}/src/glow/api/exporting.py +0 -0
  49. {glow-0.15.4 → glow-0.15.6}/src/glow/io/__init__.py +0 -0
  50. {glow-0.15.4 → glow-0.15.6}/src/glow/io/_svg.py +0 -0
  51. {glow-0.15.4 → glow-0.15.6}/src/glow/py.typed +0 -0
  52. {glow-0.15.4 → glow-0.15.6}/test/__init__.py +0 -0
  53. {glow-0.15.4 → glow-0.15.6}/test/test_api.py +0 -0
  54. {glow-0.15.4 → glow-0.15.6}/test/test_batch.py +0 -0
  55. {glow-0.15.4 → glow-0.15.6}/test/test_buffered.py +0 -0
  56. {glow-0.15.4 → glow-0.15.6}/test/test_iter.py +0 -0
  57. {glow-0.15.4 → glow-0.15.6}/test/test_shm.py +0 -0
  58. {glow-0.15.4 → glow-0.15.6}/test/test_thread_pool.py +0 -0
  59. {glow-0.15.4 → glow-0.15.6}/test/test_timed.py +0 -0
  60. {glow-0.15.4 → glow-0.15.6}/test/test_timer.py +0 -0
  61. {glow-0.15.4 → glow-0.15.6}/test/test_uuid.py +0 -0

{glow-0.15.4 → glow-0.15.6}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: glow
-Version: 0.15.4
+Version: 0.15.6
 Summary: Functional Python tools
 Project-URL: homepage, https://github.com/arquolo/glow
 Author-email: Paul Maevskikh <arquolo@gmail.com>

{glow-0.15.4 → glow-0.15.6}/pyproject.toml
@@ -7,7 +7,7 @@ only-packages = true
 
 [project]
 name = "glow"
-version = "0.15.4"
+version = "0.15.6"
 description = "Functional Python tools"
 readme = "README.md"
 requires-python = ">=3.12"

{glow-0.15.4 → glow-0.15.6}/src/glow/_async.py
@@ -17,17 +17,8 @@ from functools import partial
 from typing import TypeGuard, cast, overload
 
 from ._dev import hide_frame
-from ._types import (
-    ABatchDecorator,
-    ABatchFn,
-    AnyFuture,
-    AnyIterable,
-    AnyIterator,
-    Coro,
-    Some,
-)
-
-type _Job[T, R] = tuple[T, AnyFuture[R]]
+from ._futures import ABatchDecorator, ABatchFn, Job, adispatch
+from ._types import AnyIterable, AnyIterator, Coro
 
 
 async def amap_dict[K, T1, T2](
@@ -263,7 +254,7 @@ def astreaming[T, R](
     assert batch_size is None or batch_size >= 1
     assert timeout > 0
 
-    buf: list[_Job[T, R]] = []
+    buf: list[Job[T, R]] = []
     deadline = float('-inf')
     not_last = asyncio.Event()
     lock = asyncio.Lock()
@@ -293,7 +284,7 @@ def astreaming[T, R](
             if batch_size is not None and len(buf) == batch_size:
                 batch, buf[:] = buf[:], []
                 async with lock:
-                    await _adispatch(fn, *batch)
+                    await adispatch(fn, *batch)
         finally:
             ncalls -= 1
 
@@ -308,36 +299,9 @@ def astreaming[T, R](
             if not notified:
                 batch, buf[:] = buf[:], []
                 async with lock:
-                    await _adispatch(fn, *batch)
+                    await adispatch(fn, *batch)
 
         with hide_frame:
             return await asyncio.gather(*fs)
 
     return wrapper
-
-
-async def _adispatch[T, R](fn: ABatchFn[T, R], *xs: _Job[T, R]) -> None:
-    if not xs:
-        return
-    obj: Some[Sequence[R]] | BaseException
-    try:
-        with hide_frame:
-            obj = Some(await fn([x for x, _ in xs]))
-        if not isinstance(obj.x, Sequence):
-            obj = TypeError(
-                f'Call returned non-sequence. Got {type(obj.x).__name__}'
-            )
-        elif len(obj.x) != len(xs):
-            obj = RuntimeError(
-                f'Call with {len(xs)} arguments '
-                f'incorrectly returned {len(obj.x)} results'
-            )
-    except BaseException as exc:  # noqa: BLE001
-        obj = exc
-
-    if isinstance(obj, Some):
-        for (_, f), res in zip(xs, obj.x):
-            f.set_result(res)
-    else:
-        for _, f in xs:
-            f.set_exception(obj)
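
Note: per the new imports, the `_adispatch` helper removed above moves into the new shared module `glow/_futures.py` as `adispatch`. A minimal self-contained sketch of the contract its removed body enforced (assuming the moved helper stays behaviorally equivalent; only the 0.15.4 body is visible in this diff):

import asyncio
from collections.abc import Awaitable, Callable, Sequence

async def dispatch_batch[T, R](
    fn: Callable[[list[T]], Awaitable[Sequence[R]]],
    *jobs: tuple[T, asyncio.Future[R]],
) -> None:
    # Run `fn` once over all pending args, then fan results out per future.
    if not jobs:
        return
    try:
        res = await fn([x for x, _ in jobs])
        if not isinstance(res, Sequence):
            raise TypeError(
                f'Call returned non-sequence. Got {type(res).__name__}'
            )
        if len(res) != len(jobs):
            raise RuntimeError(
                f'Call with {len(jobs)} arguments '
                f'incorrectly returned {len(res)} results'
            )
    except BaseException as exc:  # One failure fails every waiter.
        for _, f in jobs:
            f.set_exception(exc)
    else:
        for (_, f), r in zip(jobs, res):
            f.set_result(r)
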

{glow-0.15.4 → glow-0.15.6}/src/glow/_async.pyi
@@ -1,7 +1,8 @@
 from collections.abc import AsyncIterator, Callable, Mapping
 from typing import Any, Required, TypedDict, Unpack, overload
 
-from ._types import ABatchDecorator, ABatchFn, AnyIterable, Coro
+from ._futures import ABatchDecorator, ABatchFn
+from ._types import AnyIterable, Coro
 
 class _AmapKwargs(TypedDict, total=False):
     limit: Required[int]

{glow-0.15.4 → glow-0.15.6}/src/glow/_cache.py
@@ -11,28 +11,29 @@ from collections.abc import (
     Iterable,
     Iterator,
     KeysView,
+    Mapping,
     MutableMapping,
-    Sequence,
 )
 from dataclasses import dataclass, field
 from inspect import iscoroutinefunction
 from threading import RLock
-from typing import Final, Protocol, SupportsInt, cast
+from typing import Any, Final, Protocol, SupportsInt, cast
 from weakref import WeakValueDictionary
 
-from ._dev import hide_frame
-from ._keys import make_key
-from ._repr import si_bin
-from ._sizeof import sizeof
-from ._types import (
+from ._dev import clone_exc, hide_frame
+from ._futures import (
     ABatchFn,
     AnyFuture,
     BatchFn,
-    CachePolicy,
-    Decorator,
-    KeyFn,
-    Some,
+    Job,
+    adispatch,
+    dispatch,
+    gather_fs,
 )
+from ._keys import make_key
+from ._repr import si_bin
+from ._sizeof import sizeof
+from ._types import CachePolicy, Decorator, KeyFn, Some
 
 
 class _Empty(enum.Enum):
@@ -85,23 +86,23 @@ def cache_status() -> str:
 _REFS: MutableMapping[int, '_Cache'] = WeakValueDictionary()
 
 
-class _AbstractCache[T](Protocol):
-    def __getitem__(self, key: Hashable) -> T | _Empty: ...
-    def __setitem__(self, key: Hashable, value: T) -> None: ...
+class _AbstractCache[K: Hashable, T](Protocol):
+    def __getitem__(self, key: K, /) -> T | _Empty: ...
+    def __setitem__(self, key: K, value: T, /) -> None: ...
 
 
-class _CacheMaker[T](Protocol):
+class _CacheMaker[K, T](Protocol):
     def __call__(
         self, capacity: int, make_node: Callable[[T], _Node[T]]
-    ) -> '_AbstractCache[T]': ...
+    ) -> '_AbstractCache[K, T]': ...
 
 
 @dataclass(repr=False, slots=True, weakref_slot=True)
-class _Cache[T]:
+class _Cache[K: Hashable, T]:
     capacity: int
     make_node: Callable[[T], _Node[T]] = field(repr=False)
     size: int = 0
-    store: dict[Hashable, _Node[T]] = field(default_factory=dict)
+    store: dict[K, _Node[T]] = field(default_factory=dict)
     stats: Stats = field(default_factory=Stats)
 
     def __post_init__(self) -> None:
@@ -132,8 +133,8 @@ class _Cache[T]:
         return f'{type(self).__name__}({", ".join(args)})'
 
 
-class _Heap[T](_Cache[T]):
-    def __getitem__(self, key: Hashable) -> T | _Empty:
+class _Heap[K: Hashable, T](_Cache[K, T]):
+    def __getitem__(self, key: K, /) -> T | _Empty:
         if node := self.store.get(key):
             self.stats.hits += 1
             return node.value
@@ -141,7 +142,7 @@ class _Heap[T](_Cache[T]):
         self.stats.misses += 1
         return _empty
 
-    def __setitem__(self, key: Hashable, value: T) -> None:
+    def __setitem__(self, key: K, value: T, /) -> None:
         if key in self.store:
             return
         node = self.make_node(value)
@@ -154,8 +155,8 @@ class _Heap[T](_Cache[T]):
         self.size += node.size
 
 
-class _LruMruCache[T](_Cache[T]):
-    def __getitem__(self, key: Hashable) -> T | _Empty:
+class _LruMruCache[K: Hashable, T](_Cache[K, T]):
+    def __getitem__(self, key: K, /) -> T | _Empty:
         if node := self.store.pop(key, None):
             self.stats.hits += 1
             self.store[key] = node
@@ -164,7 +165,7 @@ class _LruMruCache[T](_Cache[T]):
         self.stats.misses += 1
         return _empty
 
-    def __setitem__(self, key: Hashable, value: T) -> None:
+    def __setitem__(self, key: K, value: T, /) -> None:
         if key in self.store:
             return
         node = self.make_node(value)
@@ -184,13 +185,13 @@ class _LruMruCache[T](_Cache[T]):
         raise NotImplementedError
 
 
-class _LruCache[T](_LruMruCache[T]):
+class _LruCache[K: Hashable, T](_LruMruCache[K, T]):
     def pop(self) -> _Node:
         """Drop oldest node."""
         return self.store.pop(next(iter(self.store)))
 
 
-class _MruCache[T](_LruMruCache[T]):
+class _MruCache[K: Hashable, T](_LruMruCache[K, T]):
     def pop(self) -> _Node:
         """Drop most recently added node."""
         return self.store.popitem()[1]
@@ -200,26 +201,26 @@ class _MruCache[T](_LruMruCache[T]):
 
 
 @dataclass(frozen=True, kw_only=True)
-class _WeakCache[T]:
+class _WeakCache[K: Hashable, T]:
     """Retrieve items via weak references from everywhere."""
 
-    alive: WeakValueDictionary[Hashable, T] = field(
+    alive: WeakValueDictionary[K, T] = field(
         default_factory=WeakValueDictionary
     )
 
-    def __getitem__(self, key: Hashable) -> T | _Empty:
+    def __getitem__(self, key: K, /) -> T | _Empty:
         return self.alive.get(key, _empty)
 
-    def __setitem__(self, key: Hashable, value: T) -> None:
+    def __setitem__(self, key: K, value: T, /) -> None:
         if type(value).__weakrefoffset__:  # Support weak reference.
             self.alive[key] = value
 
 
 @dataclass(frozen=True, kw_only=True)
-class _StrongCache[R](_WeakCache[R]):
-    cache: _AbstractCache[R]
+class _StrongCache[K: Hashable, T](_WeakCache[K, T]):
+    cache: _AbstractCache[K, T]
 
-    def __getitem__(self, key: Hashable) -> R | _Empty:
+    def __getitem__(self, key: K, /) -> T | _Empty:
         # Alive and stored items.
         # Called first to update cache stats (i.e. MRU/LRU if any).
         # `cache` has subset of objects from `alive`.
@@ -228,16 +229,16 @@ class _StrongCache[R](_WeakCache[R]):
         # Item could still exist, try reference ...
         return super().__getitem__(key)
 
-    def __setitem__(self, key: Hashable, value: R) -> None:
+    def __setitem__(self, key: K, value: T, /) -> None:
         self.cache[key] = value
         super().__setitem__(key, value)
 
 
 @dataclass(frozen=True, slots=True)
-class _CacheState[R]:
-    cache: _AbstractCache[R]
-    key_fn: KeyFn
-    futures: WeakValueDictionary[Hashable, AnyFuture[R]] = field(
+class _CacheState[K: Hashable, R]:
+    cache: _AbstractCache[K, R]
+    key_fn: KeyFn[K]
+    futures: WeakValueDictionary[K, AnyFuture[R]] = field(
         default_factory=WeakValueDictionary
     )
 
@@ -245,8 +246,19 @@ class _CacheState[R]:
 # --------------------------------- wrapping ---------------------------------
 
 
-def _sync_memoize[**P, R](
-    fn: Callable[P, R], cs: _CacheState[R]
+def _result[T](f: cf.Future[T]) -> T:
+    if f.cancelled():
+        with hide_frame:
+            raise cf.CancelledError
+    if exc := f.exception():
+        with hide_frame:
+            raise exc
+    return f.result()
+
+
+def _sync_memoize[K: Hashable, **P, R](
+    fn: Callable[P, R],
+    cs: _CacheState[K, R],
 ) -> Callable[P, R]:
     lock = RLock()
 
@@ -260,35 +272,39 @@ def _sync_memoize[**P, R](
 
             # ... or it could be computed somewhere else, join there.
             f = cs.futures.get(key)
-            if not f:
+            if f:
+                assert isinstance(f, cf.Future)
+            else:
                 cs.futures[key] = f = cf.Future[R]()
                 is_owner = True
 
         # Release lock to allow function to run
         if not is_owner:
-            return f.result()
+            with hide_frame:
+                return _result(f)
 
-        with hide_frame:
-            try:
+        try:
+            with hide_frame:
                 ret = fn(*args, **kwargs)
-            except BaseException as exc:
-                f.set_exception(exc)
-                with lock:
-                    cs.futures.pop(key)
-                raise
-            else:
-                f.set_result(ret)
-                with lock:
-                    cs.cache[key] = ret
-                    cs.futures.pop(key)
-                return ret
+        except BaseException as exc:
+            exc = clone_exc(exc)  # Protect from mutation by outer frame
+            f.set_exception(exc)
+            with lock:
+                cs.futures.pop(key)
+            raise
+        else:
+            f.set_result(ret)
+            with lock:
+                cs.cache[key] = ret
+                cs.futures.pop(key)
+            return ret
 
     return wrapper
 
 
-def _async_memoize[**P, R](
+def _async_memoize[K: Hashable, **P, R](
     fn: Callable[P, Awaitable[R]],
-    cs: _CacheState[R],
+    cs: _CacheState[K, R],
 ) -> Callable[P, Awaitable[R]]:
     async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
         key = cs.key_fn(*args, **kwargs)
@@ -299,23 +315,25 @@ def _async_memoize[**P, R](
         # ... or it could be computed somewhere else, join there.
         if f := cs.futures.get(key):
             assert isinstance(f, asyncio.Future)
-            return await f
+            with hide_frame:
+                return await f
         cs.futures[key] = f = asyncio.Future[R]()
 
         # NOTE: fn() is not within threading.Lock, thus it's not thread safe
         # NOTE: but it's async-safe because this `await` is only one here.
-        with hide_frame:
-            try:
+        try:
+            with hide_frame:
                 ret = await fn(*args, **kwargs)
-            except BaseException as exc:
-                f.set_exception(exc)
-                cs.futures.pop(key)
-                raise
-            else:
-                f.set_result(ret)
-                cs.cache[key] = ret
-                cs.futures.pop(key)
-                return ret
+        except BaseException as exc:
+            exc = clone_exc(exc)
+            f.set_exception(exc)
+            cs.futures.pop(key)
+            raise
+        else:
+            f.set_result(ret)
+            cs.cache[key] = ret
+            cs.futures.pop(key)
+            return ret
 
     return wrapper
 
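
Note: both wrappers above implement the same single-flight scheme: the first caller for a key registers a future under that key, computes, and publishes the result; concurrent callers find the future and wait on it instead of recomputing. A stripped-down synchronous illustration (names are illustrative, not glow's API):

import concurrent.futures as cf
from collections.abc import Callable, Hashable
from threading import RLock

def single_flight[K: Hashable, R](fn: Callable[[K], R]) -> Callable[[K], R]:
    cache: dict[K, R] = {}
    futures: dict[K, cf.Future[R]] = {}
    lock = RLock()

    def wrapper(key: K) -> R:
        with lock:
            if key in cache:  # Fast path: already computed.
                return cache[key]
            if f := futures.get(key):  # Someone else is computing it.
                is_owner = False
            else:  # We own this computation.
                futures[key] = f = cf.Future()
                is_owner = True
        if not is_owner:
            return f.result()  # Join the in-flight call.
        try:
            ret = fn(key)
        except BaseException as exc:
            f.set_exception(exc)  # Wake waiters with the error.
            with lock:
                del futures[key]  # Let later callers retry.
            raise
        f.set_result(ret)
        with lock:
            cache[key] = ret
            del futures[key]  # From now on callers hit the cache.
        return ret

    return wrapper
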
@@ -323,26 +341,20 @@ def _async_memoize[**P, R](
 # ----------------------- wrapper with batching support ----------------------
 
 
-@dataclass(slots=True, frozen=True)
-class _Arg[T]:
-    arg: T
-
-
-class _BatchedQuery[T, R]:
+class _BatchedQuery[K: Hashable, T, R]:
     def __init__(
-        self, cs: _CacheState[R], *tokens: T, aio: bool = False
+        self, cs: _CacheState[K, R], *tokens: T, aio: bool = False
     ) -> None:
         self._cs = cs
         self._keys = [cs.key_fn(t) for t in tokens]  # All keys with duplicates
 
-        self._jobs: list[tuple[Hashable, _Arg[T] | None, AnyFuture[R]]] = []
-        self._stash: list[tuple[Hashable, R]] = []
-        self._done: dict[Hashable, R] = {}
+        self.jobs: list[tuple[K, Some[T] | None, AnyFuture[R]]] = []
+        self._done: dict[K, R] = {}
 
         for k, t in dict(zip(self._keys, tokens)).items():
             # If this key is processing right now, wait till its done ...
             if f := cs.futures.get(k):  # ! Requires sync
-                self._jobs.append((k, None, f))  # Wait for this
+                self.jobs.append((k, None, f))  # Wait for this
 
             # ... else check if it's done ...
             elif (r := cs.cache[k]) is not _empty:  # ! Requires sync
@@ -351,88 +363,30 @@ class _BatchedQuery[T, R]:
             # ... otherwise schedule a new job.
             else:
                 f = asyncio.Future[R]() if aio else cf.Future[R]()
-                self._jobs.append((k, _Arg(t), f))  # Resolve this manually
+                self.jobs.append((k, Some(t), f))  # Resolve this manually
                 cs.futures[k] = f  # ! Requires sync
 
-        self._errors: dict[BaseException, None] = {}
-        self._default_tp: type[BaseException] | None = None
-
-    def __bool__(self) -> bool:
-        return bool(self._jobs)
-
     @property
-    def result(self) -> Some[Sequence[R]] | BaseException:
-        match list(self._errors):
-            case []:
-                if self._default_tp:
-                    return self._default_tp()
-                return Some([self._done[k] for k in self._keys])
-            case [e]:
-                return e
-            case excs:
-                msg = 'Got multiple exceptions'
-                if all(isinstance(e, Exception) for e in excs):
-                    return ExceptionGroup(msg, excs)  # type: ignore[type-var]
-                return BaseExceptionGroup(msg, excs)
-
-    @result.setter
-    def result(self, obj: Some[Sequence[R]] | BaseException) -> None:
-        done_jobs = [(k, f) for k, a, f in self._jobs if a]
-
-        if isinstance(obj, Some):
-            if isinstance(obj.x, Sequence):
-                if len(obj.x) == len(done_jobs):
-                    for (k, f), value in zip(done_jobs, obj.x):
-                        f.set_result(value)
-                        self._stash.append((k, value))
-                    return
-
-                obj = RuntimeError(
-                    f'Call with {len(done_jobs)} arguments '
-                    f'incorrectly returned {len(obj.x)} results'
-                )
-            else:
-                obj = TypeError(
-                    f'Call returned non-sequence. Got {type(obj.x).__name__}'
-                )
+    def pending_jobs(self) -> list[Job[T, R]]:
+        return [(a.x, f) for _, a, f in self.jobs if a]
 
-        for _, f in done_jobs:
-            f.set_exception(obj)
-            if isinstance(f, asyncio.Future):
-                f.exception()  # Mark exception as retrieved
-        self._errors[obj] = None
+    def running_as[F: AnyFuture](self, tp: type[F]) -> set[F]:
+        return {f for _, a, f in self.jobs if not a and isinstance(f, tp)}
 
-    @property
-    def args(self) -> list[T]:
-        return [a.arg for _, a, _ in self._jobs if a]
-
-    def fs_as[F: AnyFuture](self, tp: type[F]) -> set[F]:
-        return {f for _, a, f in self._jobs if not a and isinstance(f, tp)}
-
-    def finalize_fs(self) -> None:
-        cerr = cf.CancelledError
-        aerr = asyncio.CancelledError
-        for k, a, f in self._jobs:
-            if a:
-                continue  # Our task, not "borrowed" one
-            if f.cancelled():
-                self._default_tp = cerr if isinstance(f, cf.Future) else aerr
-            elif e := f.exception():
-                self._errors[e] = None
-            else:
-                self._stash.append((k, f.result()))
-
-    def sync(self) -> None:
-        for k, r in self._stash:
+    def sync(self, stash: Mapping[K, R]) -> None:
+        for k, r in stash.items():
             self._done[k] = self._cs.cache[k] = r
 
         # Force next callers to use cache  # ! optional
-        for k in self._jobs:
+        for k, _, _ in self.jobs:
            self._cs.futures.pop(k, None)
 
+    def result(self) -> list[R]:
+        return [self._done[k] for k in self._keys]
 
-def _sync_memoize_batched[T, R](
-    fn: BatchFn[T, R], cs: _CacheState[R]
+
+def _sync_memoize_batched[K: Hashable, T, R](
+    fn: BatchFn[T, R], cs: _CacheState[K, R]
 ) -> BatchFn[T, R]:
     lock = RLock()
 
@@ -440,58 +394,50 @@ def _sync_memoize_batched[T, R](
         with lock:
             q = _BatchedQuery(cs, *tokens)
 
+        stash: dict[K, R] = {}
         try:
-            # Run tasks we are first to schedule
-            if args := q.args:
-                try:
-                    with hide_frame:
-                        q.result = Some(fn(args))
-                except BaseException as exc:  # noqa: BLE001
-                    q.result = exc
-
-            # Wait for completion of tasks scheduled by neighbour calls
-            if fs := q.fs_as(cf.Future):
+            if jobs := q.pending_jobs:
+                dispatch(fn, *jobs)
+
+            if fs := q.running_as(cf.Future):
                 cf.wait(fs)
-                q.finalize_fs()
+
+            stash, err = gather_fs((k, f) for k, _, f in q.jobs)
         finally:
-            if q:
+            if q.jobs:
                 with lock:
-                    q.sync()
+                    q.sync(stash)
 
-        if isinstance(ret := q.result, Some):
-            return list(ret.x)
+        if err is None:
+            return q.result()
         with hide_frame:
-            raise ret
+            raise err
 
     return wrapper
 
 
-def _async_memoize_batched[T, R](
-    fn: ABatchFn[T, R], cs: _CacheState[R]
+def _async_memoize_batched[K: Hashable, T, R](
+    fn: ABatchFn[T, R], cs: _CacheState[K, R]
 ) -> ABatchFn[T, R]:
     async def wrapper(tokens: Iterable[T]) -> list[R]:
         q = _BatchedQuery(cs, *tokens, aio=True)
 
+        stash: dict[K, R] = {}
        try:
-            # Run tasks we are first to schedule
-            if args := q.args:
-                try:
-                    with hide_frame:
-                        q.result = Some(await fn(args))
-                except BaseException as exc:  # noqa: BLE001
-                    q.result = exc  # Raise later in `q.exception()`
-
-            # Wait for completion of tasks scheduled by neighbour calls
-            if fs := q.fs_as(asyncio.Future):
+            if jobs := q.pending_jobs:
+                await adispatch(fn, *jobs)
+
+            if fs := q.running_as(asyncio.Future):
                 await asyncio.wait(fs)
-                q.finalize_fs()
+
+            stash, err = gather_fs((k, f) for k, _, f in q.jobs)
         finally:
-            q.sync()
+            q.sync(stash)
 
-        if isinstance(ret := q.result, Some):
-            return list(ret.x)
+        if err is None:
+            return q.result()
         with hide_frame:
-            raise ret
+            raise err
 
     return wrapper
 
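
Note: `gather_fs` is defined in the new `glow/_futures.py`, whose body this diff does not show inline. Judging only from the call sites above and from the removed `finalize_fs`/`result` pair it replaces, it plausibly folds settled futures into a `(stash, err)` pair, roughly along these lines (an assumption-labeled sketch, not glow's actual code):

import asyncio
import concurrent.futures as cf
from collections.abc import Hashable, Iterable

type AnyFuture[R] = cf.Future[R] | asyncio.Future[R]

def gather_fs_sketch[K: Hashable, R](
    pairs: Iterable[tuple[K, AnyFuture[R]]],
) -> tuple[dict[K, R], BaseException | None]:
    # Collect results of settled futures; fold all failures into one error.
    stash: dict[K, R] = {}
    errors: dict[BaseException, None] = {}  # Ordered and de-duplicated.
    err: BaseException | None = None
    for k, f in pairs:
        if f.cancelled():
            err = (cf.CancelledError() if isinstance(f, cf.Future)
                   else asyncio.CancelledError())
        elif e := f.exception():
            errors[e] = None
        else:
            stash[k] = f.result()
    match list(errors):
        case []:
            pass  # Keep the cancellation error, if any, or None.
        case [e]:
            err = e
        case excs:
            # Degrades to ExceptionGroup when all members are Exception.
            err = BaseExceptionGroup('Got multiple exceptions', excs)
    return stash, err
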
@@ -499,15 +445,12 @@ def _async_memoize_batched[T, R](
 # ------------------------------- decorations --------------------------------
 
 
-def _memoize[**P, R](
+def _memoize[K: Hashable, **P, R](
     fn: Callable[P, R],
     *,
-    cache: _AbstractCache,
-    key_fn: KeyFn,
+    cs: _CacheState[K, Any],
     batched: bool,
 ) -> Callable[P, R]:
-    cs = _CacheState(cache, key_fn)
-
     if batched and iscoroutinefunction(fn):
         w = cast(
             'Callable[P, R]',
@@ -523,9 +466,12 @@ def _memoize[**P, R](
     else:
         w = _sync_memoize(fn, cs=cs)
 
-    while isinstance(cache, _StrongCache):
-        cache = cache.cache
-    w.cache = cache  # type: ignore[attr-defined]
+    w.running = cs.futures  # type: ignore[attr-defined]
+    if isinstance(cs.cache, _WeakCache):
+        w.wrefs = cs.cache.alive  # type: ignore[attr-defined]
+    if isinstance(cs.cache, _StrongCache):
+        w.cache = cs.cache.cache  # type: ignore[attr-defined]
+
     return functools.update_wrapper(w, fn)
 
 
@@ -559,7 +505,9 @@ def memoize(
     capacity = max(count, nbytes)
     if int(capacity) == 0:
         return functools.partial(  # type: ignore[return-value]
-            _memoize, cache=_WeakCache(), batched=batched, key_fn=key_fn
+            _memoize,
+            cs=_CacheState(_WeakCache(), key_fn),
+            batched=batched,
         )
 
     if cache_cls := _CACHES.get(policy):
@@ -574,8 +522,7 @@ def memoize(
     cache = cache_cls(capacity, make_node)
     return functools.partial(  # type: ignore[return-value]
         _memoize,
-        cache=_StrongCache(cache=cache),
-        key_fn=key_fn,
+        cs=_CacheState(_StrongCache(cache=cache), key_fn),
         batched=batched,
     )
 

{glow-0.15.4 → glow-0.15.6}/src/glow/_cache.pyi
@@ -1,6 +1,7 @@
 from typing import Literal, overload
 
-from ._types import AnyBatchDecorator, CachePolicy, Decorator, KeyFn
+from ._futures import AnyBatchDecorator
+from ._types import CachePolicy, Decorator, KeyFn
 
 def cache_status() -> str: ...