glow-0.15.3.tar.gz → glow-0.15.5.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. {glow-0.15.3 → glow-0.15.5}/PKG-INFO +1 -1
  2. {glow-0.15.3 → glow-0.15.5}/pyproject.toml +1 -1
  3. {glow-0.15.3 → glow-0.15.5}/src/glow/_async.py +6 -26
  4. {glow-0.15.3 → glow-0.15.5}/src/glow/_cache.py +126 -163
  5. {glow-0.15.3 → glow-0.15.5}/src/glow/_concurrency.py +15 -59
  6. glow-0.15.5/src/glow/_dev.py +29 -0
  7. glow-0.15.5/src/glow/_futures.py +104 -0
  8. {glow-0.15.3 → glow-0.15.5}/src/glow/_parallel.py +50 -48
  9. {glow-0.15.3 → glow-0.15.5}/src/glow/_types.py +2 -1
  10. {glow-0.15.3 → glow-0.15.5}/src/glow/cli.py +82 -23
  11. {glow-0.15.3 → glow-0.15.5}/test/test_cli.py +25 -19
  12. glow-0.15.3/src/glow/_dev.py +0 -16
  13. {glow-0.15.3 → glow-0.15.5}/.gitignore +0 -0
  14. {glow-0.15.3 → glow-0.15.5}/LICENSE +0 -0
  15. {glow-0.15.3 → glow-0.15.5}/README.md +0 -0
  16. {glow-0.15.3 → glow-0.15.5}/src/glow/__init__.py +0 -0
  17. {glow-0.15.3 → glow-0.15.5}/src/glow/_array.py +0 -0
  18. {glow-0.15.3 → glow-0.15.5}/src/glow/_async.pyi +0 -0
  19. {glow-0.15.3 → glow-0.15.5}/src/glow/_cache.pyi +0 -0
  20. {glow-0.15.3 → glow-0.15.5}/src/glow/_concurrency.pyi +0 -0
  21. {glow-0.15.3 → glow-0.15.5}/src/glow/_coro.py +0 -0
  22. {glow-0.15.3 → glow-0.15.5}/src/glow/_debug.py +0 -0
  23. {glow-0.15.3 → glow-0.15.5}/src/glow/_ic.py +0 -0
  24. {glow-0.15.3 → glow-0.15.5}/src/glow/_import_hook.py +0 -0
  25. {glow-0.15.3 → glow-0.15.5}/src/glow/_imutil.py +0 -0
  26. {glow-0.15.3 → glow-0.15.5}/src/glow/_keys.py +0 -0
  27. {glow-0.15.3 → glow-0.15.5}/src/glow/_logging.py +0 -0
  28. {glow-0.15.3 → glow-0.15.5}/src/glow/_more.py +0 -0
  29. {glow-0.15.3 → glow-0.15.5}/src/glow/_parallel.pyi +0 -0
  30. {glow-0.15.3 → glow-0.15.5}/src/glow/_patch_len.py +0 -0
  31. {glow-0.15.3 → glow-0.15.5}/src/glow/_patch_print.py +0 -0
  32. {glow-0.15.3 → glow-0.15.5}/src/glow/_patch_scipy.py +0 -0
  33. {glow-0.15.3 → glow-0.15.5}/src/glow/_profile.py +0 -0
  34. {glow-0.15.3 → glow-0.15.5}/src/glow/_profile.pyi +0 -0
  35. {glow-0.15.3 → glow-0.15.5}/src/glow/_reduction.py +0 -0
  36. {glow-0.15.3 → glow-0.15.5}/src/glow/_repr.py +0 -0
  37. {glow-0.15.3 → glow-0.15.5}/src/glow/_reusable.py +0 -0
  38. {glow-0.15.3 → glow-0.15.5}/src/glow/_sizeof.py +0 -0
  39. {glow-0.15.3 → glow-0.15.5}/src/glow/_streams.py +0 -0
  40. {glow-0.15.3 → glow-0.15.5}/src/glow/_thread_quota.py +0 -0
  41. {glow-0.15.3 → glow-0.15.5}/src/glow/_uuid.py +0 -0
  42. {glow-0.15.3 → glow-0.15.5}/src/glow/_wrap.py +0 -0
  43. {glow-0.15.3 → glow-0.15.5}/src/glow/api/__init__.py +0 -0
  44. {glow-0.15.3 → glow-0.15.5}/src/glow/api/config.py +0 -0
  45. {glow-0.15.3 → glow-0.15.5}/src/glow/api/exporting.py +0 -0
  46. {glow-0.15.3 → glow-0.15.5}/src/glow/cli.pyi +0 -0
  47. {glow-0.15.3 → glow-0.15.5}/src/glow/io/__init__.py +0 -0
  48. {glow-0.15.3 → glow-0.15.5}/src/glow/io/_sound.py +0 -0
  49. {glow-0.15.3 → glow-0.15.5}/src/glow/io/_svg.py +0 -0
  50. {glow-0.15.3 → glow-0.15.5}/src/glow/py.typed +0 -0
  51. {glow-0.15.3 → glow-0.15.5}/test/__init__.py +0 -0
  52. {glow-0.15.3 → glow-0.15.5}/test/test_api.py +0 -0
  53. {glow-0.15.3 → glow-0.15.5}/test/test_batch.py +0 -0
  54. {glow-0.15.3 → glow-0.15.5}/test/test_buffered.py +0 -0
  55. {glow-0.15.3 → glow-0.15.5}/test/test_iter.py +0 -0
  56. {glow-0.15.3 → glow-0.15.5}/test/test_shm.py +0 -0
  57. {glow-0.15.3 → glow-0.15.5}/test/test_thread_pool.py +0 -0
  58. {glow-0.15.3 → glow-0.15.5}/test/test_timed.py +0 -0
  59. {glow-0.15.3 → glow-0.15.5}/test/test_timer.py +0 -0
  60. {glow-0.15.3 → glow-0.15.5}/test/test_uuid.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: glow
-Version: 0.15.3
+Version: 0.15.5
 Summary: Functional Python tools
 Project-URL: homepage, https://github.com/arquolo/glow
 Author-email: Paul Maevskikh <arquolo@gmail.com>
@@ -7,7 +7,7 @@ only-packages = true

 [project]
 name = "glow"
-version = "0.15.3"
+version = "0.15.5"
 description = "Functional Python tools"
 readme = "README.md"
 requires-python = ">=3.12"
@@ -16,7 +16,8 @@ from contextlib import suppress
 from functools import partial
 from typing import TypeGuard, cast, overload

-from ._dev import declutter_tb
+from ._dev import hide_frame
+from ._futures import adispatch
 from ._types import (
     ABatchDecorator,
     ABatchFn,
@@ -292,7 +293,7 @@ def astreaming[T, R](
             if batch_size is not None and len(buf) == batch_size:
                 batch, buf[:] = buf[:], []
                 async with lock:
-                    await _adispatch(fn, *batch)
+                    await adispatch(fn, *batch)
         finally:
             ncalls -= 1

@@ -307,30 +308,9 @@ def astreaming[T, R](
         if not notified:
             batch, buf[:] = buf[:], []
             async with lock:
-                await _adispatch(fn, *batch)
+                await adispatch(fn, *batch)

-        return await asyncio.gather(*fs)
+        with hide_frame:
+            return await asyncio.gather(*fs)

     return wrapper
-
-
-async def _adispatch[T, R](fn: ABatchFn[T, R], *xs: _Job[T, R]) -> None:
-    if not xs:
-        return
-    obj: list[R] | BaseException
-    try:
-        obj = list(await fn([x for x, _ in xs]))
-        if len(obj) != len(xs):
-            obj = RuntimeError(
-                f'Call with {len(xs)} arguments '
-                f'incorrectly returned {len(obj)} results'
-            )
-    except BaseException as exc:  # noqa: BLE001
-        obj = declutter_tb(exc, fn.__code__)
-
-    if isinstance(obj, BaseException):
-        for _, f in xs:
-            f.set_exception(obj)
-    else:
-        for (_, f), res in zip(xs, obj):
-            f.set_result(res)
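Note: the deleted _adispatch above reappears as adispatch in the brand-new src/glow/_futures.py (+104 lines), whose body this diff does not show. A plausible reconstruction from the deleted code and the _cache.py call sites below, with traceback decluttering now deferred to hide_frame at the raise site instead of declutter_tb here; everything not named in the diff (including the Job alias) is an assumption:

# src/glow/_futures.py -- hypothetical sketch, not the published source
from ._types import ABatchFn, Job


async def adispatch[T, R](fn: ABatchFn[T, R], *xs: Job[T, R]) -> None:
    if not xs:
        return
    obj: list[R] | BaseException
    try:
        obj = list(await fn([x for x, _ in xs]))
        if len(obj) != len(xs):
            obj = RuntimeError(
                f'Call with {len(xs)} arguments '
                f'incorrectly returned {len(obj)} results'
            )
    except BaseException as exc:  # noqa: BLE001
        obj = exc
    # Fan the batch result (or its single failure) back out per item
    if isinstance(obj, BaseException):
        for _, f in xs:
            f.set_exception(obj)
    else:
        for (_, f), res in zip(xs, obj):
            f.set_result(res)

dispatch, imported alongside it in _cache.py below, is presumably the synchronous twin: the same body with fn(...) in place of await fn(...).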
@@ -11,20 +11,30 @@ from collections.abc import (
     Iterable,
     Iterator,
     KeysView,
+    Mapping,
     MutableMapping,
 )
 from dataclasses import dataclass, field
 from inspect import iscoroutinefunction
 from threading import RLock
-from types import CodeType
-from typing import Final, Protocol, SupportsInt, cast
+from typing import Any, Final, Protocol, SupportsInt, cast
 from weakref import WeakValueDictionary

-from ._dev import declutter_tb
+from ._dev import clone_exc, hide_frame
+from ._futures import adispatch, dispatch, gather_fs
 from ._keys import make_key
 from ._repr import si_bin
 from ._sizeof import sizeof
-from ._types import ABatchFn, AnyFuture, BatchFn, CachePolicy, Decorator, KeyFn
+from ._types import (
+    ABatchFn,
+    AnyFuture,
+    BatchFn,
+    CachePolicy,
+    Decorator,
+    Job,
+    KeyFn,
+    Some,
+)


 class _Empty(enum.Enum):
@@ -77,23 +87,23 @@ def cache_status() -> str:
 _REFS: MutableMapping[int, '_Cache'] = WeakValueDictionary()


-class _AbstractCache[T](Protocol):
-    def __getitem__(self, key: Hashable) -> T | _Empty: ...
-    def __setitem__(self, key: Hashable, value: T) -> None: ...
+class _AbstractCache[K: Hashable, T](Protocol):
+    def __getitem__(self, key: K, /) -> T | _Empty: ...
+    def __setitem__(self, key: K, value: T, /) -> None: ...


-class _CacheMaker[T](Protocol):
+class _CacheMaker[K, T](Protocol):
     def __call__(
         self, capacity: int, make_node: Callable[[T], _Node[T]]
-    ) -> '_AbstractCache[T]': ...
+    ) -> '_AbstractCache[K, T]': ...


 @dataclass(repr=False, slots=True, weakref_slot=True)
-class _Cache[T]:
+class _Cache[K: Hashable, T]:
     capacity: int
     make_node: Callable[[T], _Node[T]] = field(repr=False)
     size: int = 0
-    store: dict[Hashable, _Node[T]] = field(default_factory=dict)
+    store: dict[K, _Node[T]] = field(default_factory=dict)
     stats: Stats = field(default_factory=Stats)

     def __post_init__(self) -> None:
@@ -124,8 +134,8 @@ class _Cache[T]:
         return f'{type(self).__name__}({", ".join(args)})'


-class _Heap[T](_Cache[T]):
-    def __getitem__(self, key: Hashable) -> T | _Empty:
+class _Heap[K: Hashable, T](_Cache[K, T]):
+    def __getitem__(self, key: K, /) -> T | _Empty:
         if node := self.store.get(key):
             self.stats.hits += 1
             return node.value
@@ -133,7 +143,7 @@ class _Heap[T](_Cache[T]):
         self.stats.misses += 1
         return _empty

-    def __setitem__(self, key: Hashable, value: T) -> None:
+    def __setitem__(self, key: K, value: T, /) -> None:
         if key in self.store:
             return
         node = self.make_node(value)
@@ -146,8 +156,8 @@
         self.size += node.size


-class _LruMruCache[T](_Cache[T]):
-    def __getitem__(self, key: Hashable) -> T | _Empty:
+class _LruMruCache[K: Hashable, T](_Cache[K, T]):
+    def __getitem__(self, key: K, /) -> T | _Empty:
         if node := self.store.pop(key, None):
             self.stats.hits += 1
             self.store[key] = node
@@ -156,7 +166,7 @@ class _LruMruCache[T](_Cache[T]):
         self.stats.misses += 1
         return _empty

-    def __setitem__(self, key: Hashable, value: T) -> None:
+    def __setitem__(self, key: K, value: T, /) -> None:
         if key in self.store:
             return
         node = self.make_node(value)
@@ -176,13 +186,13 @@
         raise NotImplementedError


-class _LruCache[T](_LruMruCache[T]):
+class _LruCache[K: Hashable, T](_LruMruCache[K, T]):
     def pop(self) -> _Node:
         """Drop oldest node."""
         return self.store.pop(next(iter(self.store)))


-class _MruCache[T](_LruMruCache[T]):
+class _MruCache[K: Hashable, T](_LruMruCache[K, T]):
     def pop(self) -> _Node:
         """Drop most recently added node."""
         return self.store.popitem()[1]
@@ -192,26 +202,26 @@ class _MruCache[T](_LruMruCache[T]):


 @dataclass(frozen=True, kw_only=True)
-class _WeakCache[T]:
+class _WeakCache[K: Hashable, T]:
     """Retrieve items via weak references from everywhere."""

-    alive: WeakValueDictionary[Hashable, T] = field(
+    alive: WeakValueDictionary[K, T] = field(
         default_factory=WeakValueDictionary
     )

-    def __getitem__(self, key: Hashable) -> T | _Empty:
+    def __getitem__(self, key: K, /) -> T | _Empty:
         return self.alive.get(key, _empty)

-    def __setitem__(self, key: Hashable, value: T) -> None:
+    def __setitem__(self, key: K, value: T, /) -> None:
         if type(value).__weakrefoffset__:  # Support weak reference.
             self.alive[key] = value


 @dataclass(frozen=True, kw_only=True)
-class _StrongCache[R](_WeakCache[R]):
-    cache: _AbstractCache[R]
+class _StrongCache[K: Hashable, T](_WeakCache[K, T]):
+    cache: _AbstractCache[K, T]

-    def __getitem__(self, key: Hashable) -> R | _Empty:
+    def __getitem__(self, key: K, /) -> T | _Empty:
         # Alive and stored items.
         # Called first to update cache stats (i.e. MRU/LRU if any).
         # `cache` has subset of objects from `alive`.
@@ -220,17 +230,16 @@ class _StrongCache[R](_WeakCache[R]):
         # Item could still exist, try reference ...
         return super().__getitem__(key)

-    def __setitem__(self, key: Hashable, value: R) -> None:
+    def __setitem__(self, key: K, value: T, /) -> None:
         self.cache[key] = value
         super().__setitem__(key, value)


 @dataclass(frozen=True, slots=True)
-class _CacheState[R]:
-    cache: _AbstractCache[R]
-    code: CodeType  # for short tracebacks
-    key_fn: KeyFn
-    futures: WeakValueDictionary[Hashable, AnyFuture[R]] = field(
+class _CacheState[K: Hashable, R]:
+    cache: _AbstractCache[K, R]
+    key_fn: KeyFn[K]
+    futures: WeakValueDictionary[K, AnyFuture[R]] = field(
         default_factory=WeakValueDictionary
     )

@@ -238,8 +247,19 @@ class _CacheState[R]:
 # --------------------------------- wrapping ---------------------------------


-def _sync_memoize[**P, R](
-    fn: Callable[P, R], cs: _CacheState[R]
+def _result[T](f: cf.Future[T]) -> T:
+    if f.cancelled():
+        with hide_frame:
+            raise cf.CancelledError
+    if exc := f.exception():
+        with hide_frame:
+            raise exc
+    return f.result()
+
+
+def _sync_memoize[K: Hashable, **P, R](
+    fn: Callable[P, R],
+    cs: _CacheState[K, R],
 ) -> Callable[P, R]:
     lock = RLock()

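Note: the new _result helper re-raises a future's stored failure directly at the call site rather than through cf.Future.result(), which would prepend concurrent.futures internals to the traceback; the hide_frame blocks then drop _result's own frame as well. The stdlib behaviour it avoids, for illustration (plain Python, not glow code):

import concurrent.futures as cf
import traceback

f: cf.Future[int] = cf.Future()
f.set_exception(ValueError('boom'))
try:
    f.result()  # Re-raises from inside Future.result/__get_result
except ValueError:
    traceback.print_exc()  # Lists concurrent/futures/_base.py frames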
@@ -253,17 +273,22 @@

             # ... or it could be computed somewhere else, join there.
             f = cs.futures.get(key)
-            if not f:
+            if f:
+                assert isinstance(f, cf.Future)
+            else:
                 cs.futures[key] = f = cf.Future[R]()
                 is_owner = True

         # Release lock to allow function to run
         if not is_owner:
-            return f.result()
+            with hide_frame:
+                return _result(f)

         try:
-            ret = fn(*args, **kwargs)
+            with hide_frame:
+                ret = fn(*args, **kwargs)
         except BaseException as exc:
+            exc = clone_exc(exc)  # Protect from mutation by outer frame
             f.set_exception(exc)
             with lock:
                 cs.futures.pop(key)
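Note: clone_exc comes from the rewritten src/glow/_dev.py (+29 lines), not shown in this diff. Per the call-site comment, the future must hold its own exception instance so that an outer frame mutating the propagating copy (adding notes, chaining context) cannot corrupt what later waiters of the same future see. A hedged sketch of such a helper:

import copy


def clone_exc[E: BaseException](exc: E) -> E:
    # Hypothetical: shallow-copy the exception, keeping its traceback.
    return copy.copy(exc).with_traceback(exc.__traceback__)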
@@ -278,9 +303,9 @@ def _sync_memoize[**P, R](
     return wrapper


-def _async_memoize[**P, R](
+def _async_memoize[K: Hashable, **P, R](
     fn: Callable[P, Awaitable[R]],
-    cs: _CacheState[R],
+    cs: _CacheState[K, R],
 ) -> Callable[P, Awaitable[R]]:
     async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
         key = cs.key_fn(*args, **kwargs)
@@ -291,14 +316,17 @@ def _async_memoize[**P, R](
         # ... or it could be computed somewhere else, join there.
         if f := cs.futures.get(key):
             assert isinstance(f, asyncio.Future)
-            return await f
+            with hide_frame:
+                return await f
         cs.futures[key] = f = asyncio.Future[R]()

         # NOTE: fn() is not within threading.Lock, thus it's not thread safe
         # NOTE: but it's async-safe because this `await` is only one here.
         try:
-            ret = await fn(*args, **kwargs)
+            with hide_frame:
+                ret = await fn(*args, **kwargs)
         except BaseException as exc:
+            exc = clone_exc(exc)
             f.set_exception(exc)
             cs.futures.pop(key)
             raise
@@ -314,26 +342,20 @@
 # ----------------------- wrapper with batching support ----------------------


-@dataclass(slots=True, frozen=True)
-class _Arg[T]:
-    arg: T
-
-
-class _BatchedQuery[T, R]:
+class _BatchedQuery[K: Hashable, T, R]:
     def __init__(
-        self, cs: _CacheState[R], *tokens: T, aio: bool = False
+        self, cs: _CacheState[K, R], *tokens: T, aio: bool = False
     ) -> None:
         self._cs = cs
         self._keys = [cs.key_fn(t) for t in tokens]  # All keys with duplicates

-        self._jobs: list[tuple[Hashable, _Arg[T] | None, AnyFuture[R]]] = []
-        self._stash: list[tuple[Hashable, R]] = []
-        self._done: dict[Hashable, R] = {}
+        self.jobs: list[tuple[K, Some[T] | None, AnyFuture[R]]] = []
+        self._done: dict[K, R] = {}

         for k, t in dict(zip(self._keys, tokens)).items():
             # If this key is processing right now, wait till its done ...
             if f := cs.futures.get(k):  # ! Requires sync
-                self._jobs.append((k, None, f))  # Wait for this
+                self.jobs.append((k, None, f))  # Wait for this

             # ... else check if it's done ...
             elif (r := cs.cache[k]) is not _empty:  # ! Requires sync
@@ -342,86 +364,30 @@ class _BatchedQuery[T, R]:
             # ... otherwise schedule a new job.
             else:
                 f = asyncio.Future[R]() if aio else cf.Future[R]()
-                self._jobs.append((k, _Arg(t), f))  # Resolve this manually
+                self.jobs.append((k, Some(t), f))  # Resolve this manually
                 cs.futures[k] = f  # ! Requires sync

-        self._errors: dict[BaseException, None] = {}
-        self._default_tp: type[BaseException] | None = None
-
-    def __bool__(self) -> bool:
-        return bool(self._jobs)
-
     @property
-    def result(self) -> list[R] | BaseException:
-        match list(self._errors):
-            case []:
-                if self._default_tp:
-                    return self._default_tp()
-                return [self._done[k] for k in self._keys]
-            case [e]:
-                return e
-            case excs:
-                msg = 'Got multiple exceptions'
-                if all(isinstance(e, Exception) for e in excs):
-                    return ExceptionGroup(msg, excs)  # type: ignore[type-var]
-                return BaseExceptionGroup(msg, excs)
-
-    @result.setter
-    def result(self, obj: list[R] | BaseException) -> None:
-        done_jobs = [(k, f) for k, a, f in self._jobs if a]
-
-        if not isinstance(obj, BaseException):
-            if len(obj) == len(done_jobs):
-                for (k, f), value in zip(done_jobs, obj):
-                    f.set_result(value)
-                    self._stash.append((k, value))
-                return
-
-            obj = RuntimeError(
-                f'Call with {len(done_jobs)} arguments '
-                f'incorrectly returned {len(obj)} results'
-            )
+    def pending_jobs(self) -> list[Job[T, R]]:
+        return [(a.x, f) for _, a, f in self.jobs if a]

-        for _, f in done_jobs:
-            f.set_exception(obj)
-            if isinstance(f, asyncio.Future):
-                f.exception()  # Mark exception as retrieved
-        self._errors[obj] = None
+    def running_as[F: AnyFuture](self, tp: type[F]) -> set[F]:
+        return {f for _, a, f in self.jobs if not a and isinstance(f, tp)}

-    @property
-    def args(self) -> list[T]:
-        return [a.arg for _, a, _ in self._jobs if a]
-
-    def fs_as[F: AnyFuture](self, tp: type[F]) -> set[F]:
-        return {f for _, a, f in self._jobs if not a and isinstance(f, tp)}
-
-    def finalize_fs(self) -> None:
-        cerr = cf.CancelledError
-        aerr = asyncio.CancelledError
-        for k, a, f in self._jobs:
-            if a:
-                continue  # Our task, not "borrowed" one
-            if f.cancelled():
-                self._default_tp = cerr if isinstance(f, cf.Future) else aerr
-            elif e := f.exception():
-                self._errors[e] = None
-            else:
-                self._stash.append((k, f.result()))
-
-    def sync(self) -> None:
-        for e in self._errors:
-            declutter_tb(e, self._cs.code)
-
-        for k, r in self._stash:
+    def sync(self, stash: Mapping[K, R]) -> None:
+        for k, r in stash.items():
             self._done[k] = self._cs.cache[k] = r

         # Force next callers to use cache  # ! optional
-        for k in self._jobs:
+        for k, _, _ in self.jobs:
             self._cs.futures.pop(k, None)

+    def result(self) -> list[R]:
+        return [self._done[k] for k in self._keys]

-def _sync_memoize_batched[T, R](
-    fn: BatchFn[T, R], cs: _CacheState[R]
+
+def _sync_memoize_batched[K: Hashable, T, R](
+    fn: BatchFn[T, R], cs: _CacheState[K, R]
 ) -> BatchFn[T, R]:
     lock = RLock()

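Note: Some and Job are new names in src/glow/_types.py (+2 -1), whose body this diff omits. Usage above — Some(t) wrapping, the a.x attribute, truthiness against the None placeholder, and pending_jobs pairing each argument with its future — suggests shapes like the following (inferred, not quoted; with only two lines added, the real definitions may be terser):

from dataclasses import dataclass

type Job[T, R] = tuple[T, AnyFuture[R]]  # (argument, future to resolve)


@dataclass(slots=True, frozen=True)
class Some[T]:
    x: T  # Marks 'argument present', unlike a bare None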
@@ -429,54 +395,50 @@ def _sync_memoize_batched[T, R](
         with lock:
             q = _BatchedQuery(cs, *tokens)

+        stash: dict[K, R] = {}
         try:
-            # Run tasks we are first to schedule
-            if args := q.args:
-                try:
-                    q.result = list(fn(args))
-                except BaseException as exc:  # noqa: BLE001
-                    q.result = exc
-
-            # Wait for completion of tasks scheduled by neighbour calls
-            if fs := q.fs_as(cf.Future):
+            if jobs := q.pending_jobs:
+                dispatch(fn, *jobs)
+
+            if fs := q.running_as(cf.Future):
                 cf.wait(fs)
-            q.finalize_fs()
+
+            stash, err = gather_fs((k, f) for k, _, f in q.jobs)
         finally:
-            if q:
+            if q.jobs:
                 with lock:
-                    q.sync()
+                    q.sync(stash)

-        if isinstance(ret := q.result, BaseException):
-            raise ret
-        return ret
+        if err is None:
+            return q.result()
+        with hide_frame:
+            raise err

     return wrapper


-def _async_memoize_batched[T, R](
-    fn: ABatchFn[T, R], cs: _CacheState[R]
+def _async_memoize_batched[K: Hashable, T, R](
+    fn: ABatchFn[T, R], cs: _CacheState[K, R]
 ) -> ABatchFn[T, R]:
     async def wrapper(tokens: Iterable[T]) -> list[R]:
         q = _BatchedQuery(cs, *tokens, aio=True)

+        stash: dict[K, R] = {}
         try:
-            # Run tasks we are first to schedule
-            if args := q.args:
-                try:
-                    q.result = list(await fn(args))
-                except BaseException as exc:  # noqa: BLE001
-                    q.result = exc  # Raise later in `q.exception()`
-
-            # Wait for completion of tasks scheduled by neighbour calls
-            if fs := q.fs_as(asyncio.Future):
+            if jobs := q.pending_jobs:
+                await adispatch(fn, *jobs)
+
+            if fs := q.running_as(asyncio.Future):
                 await asyncio.wait(fs)
-            q.finalize_fs()
+
+            stash, err = gather_fs((k, f) for k, _, f in q.jobs)
         finally:
-            q.sync()
+            q.sync(stash)

-        if isinstance(ret := q.result, BaseException):
-            raise ret
-        return ret
+        if err is None:
+            return q.result()
+        with hide_frame:
+            raise err

     return wrapper

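Note: gather_fs is the third import from the unseen src/glow/_futures.py. Its call sites (stash, err = gather_fs((k, f) for k, _, f in q.jobs)) and the removed finalize_fs/result machinery suggest it drains finished futures into a key-to-result map and folds all failures into a single exception. A hedged reconstruction:

import concurrent.futures as cf
from collections.abc import Iterable

from ._types import AnyFuture


def gather_fs[K, R](
    kfs: Iterable[tuple[K, AnyFuture[R]]],
) -> tuple[dict[K, R], BaseException | None]:
    # Hypothetical sketch based on the deleted logic above.
    done: dict[K, R] = {}
    errors: dict[BaseException, None] = {}  # Ordered, deduplicated
    cancelled = False
    for k, f in kfs:
        if f.cancelled():
            cancelled = True
        elif e := f.exception():
            errors[e] = None
        else:
            done[k] = f.result()
    match list(errors):
        case []:
            return done, cf.CancelledError() if cancelled else None
        case [e]:
            return done, e
        case excs:
            # BaseExceptionGroup() narrows to ExceptionGroup when every
            # member is an Exception, matching the removed branching.
            return done, BaseExceptionGroup('Got multiple exceptions', excs)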
@@ -484,15 +446,12 @@ def _async_memoize_batched[T, R](
 # ------------------------------- decorations --------------------------------


-def _memoize[**P, R](
+def _memoize[K: Hashable, **P, R](
     fn: Callable[P, R],
     *,
-    cache: _AbstractCache,
-    key_fn: KeyFn,
+    cs: _CacheState[K, Any],
     batched: bool,
 ) -> Callable[P, R]:
-    cs = _CacheState(cache, fn.__code__, key_fn)
-
     if batched and iscoroutinefunction(fn):
         w = cast(
             'Callable[P, R]',
@@ -508,9 +467,12 @@ def _memoize[**P, R](
     else:
         w = _sync_memoize(fn, cs=cs)

-    while isinstance(cache, _StrongCache):
-        cache = cache.cache
-    w.cache = cache  # type: ignore[attr-defined]
+    w.running = cs.futures  # type: ignore[attr-defined]
+    if isinstance(cs.cache, _WeakCache):
+        w.wrefs = cs.cache.alive  # type: ignore[attr-defined]
+    if isinstance(cs.cache, _StrongCache):
+        w.cache = cs.cache.cache  # type: ignore[attr-defined]
+
     return functools.update_wrapper(w, fn)


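Note: the wrapper now always exposes its in-flight futures, and the weak map is published separately from the strong store. A hypothetical usage sketch — the memoize() hunks below confirm count/nbytes/policy/key_fn parameters, but the positional count-style call and the package-level re-export are assumptions:

import glow


@glow.memoize(128)  # Assumed count-style capacity argument
def double(x: int) -> int:
    return 2 * x


double(21)
double.cache    # Strong store (LRU/MRU/heap), unwrapped as before
double.wrefs    # New: weak references to every still-alive result
double.running  # New: WeakValueDictionary of in-flight futures per key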
@@ -544,7 +506,9 @@ def memoize(
     capacity = max(count, nbytes)
     if int(capacity) == 0:
         return functools.partial(  # type: ignore[return-value]
-            _memoize, cache=_WeakCache(), batched=batched, key_fn=key_fn
+            _memoize,
+            cs=_CacheState(_WeakCache(), key_fn),
+            batched=batched,
        )

     if cache_cls := _CACHES.get(policy):
@@ -559,8 +523,7 @@
         cache = cache_cls(capacity, make_node)
         return functools.partial(  # type: ignore[return-value]
             _memoize,
-            cache=_StrongCache(cache=cache),
-            key_fn=key_fn,
+            cs=_CacheState(_StrongCache(cache=cache), key_fn),
             batched=batched,
         )