cachebox 5.0.4__cp313-cp313t-macosx_11_0_arm64.whl → 5.2.1__cp313-cp313t-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cachebox/_cachebox.py CHANGED
@@ -1,9 +1,9 @@
1
- from . import _core
2
- from ._core import BaseCacheImpl
3
- from datetime import timedelta, datetime
4
1
  import copy as _std_copy
5
2
  import typing
3
+ from datetime import datetime, timedelta
6
4
 
5
+ from . import _core
6
+ from ._core import BaseCacheImpl
7
7
 
8
8
  KT = typing.TypeVar("KT")
9
9
  VT = typing.TypeVar("VT")
@@ -75,6 +75,7 @@ class Cache(BaseCacheImpl[KT, VT]):
75
75
  iterable: typing.Union[dict, typing.Iterable[tuple], None] = None,
76
76
  *,
77
77
  capacity: int = 0,
78
+ maxmemory: int = 0,
78
79
  ) -> None:
79
80
  """
80
81
  Initialize a new Cache instance.
@@ -83,11 +84,14 @@ class Cache(BaseCacheImpl[KT, VT]):
83
84
  maxsize (int): Maximum number of elements the cache can hold. Zero means unlimited.
84
85
  iterable (Union[Cache, dict, tuple, Generator, None], optional): Initial data to populate the cache. Defaults to None.
85
86
  capacity (int, optional): Pre-allocate hash table capacity to minimize reallocations. Defaults to 0.
87
+ maxmemory (int, optional): Maximum memory (bytes) allowed for cached entries. Zero means unlimited.
88
+ On PyPy, it works the same as `maxsize` if objects do not support the `__sizeof__`
89
+ method.
86
90
 
87
91
  Creates a new cache with specified size constraints and optional initial data. The cache can be pre-sized
88
92
  to improve performance when the number of expected elements is known in advance.
89
93
  """
90
- self._raw = _core.Cache(maxsize, capacity=capacity)
94
+ self._raw = _core.Cache(maxsize, capacity=capacity, maxmemory=maxmemory)
91
95
 
92
96
  if iterable is not None:
93
97
  self.update(iterable)
@@ -96,10 +100,18 @@ class Cache(BaseCacheImpl[KT, VT]):
96
100
  def maxsize(self) -> int:
97
101
  return self._raw.maxsize()
98
102
 
103
+ @property
104
+ def maxmemory(self) -> int:
105
+ return self._raw.maxmemory()
106
+
99
107
  def capacity(self) -> int:
100
108
  """Returns the number of elements the map can hold without reallocating."""
101
109
  return self._raw.capacity()
102
110
 
111
+ def memory(self) -> int:
112
+ """Returns the total estimated memory usage of cached entries in bytes."""
113
+ return self._raw.memory()
114
+
103
115
  def __len__(self) -> int:
104
116
  return len(self._raw)
105
117
 
@@ -309,6 +321,7 @@ class FIFOCache(BaseCacheImpl[KT, VT]):
309
321
  iterable: typing.Union[typing.Union[dict, typing.Iterable[tuple]], None] = None,
310
322
  *,
311
323
  capacity: int = 0,
324
+ maxmemory: int = 0,
312
325
  ) -> None:
313
326
  """
314
327
  Initialize a new FIFOCache instance.
@@ -318,8 +331,11 @@ class FIFOCache(BaseCacheImpl[KT, VT]):
318
331
  iterable: Optional initial data to populate the cache. Can be another FIFOCache,
319
332
  a dictionary, tuple, generator, or None.
320
333
  capacity: Optional initial capacity of the cache before resizing. Defaults to 0.
334
+ maxmemory: Maximum memory (bytes) allowed for cached entries. Zero means unlimited.
335
+ When maxmemory is set, updating an existing key can evict the updated key
336
+ if it is the oldest entry.
321
337
  """
322
- self._raw = _core.FIFOCache(maxsize, capacity=capacity)
338
+ self._raw = _core.FIFOCache(maxsize, capacity=capacity, maxmemory=maxmemory)
323
339
 
324
340
  if iterable is not None:
325
341
  self.update(iterable)
@@ -328,10 +344,18 @@ class FIFOCache(BaseCacheImpl[KT, VT]):
328
344
  def maxsize(self) -> int:
329
345
  return self._raw.maxsize()
330
346
 
347
+ @property
348
+ def maxmemory(self) -> int:
349
+ return self._raw.maxmemory()
350
+
331
351
  def capacity(self) -> int:
332
352
  """Returns the number of elements the map can hold without reallocating."""
333
353
  return self._raw.capacity()
334
354
 
355
+ def memory(self) -> int:
356
+ """Returns the total estimated memory usage of cached entries in bytes."""
357
+ return self._raw.memory()
358
+
335
359
  def __len__(self) -> int:
336
360
  return len(self._raw)
337
361
 
@@ -567,6 +591,7 @@ class RRCache(BaseCacheImpl[KT, VT]):
567
591
  iterable: typing.Union[typing.Union[dict, typing.Iterable[tuple]], None] = None,
568
592
  *,
569
593
  capacity: int = 0,
594
+ maxmemory: int = 0,
570
595
  ) -> None:
571
596
  """
572
597
  Initialize a new RRCache instance.
@@ -575,12 +600,16 @@ class RRCache(BaseCacheImpl[KT, VT]):
575
600
  maxsize (int): Maximum size of the cache. A value of zero means unlimited capacity.
576
601
  iterable (dict or Iterable[tuple], optional): Initial data to populate the cache. Defaults to None.
577
602
  capacity (int, optional): Preallocated capacity for the cache to minimize reallocations. Defaults to 0.
603
+ maxmemory (int, optional): Maximum memory (bytes) allowed for cached entries. Zero means unlimited.
604
+ When maxmemory is set, updates can evict any key, including the updated key.
605
+ On PyPy. In PyPy, the size of each object is assumed to be 1 if the object
606
+ does not have a `__sizeof__` method.
578
607
 
579
608
  Note:
580
609
  - The cache size limit is immutable after initialization.
581
610
  - If an iterable is provided, the cache will be populated using the update method.
582
611
  """
583
- self._raw = _core.RRCache(maxsize, capacity=capacity)
612
+ self._raw = _core.RRCache(maxsize, capacity=capacity, maxmemory=maxmemory)
584
613
 
585
614
  if iterable is not None:
586
615
  self.update(iterable)
@@ -589,10 +618,18 @@ class RRCache(BaseCacheImpl[KT, VT]):
589
618
  def maxsize(self) -> int:
590
619
  return self._raw.maxsize()
591
620
 
621
+ @property
622
+ def maxmemory(self) -> int:
623
+ return self._raw.maxmemory()
624
+
592
625
  def capacity(self) -> int:
593
626
  """Returns the number of elements the map can hold without reallocating."""
594
627
  return self._raw.capacity()
595
628
 
629
+ def memory(self) -> int:
630
+ """Returns the total estimated memory usage of cached entries in bytes."""
631
+ return self._raw.memory()
632
+
596
633
  def __len__(self) -> int:
597
634
  return len(self._raw)
598
635
 
@@ -828,6 +865,7 @@ class LRUCache(BaseCacheImpl[KT, VT]):
828
865
  iterable: typing.Union[typing.Union[dict, typing.Iterable[tuple]], None] = None,
829
866
  *,
830
867
  capacity: int = 0,
868
+ maxmemory: int = 0,
831
869
  ) -> None:
832
870
  """
833
871
  Initialize a new LRU Cache instance.
@@ -836,12 +874,15 @@ class LRUCache(BaseCacheImpl[KT, VT]):
836
874
  maxsize (int): Maximum size of the cache. Zero indicates unlimited size.
837
875
  iterable (dict | Iterable[tuple], optional): Initial data to populate the cache.
838
876
  capacity (int, optional): Pre-allocated capacity for the cache to minimize reallocations.
877
+ maxmemory (int, optional): Maximum memory (bytes) allowed for cached entries. Zero means unlimited.
878
+ On PyPy. In PyPy, the size of each object is assumed to be 1 if the object
879
+ does not have a `__sizeof__` method.
839
880
 
840
881
  Notes:
841
882
  - The cache size is immutable after initialization.
842
883
  - If an iterable is provided, it will be used to populate the cache.
843
884
  """
844
- self._raw = _core.LRUCache(maxsize, capacity=capacity)
885
+ self._raw = _core.LRUCache(maxsize, capacity=capacity, maxmemory=maxmemory)
845
886
 
846
887
  if iterable is not None:
847
888
  self.update(iterable)
@@ -850,10 +891,18 @@ class LRUCache(BaseCacheImpl[KT, VT]):
850
891
  def maxsize(self) -> int:
851
892
  return self._raw.maxsize()
852
893
 
894
+ @property
895
+ def maxmemory(self) -> int:
896
+ return self._raw.maxmemory()
897
+
853
898
  def capacity(self) -> int:
854
899
  """Returns the number of elements the map can hold without reallocating."""
855
900
  return self._raw.capacity()
856
901
 
902
+ def memory(self) -> int:
903
+ """Returns the total estimated memory usage of cached entries in bytes."""
904
+ return self._raw.memory()
905
+
857
906
  def __len__(self) -> int:
858
907
  return len(self._raw)
859
908
 
@@ -1098,6 +1147,7 @@ class LFUCache(BaseCacheImpl[KT, VT]):
1098
1147
  iterable: typing.Union[typing.Union[dict, typing.Iterable[tuple]], None] = None,
1099
1148
  *,
1100
1149
  capacity: int = 0,
1150
+ maxmemory: int = 0,
1101
1151
  ) -> None:
1102
1152
  """
1103
1153
  Initialize a new Least Frequently Used (LFU) cache.
@@ -1106,10 +1156,13 @@ class LFUCache(BaseCacheImpl[KT, VT]):
1106
1156
  maxsize (int): Maximum size of the cache. A value of zero means unlimited size.
1107
1157
  iterable (dict or Iterable[tuple], optional): Initial data to populate the cache.
1108
1158
  capacity (int, optional): Initial hash table capacity to minimize reallocations. Defaults to 0.
1159
+ maxmemory (int, optional): Maximum memory (bytes) allowed for cached entries. Zero means unlimited.
1160
+ On PyPy. In PyPy, the size of each object is assumed to be 1 if the object
1161
+ does not have a `__sizeof__` method.
1109
1162
 
1110
1163
  The cache uses a thread-safe LFU eviction policy, removing least frequently accessed items when the cache reaches its maximum size.
1111
1164
  """
1112
- self._raw = _core.LFUCache(maxsize, capacity=capacity)
1165
+ self._raw = _core.LFUCache(maxsize, capacity=capacity, maxmemory=maxmemory)
1113
1166
 
1114
1167
  if iterable is not None:
1115
1168
  self.update(iterable)
@@ -1118,10 +1171,18 @@ class LFUCache(BaseCacheImpl[KT, VT]):
1118
1171
  def maxsize(self) -> int:
1119
1172
  return self._raw.maxsize()
1120
1173
 
1174
+ @property
1175
+ def maxmemory(self) -> int:
1176
+ return self._raw.maxmemory()
1177
+
1121
1178
  def capacity(self) -> int:
1122
1179
  """Returns the number of elements the map can hold without reallocating."""
1123
1180
  return self._raw.capacity()
1124
1181
 
1182
+ def memory(self) -> int:
1183
+ """Returns the total estimated memory usage of cached entries in bytes."""
1184
+ return self._raw.memory()
1185
+
1125
1186
  def __len__(self) -> int:
1126
1187
  return len(self._raw)
1127
1188
 
@@ -1378,6 +1439,7 @@ class TTLCache(BaseCacheImpl[KT, VT]):
1378
1439
  iterable: typing.Union[typing.Union[dict, typing.Iterable[tuple]], None] = None,
1379
1440
  *,
1380
1441
  capacity: int = 0,
1442
+ maxmemory: int = 0,
1381
1443
  ) -> None:
1382
1444
  """
1383
1445
  Initialize a new TTL cache instance.
@@ -1387,6 +1449,9 @@ class TTLCache(BaseCacheImpl[KT, VT]):
1387
1449
  ttl: Time-to-live for cache entries, either as seconds or a timedelta.
1388
1450
  iterable: Optional initial items to populate the cache, can be a dict or iterable of tuples.
1389
1451
  capacity: Optional initial capacity for the underlying cache storage. Defaults to 0.
1452
+ maxmemory: Maximum memory (bytes) allowed for cached entries. Zero means unlimited.
1453
+ On PyPy. In PyPy, the size of each object is assumed to be 1 if the object
1454
+ does not have a `__sizeof__` method.
1390
1455
 
1391
1456
  Raises:
1392
1457
  ValueError: If the time-to-live (ttl) is not a positive number.
@@ -1397,7 +1462,7 @@ class TTLCache(BaseCacheImpl[KT, VT]):
1397
1462
  if ttl <= 0:
1398
1463
  raise ValueError("ttl must be a positive number and non-zero")
1399
1464
 
1400
- self._raw = _core.TTLCache(maxsize, ttl, capacity=capacity)
1465
+ self._raw = _core.TTLCache(maxsize, ttl, capacity=capacity, maxmemory=maxmemory)
1401
1466
 
1402
1467
  if iterable is not None:
1403
1468
  self.update(iterable)
@@ -1406,6 +1471,10 @@ class TTLCache(BaseCacheImpl[KT, VT]):
1406
1471
  def maxsize(self) -> int:
1407
1472
  return self._raw.maxsize()
1408
1473
 
1474
+ @property
1475
+ def maxmemory(self) -> int:
1476
+ return self._raw.maxmemory()
1477
+
1409
1478
  @property
1410
1479
  def ttl(self) -> float:
1411
1480
  return self._raw.ttl()
@@ -1414,6 +1483,10 @@ class TTLCache(BaseCacheImpl[KT, VT]):
1414
1483
  """Returns the number of elements the map can hold without reallocating."""
1415
1484
  return self._raw.capacity()
1416
1485
 
1486
+ def memory(self) -> int:
1487
+ """Returns the total estimated memory usage of cached entries in bytes."""
1488
+ return self._raw.memory()
1489
+
1417
1490
  def __len__(self) -> int:
1418
1491
  return len(self._raw)
1419
1492
 
@@ -1744,6 +1817,7 @@ class VTTLCache(BaseCacheImpl[KT, VT]):
1744
1817
  ttl: typing.Union[float, timedelta, datetime, None] = None, # This is not a global TTL!
1745
1818
  *,
1746
1819
  capacity: int = 0,
1820
+ maxmemory: int = 0,
1747
1821
  ) -> None:
1748
1822
  """
1749
1823
  Initialize a new VTTLCache instance.
@@ -1753,11 +1827,14 @@ class VTTLCache(BaseCacheImpl[KT, VT]):
1753
1827
  iterable (dict or Iterable[tuple], optional): Initial data to populate the cache.
1754
1828
  ttl (float or timedelta or datetime, optional): Time-to-live duration for `iterable` items.
1755
1829
  capacity (int, optional): Preallocated capacity for the cache to minimize reallocations.
1830
+ maxmemory (int, optional): Maximum memory (bytes) allowed for cached entries. Zero means unlimited.
1831
+ On PyPy. In PyPy, the size of each object is assumed to be 1 if the object
1832
+ does not have a `__sizeof__` method.
1756
1833
 
1757
1834
  Raises:
1758
1835
  ValueError: If provided TTL is zero or negative.
1759
1836
  """
1760
- self._raw = _core.VTTLCache(maxsize, capacity=capacity)
1837
+ self._raw = _core.VTTLCache(maxsize, capacity=capacity, maxmemory=maxmemory)
1761
1838
 
1762
1839
  if iterable is not None:
1763
1840
  self.update(iterable, ttl)
@@ -1766,10 +1843,18 @@ class VTTLCache(BaseCacheImpl[KT, VT]):
1766
1843
  def maxsize(self) -> int:
1767
1844
  return self._raw.maxsize()
1768
1845
 
1846
+ @property
1847
+ def maxmemory(self) -> int:
1848
+ return self._raw.maxmemory()
1849
+
1769
1850
  def capacity(self) -> int:
1770
1851
  """Returns the number of elements the map can hold without reallocating."""
1771
1852
  return self._raw.capacity()
1772
1853
 
1854
+ def memory(self) -> int:
1855
+ """Returns the total estimated memory usage of cached entries in bytes."""
1856
+ return self._raw.memory()
1857
+
1773
1858
  def __len__(self) -> int:
1774
1859
  return len(self._raw)
1775
1860
 
@@ -1789,7 +1874,10 @@ class VTTLCache(BaseCacheImpl[KT, VT]):
1789
1874
  return self._raw.is_full()
1790
1875
 
1791
1876
  def insert(
1792
- self, key: KT, value: VT, ttl: typing.Union[float, timedelta, datetime, None] = None
1877
+ self,
1878
+ key: KT,
1879
+ value: VT,
1880
+ ttl: typing.Union[float, timedelta, datetime, None] = None,
1793
1881
  ) -> typing.Optional[VT]:
1794
1882
  """
1795
1883
  Insert a key-value pair into the cache with an optional time-to-live (TTL).
Binary file
cachebox/_core.pyi CHANGED
@@ -30,11 +30,14 @@ class BaseCacheImpl(typing.Generic[KT, VT]):
30
30
  iterable: typing.Union[typing.Iterable[typing.Tuple[KT, VT]], typing.Dict[KT, VT]] = ...,
31
31
  *,
32
32
  capacity: int = ...,
33
+ maxmemory: int = ...,
33
34
  ) -> None: ...
34
35
  @staticmethod
35
36
  def __class_getitem__(*args: typing.Any) -> None: ...
36
37
  @property
37
38
  def maxsize(self) -> int: ...
39
+ @property
40
+ def maxmemory(self) -> int: ...
38
41
  def __len__(self) -> int: ...
39
42
  def __sizeof__(self) -> int: ...
40
43
  def __bool__(self) -> bool: ...
@@ -47,6 +50,7 @@ class BaseCacheImpl(typing.Generic[KT, VT]):
47
50
  def __eq__(self, other: typing.Any) -> bool: ...
48
51
  def __ne__(self, other: typing.Any) -> bool: ...
49
52
  def capacity(self) -> int: ...
53
+ def memory(self) -> int: ...
50
54
  def is_full(self) -> bool: ...
51
55
  def is_empty(self) -> bool: ...
52
56
  def insert(
@@ -55,7 +59,11 @@ class BaseCacheImpl(typing.Generic[KT, VT]):
55
59
  def get(self, key: KT, default: typing.Optional[DT] = None) -> typing.Union[VT, DT]: ...
56
60
  def pop(self, key: KT, default: typing.Optional[DT] = None) -> typing.Union[VT, DT]: ...
57
61
  def setdefault(
58
- self, key: KT, default: typing.Optional[DT] = None, *args: typing.Any, **kwargs: typing.Any
62
+ self,
63
+ key: KT,
64
+ default: typing.Optional[DT] = None,
65
+ *args: typing.Any,
66
+ **kwargs: typing.Any,
59
67
  ) -> typing.Optional[VT | DT]: ...
60
68
  def popitem(self) -> typing.Tuple[KT, VT]: ...
61
69
  def drain(self, n: int) -> int: ...
cachebox/utils.py CHANGED
@@ -1,11 +1,11 @@
1
- from ._cachebox import BaseCacheImpl, FIFOCache
2
- from collections import namedtuple, defaultdict
3
- import functools
4
- import asyncio
5
1
  import _thread
2
+ import asyncio
3
+ import functools
6
4
  import inspect
7
5
  import typing
6
+ from collections import defaultdict, namedtuple
8
7
 
8
+ from ._cachebox import BaseCacheImpl, FIFOCache
9
9
 
10
10
  KT = typing.TypeVar("KT")
11
11
  VT = typing.TypeVar("VT")
@@ -46,6 +46,10 @@ class Frozen(BaseCacheImpl[KT, VT]): # pragma: no cover
46
46
  def maxsize(self) -> int:
47
47
  return self.__cache.maxsize
48
48
 
49
+ @property
50
+ def maxmemory(self) -> int:
51
+ return self.__cache.maxmemory
52
+
49
53
  def __len__(self) -> int:
50
54
  return len(self.__cache)
51
55
 
@@ -85,6 +89,9 @@ class Frozen(BaseCacheImpl[KT, VT]): # pragma: no cover
85
89
  def capacity(self) -> int:
86
90
  return self.__cache.capacity()
87
91
 
92
+ def memory(self) -> int:
93
+ return self.__cache.memory()
94
+
88
95
  def is_full(self) -> bool:
89
96
  return self.__cache.is_full()
90
97
 
@@ -267,13 +274,14 @@ EVENT_HIT = 2
267
274
 
268
275
  def _cached_wrapper(
269
276
  func,
270
- cache: BaseCacheImpl,
277
+ cache: typing.Union[BaseCacheImpl, typing.Callable],
271
278
  key_maker: typing.Callable[[tuple, dict], typing.Hashable],
272
279
  clear_reuse: bool,
273
280
  callback: typing.Optional[typing.Callable[[int, typing.Any, typing.Any], typing.Any]],
274
281
  copy_level: int,
275
282
  is_method: bool,
276
283
  ):
284
+ is_method = cache_is_function = inspect.isfunction(cache)
277
285
  _key_maker = (lambda args, kwds: key_maker(args[1:], kwds)) if is_method else key_maker
278
286
 
279
287
  hits = 0
@@ -287,11 +295,12 @@ def _cached_wrapper(
287
295
  if kwds.pop("cachebox__ignore", False):
288
296
  return func(*args, **kwds)
289
297
 
298
+ _cache = cache(args[0]) if cache_is_function else cache
290
299
  key = _key_maker(args, kwds)
291
300
 
292
301
  # try to get result from cache
293
302
  try:
294
- result = cache[key]
303
+ result = _cache[key]
295
304
  except KeyError:
296
305
  pass
297
306
  else:
@@ -310,7 +319,7 @@ def _cached_wrapper(
310
319
  raise cached_error
311
320
 
312
321
  try:
313
- result = cache[key]
322
+ result = _cache[key]
314
323
  hits += 1
315
324
  event = EVENT_HIT
316
325
  except KeyError:
@@ -323,7 +332,7 @@ def _cached_wrapper(
323
332
  raise e
324
333
 
325
334
  else:
326
- cache[key] = result
335
+ _cache[key] = result
327
336
  misses += 1
328
337
  event = EVENT_MISS
329
338
 
@@ -332,34 +341,39 @@ def _cached_wrapper(
332
341
 
333
342
  return _copy_if_need(result, level=copy_level)
334
343
 
335
- _wrapped.cache = cache
344
+ if not cache_is_function:
345
+ _wrapped.cache = cache
346
+ _wrapped.cache_info = lambda: CacheInfo(
347
+ hits, misses, cache.maxsize, len(cache), cache.memory()
348
+ )
349
+
336
350
  _wrapped.callback = callback
337
- _wrapped.cache_info = lambda: CacheInfo(
338
- hits, misses, cache.maxsize, len(cache), cache.capacity()
339
- )
340
351
 
341
- def cache_clear() -> None:
342
- nonlocal misses, hits, locks, exceptions
343
- cache.clear(reuse=clear_reuse)
344
- misses = 0
345
- hits = 0
346
- locks.clear()
347
- exceptions.clear()
352
+ if not cache_is_function:
353
+
354
+ def cache_clear() -> None:
355
+ nonlocal misses, hits, locks, exceptions
356
+ cache.clear(reuse=clear_reuse)
357
+ misses = 0
358
+ hits = 0
359
+ locks.clear()
360
+ exceptions.clear()
348
361
 
349
- _wrapped.cache_clear = cache_clear
362
+ _wrapped.cache_clear = cache_clear
350
363
 
351
364
  return _wrapped
352
365
 
353
366
 
354
367
  def _async_cached_wrapper(
355
368
  func,
356
- cache: BaseCacheImpl,
369
+ cache: typing.Union[BaseCacheImpl, typing.Callable],
357
370
  key_maker: typing.Callable[[tuple, dict], typing.Hashable],
358
371
  clear_reuse: bool,
359
372
  callback: typing.Optional[typing.Callable[[int, typing.Any, typing.Any], typing.Any]],
360
373
  copy_level: int,
361
374
  is_method: bool,
362
375
  ):
376
+ is_method = cache_is_function = inspect.isfunction(cache)
363
377
  _key_maker = (lambda args, kwds: key_maker(args[1:], kwds)) if is_method else key_maker
364
378
 
365
379
  hits = 0
@@ -375,11 +389,12 @@ def _async_cached_wrapper(
375
389
  if kwds.pop("cachebox__ignore", False):
376
390
  return await func(*args, **kwds)
377
391
 
392
+ _cache = cache(args[0]) if cache_is_function else cache
378
393
  key = _key_maker(args, kwds)
379
394
 
380
395
  # try to get result from cache
381
396
  try:
382
- result = cache[key]
397
+ result = _cache[key]
383
398
  except KeyError:
384
399
  pass
385
400
  else:
@@ -400,7 +415,7 @@ def _async_cached_wrapper(
400
415
  raise cached_error
401
416
 
402
417
  try:
403
- result = cache[key]
418
+ result = _cache[key]
404
419
  hits += 1
405
420
  event = EVENT_HIT
406
421
  except KeyError:
@@ -413,7 +428,7 @@ def _async_cached_wrapper(
413
428
  raise e
414
429
 
415
430
  else:
416
- cache[key] = result
431
+ _cache[key] = result
417
432
  misses += 1
418
433
  event = EVENT_MISS
419
434
 
@@ -424,21 +439,25 @@ def _async_cached_wrapper(
424
439
 
425
440
  return _copy_if_need(result, level=copy_level)
426
441
 
427
- _wrapped.cache = cache
442
+ if not cache_is_function:
443
+ _wrapped.cache = cache
444
+ _wrapped.cache_info = lambda: CacheInfo(
445
+ hits, misses, cache.maxsize, len(cache), cache.memory()
446
+ )
447
+
428
448
  _wrapped.callback = callback
429
- _wrapped.cache_info = lambda: CacheInfo(
430
- hits, misses, cache.maxsize, len(cache), cache.capacity()
431
- )
432
449
 
433
- def cache_clear() -> None:
434
- nonlocal misses, hits, locks, exceptions
435
- cache.clear(reuse=clear_reuse)
436
- misses = 0
437
- hits = 0
438
- locks.clear()
439
- exceptions.clear()
450
+ if not cache_is_function:
451
+
452
+ def cache_clear() -> None:
453
+ nonlocal misses, hits, locks, exceptions
454
+ cache.clear(reuse=clear_reuse)
455
+ misses = 0
456
+ hits = 0
457
+ locks.clear()
458
+ exceptions.clear()
440
459
 
441
- _wrapped.cache_clear = cache_clear
460
+ _wrapped.cache_clear = cache_clear
442
461
 
443
462
  return _wrapped
444
463
 
@@ -456,7 +475,8 @@ def cached(
456
475
  Wraps a function to automatically cache and retrieve its results based on input parameters.
457
476
 
458
477
  Args:
459
- cache (BaseCacheImpl, dict, optional): Cache implementation to store results. Defaults to FIFOCache.
478
+ cache (BaseCacheImpl, dict, callable): Cache implementation to store results. Defaults to FIFOCache.
479
+ Can be a function that got `self` and should return cache.
460
480
  key_maker (Callable, optional): Function to generate cache keys from function arguments. Defaults to make_key.
461
481
  clear_reuse (bool, optional): Whether to reuse cache during clearing. Defaults to False.
462
482
  callback (Callable, optional): Function called on cache hit/miss events. Defaults to None.
@@ -465,7 +485,7 @@ def cached(
465
485
  Returns:
466
486
  Callable: Decorated function with caching capabilities.
467
487
 
468
- Example::
488
+ Example for functions::
469
489
 
470
490
  @cachebox.cached(cachebox.LRUCache(128))
471
491
  def sum_as_string(a, b):
@@ -476,6 +496,20 @@ def cached(
476
496
  assert len(sum_as_string.cache) == 1
477
497
  sum_as_string.cache_clear()
478
498
  assert len(sum_as_string.cache) == 0
499
+
500
+ Example for methods::
501
+
502
+ class A:
503
+ def __init__(self, num):
504
+ self.num = num
505
+ self._cache = cachebox.FIFOCache(0)
506
+
507
+ @cachebox.cached(lambda self: self._cache)
508
+ def method(self, n):
509
+ return self.num * n
510
+
511
+ instance = A(10)
512
+ assert instance.method(2) == 20
479
513
  """
480
514
  if cache is None:
481
515
  cache = FIFOCache(0)
@@ -483,8 +517,8 @@ def cached(
483
517
  if type(cache) is dict:
484
518
  cache = FIFOCache(0, cache)
485
519
 
486
- if not isinstance(cache, BaseCacheImpl):
487
- raise TypeError("we expected cachebox caches, got %r" % (cache,))
520
+ if not isinstance(cache, BaseCacheImpl) and not inspect.isfunction(cache):
521
+ raise TypeError("we expected cachebox caches or function, got %r" % (cache,))
488
522
 
489
523
  def decorator(func: FT) -> FT:
490
524
  if inspect.iscoroutinefunction(func):
@@ -509,6 +543,9 @@ def cachedmethod(
509
543
  copy_level: int = 1,
510
544
  ) -> typing.Callable[[FT], FT]:
511
545
  """
546
+ **This function is deprecated due to issue [#35](https://github.com/awolverp/cachebox/issues/35)**.
547
+ Use `cached` method instead.
548
+
512
549
  Decorator to create a method-specific memoized cache for function results.
513
550
 
514
551
  Similar to `cached()`, but ignores `self` parameter when generating cache keys.
@@ -523,6 +560,14 @@ def cachedmethod(
523
560
  Returns:
524
561
  Callable: Decorated method with method-specific caching capabilities.
525
562
  """
563
+ import warnings
564
+
565
+ warnings.warn(
566
+ "cachedmethod is deprecated, use cached instead. see issue https://github.com/awolverp/cachebox/issues/35",
567
+ DeprecationWarning,
568
+ stacklevel=2,
569
+ )
570
+
526
571
  if cache is None:
527
572
  cache = FIFOCache(0)
528
573
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: cachebox
3
- Version: 5.0.4
3
+ Version: 5.2.1
4
4
  Classifier: Programming Language :: Python :: Implementation :: CPython
5
5
  Classifier: Programming Language :: Python :: Implementation :: PyPy
6
6
  Classifier: Programming Language :: Python :: 3
@@ -176,7 +176,6 @@ assert cache.get("key") == "value"
176
176
  ## Getting started
177
177
  There are 3 useful functions:
178
178
  - [**cached**](#cached--decorator): a decorator that helps you to cache your functions and calculations with a lot of options.
179
- - [**cachedmethod**](#cachedmethod--decorator): this is excatly works like `cached()`, but ignores `self` parameters in hashing and key making.
180
179
  - [**is_cached**](#is_cached--function): check if a function/method cached by cachebox or not
181
180
 
182
181
  And 9 classes:
@@ -282,6 +281,23 @@ print(sum_as_string.cache_info())
282
281
  sum_as_string.cache_clear()
283
282
  ```
284
283
 
284
+ method example: *(Added in v5.1.0)*
285
+ ```python
286
+ import cachebox
287
+
288
+ class Example:
289
+ def __init__(self, num) -> None:
290
+ self.num = num
291
+ self._cache = cachebox.TTLCache(20, 10)
292
+
293
+ @cachebox.cached(lambda self: self._cache)
294
+ def method(self, char: str):
295
+ return char * self.num
296
+
297
+ ex = Example(10)
298
+ assert ex.method("a") == "a" * 10
299
+ ```
300
+
285
301
  callback example: *(Added in v4.2.0)*
286
302
  ```python
287
303
  import cachebox
@@ -314,26 +330,6 @@ assert func(5, 4) == 9
314
330
 
315
331
  </details>
316
332
 
317
-
318
- > [!NOTE]\
319
- > Recommended use `cached` method for **@staticmethod**s and use [`cachedmethod`](#function-cachedmethod) for **@classmethod**s;
320
- > And set `copy_level` parameter to `2` on **@classmethod**s.
321
- > ```python
322
- > class MyClass:
323
- > def __init__(self, num: int) -> None:
324
- > self.num = num
325
- >
326
- > @classmethod
327
- > @cachedmethod({}, copy_level=2)
328
- > def class_func(cls, num: int):
329
- > return cls(num)
330
- >
331
- > @staticmethod
332
- > @cached({})
333
- > def static_func(num: int):
334
- > return num * 5
335
- > ```
336
-
337
333
  > [!TIP]\
338
334
  > There's a new feature **since `v4.1.0`** that you can tell to a cached function that don't use cache for a call:
339
335
  > ```python
@@ -346,6 +342,9 @@ assert func(5, 4) == 9
346
342
  ### `cachedmethod` (🎀 decorator)
347
343
  this works exactly like `cached()`, but ignores `self` parameters in hashing and key making.
348
344
 
345
+ > [!WARNING]\
346
+ > This function has been deprecated since `v5.1.0`, use `cached` function instead.
347
+
349
348
  <details>
350
349
  <summary><b>Example</b></summary>
351
350
 
@@ -0,0 +1,10 @@
1
+ cachebox-5.2.1.dist-info/METADATA,sha256=nIqCXmu4yObt_6PLJ-ZvdTH0qHwCXnoVMiX-wA1fa64,26991
2
+ cachebox-5.2.1.dist-info/WHEEL,sha256=xu7xe7fX0mDsrlObmIPY9Qu5_8dib97e5863-7t3qqk,106
3
+ cachebox-5.2.1.dist-info/licenses/LICENSE,sha256=4oH8nU-rNZ9h5tbh6MoPVPFgtYDqsPXlAr2QnhnxQ4Q,1064
4
+ cachebox/__init__.py,sha256=I93y-hLQKXsEIlmd0KAZ6CVfDqKqcWFvyiJdDsMUcvI,647
5
+ cachebox/_cachebox.py,sha256=6cRVHpBJ52o4BrEAoY6v8EOWzxDoduTY4beeJy1eb9o,74601
6
+ cachebox/_core.cpython-313t-darwin.so,sha256=2jrSNYGlhfiRPYopMJnbyUtLdvTKsPH36Ty3wChE2ME,758704
7
+ cachebox/_core.pyi,sha256=-FtdVb0huB_VmetHum_d8x9b2VYzVOEIKey5DonzCcQ,2988
8
+ cachebox/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
+ cachebox/utils.py,sha256=jacdATe_7XYkUt3OuSc1W1VnZlmCTHb7lOiQVCuANJA,18359
10
+ cachebox-5.2.1.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: maturin (1.9.6)
2
+ Generator: maturin (1.10.2)
3
3
  Root-Is-Purelib: false
4
4
  Tag: cp313-cp313t-macosx_11_0_arm64
@@ -1,10 +0,0 @@
1
- cachebox-5.0.4.dist-info/METADATA,sha256=vx5tWzBq0cdk0_0alFRm_R_1KquIHOixh3vzMgi00LY,27190
2
- cachebox-5.0.4.dist-info/WHEEL,sha256=lArrl3UZ7hqAytPo_RGBIshTDVGiR3UckOq0Rt8pOAA,105
3
- cachebox-5.0.4.dist-info/licenses/LICENSE,sha256=4oH8nU-rNZ9h5tbh6MoPVPFgtYDqsPXlAr2QnhnxQ4Q,1064
4
- cachebox/__init__.py,sha256=I93y-hLQKXsEIlmd0KAZ6CVfDqKqcWFvyiJdDsMUcvI,647
5
- cachebox/_cachebox.py,sha256=hCZLh1DRMHvD5Crqcrsffy0pFRmc9fToTxHmUJDFK5Q,70522
6
- cachebox/_core.cpython-313t-darwin.so,sha256=eMLSReJRNLhuXE_JMBYxXH7vhGxu5K8Ntdvg9SC-wLk,675024
7
- cachebox/_core.pyi,sha256=qVl1rXUfzAzxy5FrH35Bv7RTp4gvXbaoZ-oMTDe4zTY,2842
8
- cachebox/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
- cachebox/utils.py,sha256=tHl21N5dW1HDpscwfkvre3-LeBJo8GiZ9ixYBn9KkGk,16804
10
- cachebox-5.0.4.dist-info/RECORD,,