cachebox 5.2.1__cp312-cp312-musllinux_1_2_armv7l.whl

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
cachebox/_core.pyi ADDED
@@ -0,0 +1,83 @@
+ import typing
+
+ __version__: str
+ __author__: str
+
+ class CoreKeyError(Exception):
+     """
+     An exception when a key is not found in a cache.
+     This exception is internal to the library core and won't affect you.
+     """
+
+     ...
+
+ KT = typing.TypeVar("KT")
+ VT = typing.TypeVar("VT")
+ DT = typing.TypeVar("DT")
+
+ class BaseCacheImpl(typing.Generic[KT, VT]):
+     """
+     Base implementation for cache classes in the cachebox library.
+
+     This abstract base class defines the generic structure for cache implementations,
+     supporting different key and value types through generic type parameters.
+     Serves as a foundation for specific cache variants like Cache and FIFOCache.
+     """
+
+     def __init__(
+         self,
+         maxsize: int,
+         iterable: typing.Union[typing.Iterable[typing.Tuple[KT, VT]], typing.Dict[KT, VT]] = ...,
+         *,
+         capacity: int = ...,
+         maxmemory: int = ...,
+     ) -> None: ...
+     @staticmethod
+     def __class_getitem__(*args: typing.Any) -> None: ...
+     @property
+     def maxsize(self) -> int: ...
+     @property
+     def maxmemory(self) -> int: ...
+     def __len__(self) -> int: ...
+     def __sizeof__(self) -> int: ...
+     def __bool__(self) -> bool: ...
+     def __contains__(self, key: KT) -> bool: ...
+     def __setitem__(self, key: KT, value: VT) -> None: ...
+     def __getitem__(self, key: KT) -> VT: ...
+     def __delitem__(self, key: KT) -> None: ...
+     def __str__(self) -> str: ...
+     def __iter__(self) -> typing.Iterator[KT]: ...
+     def __eq__(self, other: typing.Any) -> bool: ...
+     def __ne__(self, other: typing.Any) -> bool: ...
+     def capacity(self) -> int: ...
+     def memory(self) -> int: ...
+     def is_full(self) -> bool: ...
+     def is_empty(self) -> bool: ...
+     def insert(
+         self, key: KT, value: VT, *args: typing.Any, **kwargs: typing.Any
+     ) -> typing.Optional[VT]: ...
+     def get(self, key: KT, default: typing.Optional[DT] = None) -> typing.Union[VT, DT]: ...
+     def pop(self, key: KT, default: typing.Optional[DT] = None) -> typing.Union[VT, DT]: ...
+     def setdefault(
+         self,
+         key: KT,
+         default: typing.Optional[DT] = None,
+         *args: typing.Any,
+         **kwargs: typing.Any,
+     ) -> typing.Optional[VT | DT]: ...
+     def popitem(self) -> typing.Tuple[KT, VT]: ...
+     def drain(self, n: int) -> int: ...
+     def clear(self, *, reuse: bool = False) -> None: ...
+     def shrink_to_fit(self) -> None: ...
+     def update(
+         self,
+         iterable: typing.Union[typing.Iterable[typing.Tuple[KT, VT]], typing.Dict[KT, VT]],
+         *args: typing.Any,
+         **kwargs: typing.Any,
+     ) -> None: ...
+     def keys(self) -> typing.Iterable[KT]: ...
+     def values(self) -> typing.Iterable[VT]: ...
+     def items(self) -> typing.Iterable[typing.Tuple[KT, VT]]: ...
+     def __copy__(self) -> "BaseCacheImpl[KT, VT]": ...
+     def __deepcopy__(self, memo: typing.Dict[str, object]) -> "BaseCacheImpl[KT, VT]": ...
+     def copy(self) -> "BaseCacheImpl[KT, VT]": ...
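The stub above is the full dict-like surface shared by every cachebox cache. As a quick orientation, a minimal usage sketch against this interface follows; it assumes the concrete FIFOCache class that the compiled extension exports (and that cachebox/utils.py below imports), and is illustrative rather than part of the packaged files.

    from cachebox import FIFOCache  # concrete BaseCacheImpl subclass from the extension

    cache = FIFOCache(128)                # bounded, dict-like cache
    cache["a"] = 1                        # __setitem__
    cache.insert("b", 2)                  # insert() returns the previous value, if any
    assert "a" in cache and len(cache) == 2
    assert cache.get("missing", 0) == 0   # get() with a default never raises
    for key, value in cache.items():      # keys()/values()/items() iterate the entries
        pass
    cache.clear()                         # empties the cache
    assert cache.is_empty()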
cachebox/py.typed ADDED
File without changes
cachebox/utils.py ADDED
@@ -0,0 +1,599 @@
+ import _thread
+ import asyncio
+ import functools
+ import inspect
+ import typing
+ from collections import defaultdict, namedtuple
+
+ from ._cachebox import BaseCacheImpl, FIFOCache
+
+ KT = typing.TypeVar("KT")
+ VT = typing.TypeVar("VT")
+ DT = typing.TypeVar("DT")
+ FT = typing.TypeVar("FT", bound=typing.Callable[..., typing.Any])
+
+
+ class Frozen(BaseCacheImpl[KT, VT]): # pragma: no cover
+     """
+     A wrapper class that prevents modifications to an underlying cache implementation.
+
+     This class provides a read-only view of a cache, optionally allowing silent
+     suppression of modification attempts instead of raising exceptions.
+     """
+
+     __slots__ = ("__cache", "ignore")
+
+     def __init__(self, cls: BaseCacheImpl[KT, VT], ignore: bool = False) -> None:
+         """
+         Initialize a frozen cache wrapper.
+
+         :param cls: The underlying cache implementation to be frozen
+         :type cls: BaseCacheImpl[KT, VT]
+         :param ignore: If True, silently ignores modification attempts; if False, raises TypeError when modification is attempted
+         :type ignore: bool, optional
+         """
+         assert isinstance(cls, BaseCacheImpl)
+         assert type(cls) is not Frozen
+
+         self.__cache = cls
+         self.ignore = ignore
+
+     @property
+     def cache(self) -> BaseCacheImpl[KT, VT]:
+         return self.__cache
+
+     @property
+     def maxsize(self) -> int:
+         return self.__cache.maxsize
+
+     @property
+     def maxmemory(self) -> int:
+         return self.__cache.maxmemory
+
+     def __len__(self) -> int:
+         return len(self.__cache)
+
+     def __sizeof__(self) -> int:
+         return self.__cache.__sizeof__()
+
+     def __bool__(self) -> bool:
+         return bool(self.__cache)
+
+     def __contains__(self, key: KT) -> bool:
+         return key in self.__cache
+
+     def __setitem__(self, key: KT, value: VT) -> None:
+         if self.ignore:
+             return
+
+         raise TypeError("This cache is frozen.")
+
+     def __getitem__(self, key: KT) -> VT:
+         return self.__cache[key]
+
+     def __delitem__(self, key: KT) -> None:
+         if self.ignore:
+             return None
+
+         raise TypeError("This cache is frozen.")
+
+     def __repr__(self) -> str:
+         return f"<Frozen: {self.__cache}>"
+
+     def __iter__(self) -> typing.Iterator[KT]:
+         return iter(self.__cache)
+
+     def __richcmp__(self, other: typing.Any, op: int) -> bool:
+         return self.__cache.__richcmp__(other, op)
+
+     def capacity(self) -> int:
+         return self.__cache.capacity()
+
+     def memory(self) -> int:
+         return self.__cache.memory()
+
+     def is_full(self) -> bool:
+         return self.__cache.is_full()
+
+     def is_empty(self) -> bool:
+         return self.__cache.is_empty()
+
+     def insert(self, key: KT, value: VT, *args, **kwargs) -> typing.Optional[VT]:
+         if self.ignore:
+             return None
+
+         raise TypeError("This cache is frozen.")
+
+     def get(self, key: KT, default: typing.Optional[DT] = None) -> typing.Union[VT, DT]:
+         return self.__cache.get(key, default)
+
+     def pop(self, key: KT, default: typing.Optional[DT] = None) -> typing.Union[VT, DT]:
+         if self.ignore:
+             return None # type: ignore[return-value]
+
+         raise TypeError("This cache is frozen.")
+
+     def setdefault(
+         self, key: KT, default: typing.Optional[DT] = None, *args, **kwargs
+     ) -> typing.Optional[typing.Union[VT, DT]]:
+         if self.ignore:
+             return None
+
+         raise TypeError("This cache is frozen.")
+
+     def popitem(self) -> typing.Tuple[KT, VT]:
+         if self.ignore:
+             return # type: ignore
+
+         raise TypeError("This cache is frozen.")
+
+     def drain(self, n: int) -> int:
+         if self.ignore:
+             return # type: ignore
+
+         raise TypeError("This cache is frozen.")
+
+     def clear(self, *, reuse: bool = False) -> None:
+         if self.ignore:
+             return
+
+         raise TypeError("This cache is frozen.")
+
+     def shrink_to_fit(self) -> None:
+         if self.ignore:
+             return
+
+         raise TypeError("This cache is frozen.")
+
+     def update(
+         self,
+         iterable: typing.Union[typing.Iterable[typing.Tuple[KT, VT]], typing.Dict[KT, VT]],
+         *args,
+         **kwargs,
+     ) -> None:
+         if self.ignore:
+             return
+
+         raise TypeError("This cache is frozen.")
+
+     def keys(self) -> typing.Iterable[KT]:
+         return self.__cache.keys()
+
+     def values(self) -> typing.Iterable[VT]:
+         return self.__cache.values()
+
+     def items(self) -> typing.Iterable[typing.Tuple[KT, VT]]:
+         return self.__cache.items()
+
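Frozen delegates every read to the wrapped cache and either raises TypeError on writes or, with ignore=True, drops them silently. A short illustrative sketch (not part of the packaged file; it assumes the FIFOCache class imported at the top of this module):

    from cachebox import FIFOCache
    from cachebox.utils import Frozen

    inner = FIFOCache(16, {"a": 1})
    frozen = Frozen(inner)            # writes raise TypeError
    assert frozen["a"] == 1           # reads are delegated to the wrapped cache
    try:
        frozen["b"] = 2
    except TypeError:
        pass

    silent = Frozen(inner, ignore=True)
    silent["b"] = 2                   # silently ignored instead of raising
    assert "b" not in inner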
+
+ class _LockWithCounter:
+     """
+     A lock with a counter to track the number of waiters.
+
+     This class provides a lock mechanism that supports both synchronous and asynchronous contexts,
+     with the ability to track the number of threads or coroutines waiting to acquire the lock.
+     """
+
+     __slots__ = ("lock", "waiters")
+
+     def __init__(self, is_async: bool = False):
+         self.lock = _thread.allocate_lock() if not is_async else asyncio.Lock()
+         self.waiters = 0
+
+     async def __aenter__(self) -> None:
+         self.waiters += 1
+         await self.lock.acquire() # type: ignore[misc]
+
+     async def __aexit__(self, *args, **kwds) -> None:
+         self.waiters -= 1
+         self.lock.release()
+
+     def __enter__(self) -> None:
+         self.waiters += 1
+         self.lock.acquire()
+
+     def __exit__(self, *args, **kwds) -> None:
+         self.waiters -= 1
+         self.lock.release()
+
+
+ def _copy_if_need(obj: VT, tocopy=(dict, list, set), level: int = 1) -> VT:
+     from copy import copy
+
+     if level == 0:
+         return obj
+
+     if level == 2:
+         return copy(obj)
+
+     return copy(obj) if (type(obj) in tocopy) else obj
+
+
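_copy_if_need is the helper behind the decorators' copy_level option: level 0 returns cached objects as-is, level 1 shallow-copies only the mutable built-ins listed in tocopy (dict, list, set), and level 2 always shallow-copies. An illustrative sketch (not part of the packaged file):

    from cachebox.utils import _copy_if_need

    data = {"x": 1}
    assert _copy_if_need(data, level=0) is data      # level 0: never copy
    assert _copy_if_need(data, level=1) is not data  # level 1: dict/list/set are copied
    assert _copy_if_need("abc", level=1) == "abc"    # other types pass through untouched
    copied = _copy_if_need(data, level=2)            # level 2: always shallow-copy
    assert copied == data and copied is not data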
+ def make_key(args: tuple, kwds: dict, fasttype=(int, str)):
+     """
+     Create a hashable key from function arguments for caching purposes.
+
+     Args:
+         args (tuple): Positional arguments to be used in key generation.
+         kwds (dict): Keyword arguments to be used in key generation.
+         fasttype (tuple, optional): Types that can be directly used as keys. Defaults to (int, str).
+
+     Returns:
+         A hashable key representing the function arguments, optimized for simple single-argument cases.
+     """
+     key = args
+     if kwds:
+         key += (object,)
+         for item in kwds.items():
+             key += item
+
+     if fasttype and len(key) == 1 and type(key[0]) in fasttype:
+         return key[0]
+
+     return key
+
+
+ def make_hash_key(args: tuple, kwds: dict):
+     """
+     Create a hash key from function arguments for caching purposes.
+
+     Args:
+         args (tuple): Positional arguments to be used in key generation.
+         kwds (dict): Keyword arguments to be used in key generation.
+
+     Returns:
+         int: A hash value representing the function arguments.
+     """
+     return hash(make_key(args, kwds))
+
+
+ def make_typed_key(args: tuple, kwds: dict):
+     """
+     Create a hashable key from function arguments that includes type information.
+
+     Args:
+         args (tuple): Positional arguments to be used in key generation.
+         kwds (dict): Keyword arguments to be used in key generation.
+
+     Returns:
+         A hashable key representing the function arguments, including the types of the arguments.
+     """
+     key = make_key(args, kwds, fasttype=())
+
+     key += tuple(type(v) for v in args)
+     if kwds:
+         key += tuple(type(v) for v in kwds.values())
+
+     return key
+
+
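The three key makers differ only in how much of the call signature they encode, which determines when two calls share a cache entry. A quick illustration of the keys they produce (editorial sketch, not part of the packaged file):

    from cachebox.utils import make_key, make_hash_key, make_typed_key

    # A single int or str argument is used directly as the key (the fasttype shortcut).
    assert make_key((42,), {}) == 42

    # Keyword arguments are flattened into the tuple after a sentinel `object` marker.
    assert make_key((1, 2), {"x": 3}) == (1, 2, object, "x", 3)

    # make_hash_key collapses the same tuple into a single hash value.
    assert isinstance(make_hash_key((1, 2), {"x": 3}), int)

    # make_typed_key also encodes argument types, so 1 and 1.0 get different keys.
    assert make_typed_key((1,), {}) != make_typed_key((1.0,), {})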
+ CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "length", "memory"])
+ EVENT_MISS = 1
+ EVENT_HIT = 2
+
+
+ def _cached_wrapper(
+     func,
+     cache: typing.Union[BaseCacheImpl, typing.Callable],
+     key_maker: typing.Callable[[tuple, dict], typing.Hashable],
+     clear_reuse: bool,
+     callback: typing.Optional[typing.Callable[[int, typing.Any, typing.Any], typing.Any]],
+     copy_level: int,
+     is_method: bool,
+ ):
+     is_method = cache_is_function = inspect.isfunction(cache)
+     _key_maker = (lambda args, kwds: key_maker(args[1:], kwds)) if is_method else key_maker
+
+     hits = 0
+     misses = 0
+     locks: defaultdict[typing.Hashable, _LockWithCounter] = defaultdict(_LockWithCounter)
+     exceptions: typing.Dict[typing.Hashable, BaseException] = {}
+
+     def _wrapped(*args, **kwds):
+         nonlocal hits, misses, locks, exceptions
+
+         if kwds.pop("cachebox__ignore", False):
+             return func(*args, **kwds)
+
+         _cache = cache(args[0]) if cache_is_function else cache
+         key = _key_maker(args, kwds)
+
+         # try to get result from cache
+         try:
+             result = _cache[key]
+         except KeyError:
+             pass
+         else:
+             # NOTE: KeyError raised by `callback` must not be swallowed here,
+             # so the call is deliberately not wrapped in try/except.
+             hits += 1
+
+             if callback is not None:
+                 callback(EVENT_HIT, key, result)
+
+             return _copy_if_need(result, level=copy_level)
+
+         with locks[key]:
+             if exceptions.get(key, None) is not None:
+                 cached_error = exceptions[key] if locks[key].waiters > 1 else exceptions.pop(key)
+                 raise cached_error
+
+             try:
+                 result = _cache[key]
+                 hits += 1
+                 event = EVENT_HIT
+             except KeyError:
+                 try:
+                     result = func(*args, **kwds)
+                 except Exception as e:
+                     if locks[key].waiters > 1:
+                         exceptions[key] = e
+
+                     raise e
+
+                 else:
+                     _cache[key] = result
+                     misses += 1
+                     event = EVENT_MISS
+
+             if callback is not None:
+                 callback(event, key, result)
+
+             return _copy_if_need(result, level=copy_level)
+
+     if not cache_is_function:
+         _wrapped.cache = cache
+         _wrapped.cache_info = lambda: CacheInfo(
+             hits, misses, cache.maxsize, len(cache), cache.memory()
+         )
+
+     _wrapped.callback = callback
+
+     if not cache_is_function:
+
+         def cache_clear() -> None:
+             nonlocal misses, hits, locks, exceptions
+             cache.clear(reuse=clear_reuse)
+             misses = 0
+             hits = 0
+             locks.clear()
+             exceptions.clear()
+
+         _wrapped.cache_clear = cache_clear
+
+     return _wrapped
+
+
+ def _async_cached_wrapper(
+     func,
+     cache: typing.Union[BaseCacheImpl, typing.Callable],
+     key_maker: typing.Callable[[tuple, dict], typing.Hashable],
+     clear_reuse: bool,
+     callback: typing.Optional[typing.Callable[[int, typing.Any, typing.Any], typing.Any]],
+     copy_level: int,
+     is_method: bool,
+ ):
+     is_method = cache_is_function = inspect.isfunction(cache)
+     _key_maker = (lambda args, kwds: key_maker(args[1:], kwds)) if is_method else key_maker
+
+     hits = 0
+     misses = 0
+     locks: defaultdict[typing.Hashable, _LockWithCounter] = defaultdict(
+         lambda: _LockWithCounter(True)
+     )
+     exceptions: typing.Dict[typing.Hashable, BaseException] = {}
+
+     async def _wrapped(*args, **kwds):
+         nonlocal hits, misses, locks, exceptions
+
+         if kwds.pop("cachebox__ignore", False):
+             return await func(*args, **kwds)
+
+         _cache = cache(args[0]) if cache_is_function else cache
+         key = _key_maker(args, kwds)
+
+         # try to get result from cache
+         try:
+             result = _cache[key]
+         except KeyError:
+             pass
+         else:
+             # NOTE: KeyError raised by `callback` must not be swallowed here,
+             # so the call is deliberately not wrapped in try/except.
+             hits += 1
+
+             if callback is not None:
+                 awaitable = callback(EVENT_HIT, key, result)
+                 if inspect.isawaitable(awaitable):
+                     await awaitable
+
+             return _copy_if_need(result, level=copy_level)
+
+         async with locks[key]:
+             if exceptions.get(key, None) is not None:
+                 cached_error = exceptions[key] if locks[key].waiters > 1 else exceptions.pop(key)
+                 raise cached_error
+
+             try:
+                 result = _cache[key]
+                 hits += 1
+                 event = EVENT_HIT
+             except KeyError:
+                 try:
+                     result = await func(*args, **kwds)
+                 except Exception as e:
+                     if locks[key].waiters > 1:
+                         exceptions[key] = e
+
+                     raise e
+
+                 else:
+                     _cache[key] = result
+                     misses += 1
+                     event = EVENT_MISS
+
+             if callback is not None:
+                 awaitable = callback(event, key, result)
+                 if inspect.isawaitable(awaitable):
+                     await awaitable
+
+             return _copy_if_need(result, level=copy_level)
+
+     if not cache_is_function:
+         _wrapped.cache = cache
+         _wrapped.cache_info = lambda: CacheInfo(
+             hits, misses, cache.maxsize, len(cache), cache.memory()
+         )
+
+     _wrapped.callback = callback
+
+     if not cache_is_function:
+
+         def cache_clear() -> None:
+             nonlocal misses, hits, locks, exceptions
+             cache.clear(reuse=clear_reuse)
+             misses = 0
+             hits = 0
+             locks.clear()
+             exceptions.clear()
+
+         _wrapped.cache_clear = cache_clear
+
+     return _wrapped
+
+
+ def cached(
+     cache: typing.Union[BaseCacheImpl, dict, None],
+     key_maker: typing.Callable[[tuple, dict], typing.Hashable] = make_key,
+     clear_reuse: bool = False,
+     callback: typing.Optional[typing.Callable[[int, typing.Any, typing.Any], typing.Any]] = None,
+     copy_level: int = 1,
+ ) -> typing.Callable[[FT], FT]:
+     """
+     Decorator to create a memoized cache for function results.
+
+     Wraps a function to automatically cache and retrieve its results based on input parameters.
+
+     Args:
+         cache (BaseCacheImpl, dict, callable): Cache implementation to store results. Defaults to FIFOCache.
+             Can also be a callable that receives `self` and returns the cache to use.
+         key_maker (Callable, optional): Function to generate cache keys from function arguments. Defaults to make_key.
+         clear_reuse (bool, optional): Whether the cache keeps its allocated memory when cleared (passed to `clear(reuse=...)`). Defaults to False.
+         callback (Callable, optional): Function called on cache hit/miss events. Defaults to None.
+         copy_level (int, optional): How aggressively cached results are copied before being returned: 0 never copies, 1 copies dict/list/set results, 2 always makes a shallow copy. Defaults to 1.
+
+     Returns:
+         Callable: Decorated function with caching capabilities.
+
+     Example for functions::
+
+         @cachebox.cached(cachebox.LRUCache(128))
+         def sum_as_string(a, b):
+             return str(a+b)
+
+         assert sum_as_string(1, 2) == "3"
+
+         assert len(sum_as_string.cache) == 1
+         sum_as_string.cache_clear()
+         assert len(sum_as_string.cache) == 0
+
+     Example for methods::
+
+         class A:
+             def __init__(self, num):
+                 self.num = num
+                 self._cache = cachebox.FIFOCache(0)
+
+             @cachebox.cached(lambda self: self._cache)
+             def method(self, n):
+                 return self.num * n
+
+         instance = A(10)
+         assert instance.method(2) == 20
+     """
+     if cache is None:
+         cache = FIFOCache(0)
+
+     if type(cache) is dict:
+         cache = FIFOCache(0, cache)
+
+     if not isinstance(cache, BaseCacheImpl) and not inspect.isfunction(cache):
+         raise TypeError("we expected cachebox caches or function, got %r" % (cache,))
+
+     def decorator(func: FT) -> FT:
+         if inspect.iscoroutinefunction(func):
+             wrapper = _async_cached_wrapper(
+                 func, cache, key_maker, clear_reuse, callback, copy_level, False
+             )
+         else:
+             wrapper = _cached_wrapper(
+                 func, cache, key_maker, clear_reuse, callback, copy_level, False
+             )
+
+         return functools.update_wrapper(wrapper, func) # type: ignore[return-value]
+
+     return decorator
+
+
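Beyond the docstring examples, the wrapper produced by `cached` has a few extras that are easy to miss: a per-call bypass via the reserved `cachebox__ignore` keyword, hit/miss statistics via `cache_info()`, and an optional `callback` invoked with the EVENT_HIT/EVENT_MISS constants defined above. A hedged usage sketch (the `square`/`on_event` names are illustrative, not part of the packaged file):

    import cachebox
    from cachebox.utils import EVENT_HIT, EVENT_MISS

    events = []

    def on_event(event, key, value):
        # receives EVENT_MISS when the value is computed, EVENT_HIT when served from cache
        events.append(event)

    @cachebox.cached(cachebox.LRUCache(64), callback=on_event)
    def square(n):
        return n * n

    square(4)                          # miss: computed and stored
    square(4)                          # hit: served from the cache
    square(4, cachebox__ignore=True)   # bypasses the cache entirely (no stats, no callback)
    assert events == [EVENT_MISS, EVENT_HIT]

    info = square.cache_info()         # CacheInfo(hits, misses, maxsize, length, memory)
    assert info.hits == 1 and info.misses == 1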
+ def cachedmethod(
+     cache: typing.Union[BaseCacheImpl, dict, None],
+     key_maker: typing.Callable[[tuple, dict], typing.Hashable] = make_key,
+     clear_reuse: bool = False,
+     callback: typing.Optional[typing.Callable[[int, typing.Any, typing.Any], typing.Any]] = None,
+     copy_level: int = 1,
+ ) -> typing.Callable[[FT], FT]:
+     """
+     **This function is deprecated due to issue [#35](https://github.com/awolverp/cachebox/issues/35)**.
+     Use the `cached` decorator instead.
+
+     Decorator to create a method-specific memoized cache for function results.
+
+     Similar to `cached()`, but ignores the `self` parameter when generating cache keys.
+
+     Args:
+         cache (BaseCacheImpl, dict, optional): Cache implementation to store results. Defaults to FIFOCache.
+         key_maker (Callable, optional): Function to generate cache keys from function arguments. Defaults to make_key.
+         clear_reuse (bool, optional): Whether the cache keeps its allocated memory when cleared (passed to `clear(reuse=...)`). Defaults to False.
+         callback (Callable, optional): Function called on cache hit/miss events. Defaults to None.
+         copy_level (int, optional): How aggressively cached results are copied before being returned: 0 never copies, 1 copies dict/list/set results, 2 always makes a shallow copy. Defaults to 1.
+
+     Returns:
+         Callable: Decorated method with method-specific caching capabilities.
+     """
+     import warnings
+
+     warnings.warn(
+         "cachedmethod is deprecated, use cached instead. see issue https://github.com/awolverp/cachebox/issues/35",
+         DeprecationWarning,
+         stacklevel=2,
+     )
+
+     if cache is None:
+         cache = FIFOCache(0)
+
+     if type(cache) is dict:
+         cache = FIFOCache(0, cache)
+
+     if not isinstance(cache, BaseCacheImpl):
+         raise TypeError("we expected cachebox caches, got %r" % (cache,))
+
+     def decorator(func: FT) -> FT:
+         if inspect.iscoroutinefunction(func):
+             wrapper = _async_cached_wrapper(
+                 func, cache, key_maker, clear_reuse, callback, copy_level, True
+             )
+         else:
+             wrapper = _cached_wrapper(
+                 func, cache, key_maker, clear_reuse, callback, copy_level, True
+             )
+
+         return functools.update_wrapper(wrapper, func) # type: ignore[return-value]
+
+     return decorator
+
+
+ def is_cached(func: object) -> bool:
+     """
+     Check whether a function or method has been cached by cachebox.
+     """
+     return hasattr(func, "cache") and isinstance(func.cache, BaseCacheImpl)
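Finally, `is_cached` only checks for the `cache` attribute that the wrappers attach when they are given a concrete cache (a wrapper built around a `lambda self: ...` cache factory exposes no such attribute). A small illustrative sketch (the `double`/`plain` names are hypothetical):

    import cachebox
    from cachebox.utils import is_cached

    @cachebox.cached(cachebox.FIFOCache(32))
    def double(n):
        return 2 * n

    def plain(n):
        return 2 * n

    assert is_cached(double)     # wrapper exposes a BaseCacheImpl via `.cache`
    assert not is_cached(plain)  # ordinary functions are not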