best-simple-caching 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Gorshipisk
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,178 @@
1
+ Metadata-Version: 2.4
2
+ Name: best-simple-caching
3
+ Version: 1.0.0
4
+ Summary: Asynchronous caching library with per-key locking, TTL, LRU and decorators
5
+ Author-email: Gorshipisk <bestdevelopment.work@gmail.com>
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/Gorshipiskp/best-simple-cache
8
+ Classifier: Programming Language :: Python :: 3
9
+ Classifier: License :: OSI Approved :: MIT License
10
+ Classifier: Operating System :: OS Independent
11
+ Requires-Python: >=3.10
12
+ Description-Content-Type: text/markdown
13
+ License-File: LICENSE
14
+ Requires-Dist: pydantic>=2.0
15
+ Dynamic: license-file
16
+
17
+ ### Short Description
18
+
19
+ A lightweight, asynchronous caching library for Python with per‑key locking, TTL, LRU eviction, and convenient decorators. Built with `asyncio` and Pydantic.
20
+
21
+ ---
22
+
23
+ # Best Simple Cache
24
+
25
+ **Best Simple Cache** is an asynchronous caching library designed for modern Python applications that use `asyncio` and Pydantic models. It provides:
26
+
27
+ - **Per‑key locking** for safe concurrent access within a single asyncio event loop (the locks are `asyncio.Lock`, i.e. coroutine‑safe, not thread‑safe).
28
+ - **Time‑to‑live (TTL)** and **LRU eviction**.
29
+ - **Decorator‑based integration** – add caching to any async/sync function with minimal boilerplate.
30
+ - **Invalidation hooks** and **automatic cache refresh**.
31
+ - **Flexible primary key generation** – define your own key structure.
32
+
33
+ ## Features
34
+
35
+ - ✅ **Async‑first** – built on `asyncio` with per‑key locks to avoid race conditions.
36
+ - ✅ **TTL & LRU** – automatically evict stale or least‑recently used entries.
37
+ - ✅ **Decoupled storage** – implement your own cache backend by subclassing `EntityCache`.
38
+ - ✅ **Decorator suite** – `@cache`, `@invalidate_after`, and `@refresh_after_by` for common patterns.
39
+ - ✅ **Mixed sync/async support** – works with both synchronous and asynchronous functions.
40
+ - ✅ **Type hints** – fully typed for better IDE support.
41
+
42
+ ## Installation
43
+
44
+ ```bash
45
+ pip install best_simple_caching
46
+ ```
47
+
48
+ **Requirements:** Python 3.10+, Pydantic (v2).
49
+
50
+ ## Quick Start
51
+
52
+ ```python
53
+ import asyncio
54
+ from pydantic import BaseModel
55
+ from best_simple_cache import EntityCache, CacheDecorator, CachingConfig
56
+
57
+ # 1. Define your Pydantic model
58
+ class User(BaseModel):
59
+ id: int
60
+ name: str
61
+
62
+ # 2. Define the primary key (must be hashable)
63
+ class UserPK:
64
+ def __init__(self, user_id: int):
65
+ self.user_id = user_id
66
+
67
+ def __hash__(self):
68
+ return hash(self.user_id)
69
+
70
+ def __eq__(self, other):
71
+ return isinstance(other, UserPK) and self.user_id == other.user_id
72
+
73
+ # 3. Implement your cache class
74
+ class UserCache(EntityCache[User, UserPK]):
75
+ def make_pk(self, user_id: int, **_kwargs) -> UserPK:
76
+ return UserPK(user_id)
77
+
78
+ # 4. Create a decorator instance
79
+ user_cache = CacheDecorator(
80
+ entity_cache=UserCache(
81
+ entity_name="user",
82
+ model=User,
83
+ config=CachingConfig(ttl=60, max_size=1000)
84
+ )
85
+ )
86
+
87
+ # 5. Decorate your function
88
+ @user_cache.cache
89
+ async def get_user(*, user_id: int) -> User:
90
+ # Simulate an expensive API call
91
+ await asyncio.sleep(1)
92
+ return User(id=user_id, name="Alice")
93
+
94
+ async def main():
95
+ # First call – runs the function, stores result
96
+ user = await get_user(user_id=42)
97
+ print(user)
98
+
99
+ # Second call – returns from cache
100
+ user = await get_user(user_id=42)
101
+ print(user) # Instant
102
+
103
+ asyncio.run(main())
104
+ ```
105
+
106
+ ## Advanced Usage
107
+
108
+ ### Invalidation
109
+
110
+ ```python
111
+ @user_cache.invalidate_after
112
+ async def update_user(*, user_id: int) -> User:
113
+ # This function updates the user and then invalidates the cache
114
+ updated_user = ... # some update logic
115
+ return updated_user
116
+ ```
117
+
118
+ ### Refresh After Write
119
+
120
+ ```python
121
+ @user_cache.refresh_after_by(get_user) # get_user is a cached function
122
+ async def update_user(*, user_id: int) -> None:
123
+ # Perform update, then the cache will be invalidated and refreshed
124
+ ...
125
+ ```
126
+
127
+ ### Custom Key Generation
128
+
129
+ The `make_pk` method receives the same keyword arguments as the decorated function. Use it to build a hashable key.
130
+
131
+ ```python
132
+ class MyCache(EntityCache[MyModel, MyPK]):
133
+ def make_pk(self, id: int, region: str = "default", **_kwargs) -> MyPK:
134
+ return MyPK(id, region)
135
+ ```
136
+
137
+ ## API Reference
138
+
139
+ ### `EntityCache(ABC, Generic[T, PK])`
140
+
141
+ Abstract base class for a cache backend.
142
+
143
+ - `__init__(entity_name: str, model: type[T], config: CachingConfig | None)`
144
+ - `async set(entity: T, **kwargs) -> None` – store an entity.
145
+ - `async get(**kwargs) -> T | None` – retrieve an entity by keyword arguments.
146
+ - `async get_by_pk(pk: PK) -> T | None` – retrieve directly by primary key.
147
+ - `async invalidate(entity_pk: PK | None = None, **kwargs) -> None` – remove an entry.
148
+ - `abstractmethod make_pk(**kwargs) -> PK` – build a primary key from arguments.
149
+
150
+ ### `CacheDecorator(Generic[T, PK])`
151
+
152
+ Creates decorators bound to a specific `EntityCache`.
153
+
154
+ - `cache` – decorator that caches the function result.
155
+ - `invalidate_after` – decorator that invalidates the cache after the function runs.
156
+ - `refresh_after_by(refresh_func)` – decorator that invalidates and then calls `refresh_func` to repopulate the cache.
157
+
158
+ ### `CachingConfig`
159
+
160
+ Configuration for a cache.
161
+
162
+ - `ttl: float | None` – seconds after which an entry is considered stale (default `None` = never expires).
163
+ - `max_size: int` – maximum number of entries; `0` means unlimited (default `0`).
164
+ - `disabled: bool` – if `True`, caching is disabled (default `False`).
165
+
166
+ ## Limitations
167
+
168
+ - **Keyword arguments only** – the decorated function must use only keyword arguments (or at least those used in `make_pk` must be named). Positional arguments are not supported.
169
+ - **Pydantic models** – the library assumes cached entities are subclasses of `BaseModel`. You can extend it to work with other types by overriding type checks.
170
+ - **In‑memory only** – the default implementation stores data in an `OrderedDict`. For distributed caching, implement your own backend.
171
+
172
+ ## Contributing
173
+
174
+ Contributions are welcome! Please open an issue or submit a pull request.
175
+
176
+ ## License
177
+
178
+ MIT
@@ -0,0 +1,162 @@
1
+ ### Short Description
2
+
3
+ A lightweight, asynchronous caching library for Python with per‑key locking, TTL, LRU eviction, and convenient decorators. Built with `asyncio` and Pydantic.
4
+
5
+ ---
6
+
7
+ # Best Simple Cache
8
+
9
+ **Best Simple Cache** is an asynchronous caching library designed for modern Python applications that use `asyncio` and Pydantic models. It provides:
10
+
11
+ - **Per‑key locking** for thread‑safe concurrent access.
12
+ - **Time‑to‑live (TTL)** and **LRU eviction**.
13
+ - **Decorator‑based integration** – add caching to any async/sync function with minimal boilerplate.
14
+ - **Invalidation hooks** and **automatic cache refresh**.
15
+ - **Flexible primary key generation** – define your own key structure.
16
+
17
+ ## Features
18
+
19
+ - ✅ **Async‑first** – built on `asyncio` with per‑key locks to avoid race conditions.
20
+ - ✅ **TTL & LRU** – automatically evict stale or least‑recently used entries.
21
+ - ✅ **Decoupled storage** – implement your own cache backend by subclassing `EntityCache`.
22
+ - ✅ **Decorator suite** – `@cache`, `@invalidate_after`, and `@refresh_after_by` for common patterns.
23
+ - ✅ **Mixed sync/async support** – works with both synchronous and asynchronous functions.
24
+ - ✅ **Type hints** – fully typed for better IDE support.
25
+
26
+ ## Installation
27
+
28
+ ```bash
29
+ pip install best_simple_caching
30
+ ```
31
+
32
+ **Requirements:** Python 3.10+, Pydantic (v2).
33
+
34
+ ## Quick Start
35
+
36
+ ```python
37
+ import asyncio
38
+ from pydantic import BaseModel
39
+ from best_simple_cache import EntityCache, CacheDecorator, CachingConfig
40
+
41
+ # 1. Define your Pydantic model
42
+ class User(BaseModel):
43
+ id: int
44
+ name: str
45
+
46
+ # 2. Define the primary key (must be hashable)
47
+ class UserPK:
48
+ def __init__(self, user_id: int):
49
+ self.user_id = user_id
50
+
51
+ def __hash__(self):
52
+ return hash(self.user_id)
53
+
54
+ def __eq__(self, other):
55
+ return isinstance(other, UserPK) and self.user_id == other.user_id
56
+
57
+ # 3. Implement your cache class
58
+ class UserCache(EntityCache[User, UserPK]):
59
+ def make_pk(self, user_id: int, **_kwargs) -> UserPK:
60
+ return UserPK(user_id)
61
+
62
+ # 4. Create a decorator instance
63
+ user_cache = CacheDecorator(
64
+ entity_cache=UserCache(
65
+ entity_name="user",
66
+ model=User,
67
+ config=CachingConfig(ttl=60, max_size=1000)
68
+ )
69
+ )
70
+
71
+ # 5. Decorate your function
72
+ @user_cache.cache
73
+ async def get_user(*, user_id: int) -> User:
74
+ # Simulate an expensive API call
75
+ await asyncio.sleep(1)
76
+ return User(id=user_id, name="Alice")
77
+
78
+ async def main():
79
+ # First call – runs the function, stores result
80
+ user = await get_user(user_id=42)
81
+ print(user)
82
+
83
+ # Second call – returns from cache
84
+ user = await get_user(user_id=42)
85
+ print(user) # Instant
86
+
87
+ asyncio.run(main())
88
+ ```
89
+
90
+ ## Advanced Usage
91
+
92
+ ### Invalidation
93
+
94
+ ```python
95
+ @user_cache.invalidate_after
96
+ async def update_user(*, user_id: int) -> User:
97
+ # This function updates the user and then invalidates the cache
98
+ updated_user = ... # some update logic
99
+ return updated_user
100
+ ```
101
+
102
+ ### Refresh After Write
103
+
104
+ ```python
105
+ @user_cache.refresh_after_by(get_user) # get_user is a cached function
106
+ async def update_user(*, user_id: int) -> None:
107
+ # Perform update, then the cache will be invalidated and refreshed
108
+ ...
109
+ ```
110
+
111
+ ### Custom Key Generation
112
+
113
+ The `make_pk` method receives the same keyword arguments as the decorated function. Use it to build a hashable key.
114
+
115
+ ```python
116
+ class MyCache(EntityCache[MyModel, MyPK]):
117
+ def make_pk(self, id: int, region: str = "default", **_kwargs) -> MyPK:
118
+ return MyPK(id, region)
119
+ ```
120
+
121
+ ## API Reference
122
+
123
+ ### `EntityCache(ABC, Generic[T, PK])`
124
+
125
+ Abstract base class for a cache backend.
126
+
127
+ - `__init__(entity_name: str, model: type[T], config: CachingConfig | None)`
128
+ - `async set(entity: T, **kwargs) -> None` – store an entity.
129
+ - `async get(**kwargs) -> T | None` – retrieve an entity by keyword arguments.
130
+ - `async get_by_pk(pk: PK) -> T | None` – retrieve directly by primary key.
131
+ - `async invalidate(entity_pk: PK | None = None, **kwargs) -> None` – remove an entry.
132
+ - `abstractmethod make_pk(**kwargs) -> PK` – build a primary key from arguments.
133
+
134
+ ### `CacheDecorator(Generic[T, PK])`
135
+
136
+ Creates decorators bound to a specific `EntityCache`.
137
+
138
+ - `cache` – decorator that caches the function result.
139
+ - `invalidate_after` – decorator that invalidates the cache after the function runs.
140
+ - `refresh_after_by(refresh_func)` – decorator that invalidates and then calls `refresh_func` to repopulate the cache.
141
+
142
+ ### `CachingConfig`
143
+
144
+ Configuration for a cache.
145
+
146
+ - `ttl: float | None` – seconds after which an entry is considered stale (default `None` = never expires).
147
+ - `max_size: int` – maximum number of entries; `0` means unlimited (default `0`).
148
+ - `disabled: bool` – if `True`, caching is disabled (default `False`).
149
+
150
+ ## Limitations
151
+
152
+ - **Keyword arguments only** – the decorated function must use only keyword arguments (or at least those used in `make_pk` must be named). Positional arguments are not supported.
153
+ - **Pydantic models** – the library assumes cached entities are subclasses of `BaseModel`. You can extend it to work with other types by overriding type checks.
154
+ - **In‑memory only** – the default implementation stores data in an `OrderedDict`. For distributed caching, implement your own backend.
155
+
156
+ ## Contributing
157
+
158
+ Contributions are welcome! Please open an issue or submit a pull request.
159
+
160
+ ## License
161
+
162
+ MIT
@@ -0,0 +1,6 @@
1
+ from .caching import CacheDecorator
2
+ from .entity_cache import EntityCache, CachingConfig, CacheInfo
3
+
4
# Public API re-exported at package level.
__all__ = ["EntityCache", "CacheInfo", "CachingConfig", "CacheDecorator"]
# Package metadata.
__version__ = "1.0.0"
__author__ = "Gorshipisk"
@@ -0,0 +1,7 @@
1
+ import asyncio
2
+
3
+ from .example import main
4
+
5
# Entry point for `python -m best_simple_cache`: runs the bundled examples.
if __name__ == "__main__":
    asyncio.run(main())
@@ -0,0 +1,79 @@
1
+ import functools
2
+ from typing import Generic, Any, Callable, Awaitable, Coroutine, ParamSpec
3
+
4
+ from . import EntityCache
5
+ from .entity_cache import PK, T
6
+ from .misc import handle_maybe_async
7
+
8
# Captures the decorated function's full parameter signature for typing.
P = ParamSpec("P")
9
+
10
+
11
class CacheDecorator(Generic[T, PK]):
    """Factory of caching decorators bound to a single ``EntityCache``.

    Exposes three decorators:

    * ``cache`` – memoize the wrapped function's result in the entity cache.
    * ``invalidate_after`` – drop the cached entry after the function runs.
    * ``refresh_after_by(refresh_func)`` – invalidate, then call
      ``refresh_func`` with the same arguments to repopulate the cache.

    All wrapped functions must receive the arguments used by
    ``EntityCache.make_pk`` as keyword arguments (positional arguments are
    forwarded to the function but ignored for key building).
    """

    def __init__(self, entity_cache: EntityCache[T, PK]) -> None:
        self._entity_cache: EntityCache[T, PK] = entity_cache

        # Built once per instance so they can be applied without parentheses:
        # ``@decorator_instance.cache``.
        self.cache = self.__cache()
        self.invalidate_after = self.__invalidate_after()

    def __cache(self) -> Callable[
        [Callable[P, T | Awaitable[T] | Coroutine[Any, Any, T]]],
        Callable[P, Coroutine[Any, Any, T]]
    ]:
        def decorator(
                func: Callable[P, T | Awaitable[T] | Coroutine[Any, Any, T]]
        ) -> Callable[P, Coroutine[Any, Any, T]]:
            # Mark the function as cache-backed; ``functools.wraps`` copies
            # ``__dict__`` onto the wrapper, so the returned wrapper carries
            # ``is_cached`` too (consulted by ``refresh_after_by``).
            func.is_cached = True

            @functools.wraps(func)
            async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
                # Cache hit: return the stored entity without calling ``func``.
                from_cache: T | None = await self._entity_cache.get(**kwargs)

                if from_cache is not None:
                    return from_cache

                # Cache miss: execute (sync or async) and store the result.
                result: T = await handle_maybe_async(func, *args, **kwargs)

                await self._entity_cache.set(entity=result, **kwargs)

                return result

            return wrapper

        return decorator

    def __invalidate_after(self) -> Callable[
        [Callable[P, T | Awaitable[T] | Coroutine[Any, Any, T]]],
        Callable[P, Coroutine[Any, Any, T]]
    ]:
        def decorator(
                func: Callable[P, T | Awaitable[T] | Coroutine[Any, Any, T]]
        ) -> Callable[P, Coroutine[Any, Any, T]]:
            @functools.wraps(func)
            async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
                # Run the mutator first, then drop the now-stale entry.
                result: T = await handle_maybe_async(func, *args, **kwargs)

                await self._entity_cache.invalidate(**kwargs)

                return result

            return wrapper

        return decorator

    def refresh_after_by(
            self,
            refresh_func: Callable[..., T | Awaitable[T] | Coroutine[Any, Any, T]]
    ) -> Callable[[Callable[P, T | Awaitable[T]]], Callable[P, Coroutine[Any, Any, T]]]:
        """Return a decorator that, after the wrapped function runs,
        invalidates the cached entry and repopulates it via ``refresh_func``.

        Args:
            refresh_func: Function called with the same arguments as the
                wrapped function; its result becomes the fresh cache entry.
        """
        def decorator(func: Callable[P, T | Awaitable[T]]) -> Callable[P, Coroutine[Any, Any, T]]:
            @functools.wraps(func)
            async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
                result: T = await handle_maybe_async(func, *args, **kwargs)

                await self._entity_cache.invalidate(**kwargs)

                refresh_res: T = await handle_maybe_async(refresh_func, *args, **kwargs)

                # Bug fix: the original checked ``func`` (the wrapped update
                # function) here, so a plain, non-``@cache``-decorated
                # ``refresh_func`` never had its result stored and the cache
                # stayed empty after invalidation.  If ``refresh_func`` is
                # itself cache-decorated, its call above already stored the
                # fresh entity; otherwise store it manually.
                if not getattr(refresh_func, "is_cached", False):
                    await self._entity_cache.set(entity=refresh_res, **kwargs)

                return result

            return wrapper

        return decorator
@@ -0,0 +1,124 @@
1
+ import time
2
+ from abc import ABC, abstractmethod
3
+ from asyncio import Lock
4
+ from collections import OrderedDict
5
+ from dataclasses import dataclass
6
+ from typing import Any, Generic, TypeVar, Hashable
7
+
8
+ from pydantic import BaseModel
9
+
10
# T: the cached entity type (bound to a pydantic model).
T = TypeVar("T", bound=BaseModel)
# PK: the primary-key type; must be hashable to serve as a dict key.
PK = TypeVar("PK", bound=Hashable)
12
+
13
+
14
@dataclass(frozen=True, kw_only=True)
class CacheInfo(Generic[T]):
    """Immutable cache record: the stored entity plus its creation timestamp
    (a ``time.monotonic()`` value, used for TTL expiry checks)."""
    entity: T
    created_at: float
18
+
19
+
20
+ class CachingConfig:
21
+ def __init__(
22
+ self,
23
+ ttl: float | None = None,
24
+ max_size: int = 0,
25
+ disabled: bool = False,
26
+ ):
27
+ self.ttl: float | None = ttl
28
+ self.max_size: int = max_size
29
+ self.disabled: bool = disabled
30
+
31
+ def __repr__(self):
32
+ return f"CachingConfig(ttl={self.ttl}; max_size={self.max_size}; disabled={self.disabled})"
33
+
34
+
35
class EntityCache(ABC, Generic[T, PK]):
    """Abstract in-memory, asyncio-friendly cache for one entity type.

    Entries live in an ``OrderedDict`` used as an LRU: reads and writes move
    the key to the end; eviction pops from the front.  Access to each key is
    serialized by a per-key ``asyncio.Lock``.  Subclasses must implement
    ``make_pk`` to build a hashable primary key from keyword arguments.
    """

    def __init__(
            self,
            entity_name: str,
            model: type[T],
            config: CachingConfig | None = None,
    ):
        self.entity_name: str = entity_name
        self.model: type[T] = model

        self._config: CachingConfig = config or CachingConfig()
        # pk -> cached record; insertion order doubles as LRU order.
        self._pool: OrderedDict[PK, CacheInfo[T]] = OrderedDict()
        # pk -> per-key lock guarding that entry.
        self._pool_locks: dict[PK, Lock] = {}

    def __repr__(self) -> str:
        return (f"{self.__class__.__name__}(name=\"{self.entity_name}\"; "
                f"pool_len={len(self._pool)}; config={self._config})")

    async def set(self, entity: T, **kwargs: Any) -> None:
        """Store ``entity`` under the pk built from ``kwargs``.

        Raises:
            TypeError: If ``entity`` is not an instance of ``self.model``.
        """
        if self._config.disabled:
            return

        if not isinstance(entity, self.model):
            raise TypeError(f"Expected {self.model}, got {type(entity)}")

        entity_pk: PK = self.make_pk(**kwargs)

        async with self._pool_locks.setdefault(entity_pk, Lock()):
            now: float = time.monotonic()

            self._pool[entity_pk] = CacheInfo(entity=entity, created_at=now)

            # Most-recently-used entries live at the end of the dict.
            self._pool.move_to_end(entity_pk)

            self._evict_if_needed()

    async def get_by_pk(self, pk: PK) -> T | None:
        """Return the cached entity for ``pk``, or ``None`` on miss/expiry."""
        return await self._get(pk)

    async def get(self, **kwargs: Any) -> T | None:
        """Return the cached entity for the pk built from ``kwargs``,
        or ``None`` on miss/expiry."""
        return await self._get(self.make_pk(**kwargs))

    async def _get(self, entity_pk: PK) -> T | None:
        if self._config.disabled:
            return None

        async with self._pool_locks.setdefault(entity_pk, Lock()):
            # ``get`` may miss, so the annotation must admit ``None``
            # (the original annotated this as non-optional ``CacheInfo[T]``).
            info: CacheInfo[T] | None = self._pool.get(entity_pk)

            if info is None:
                return None

            if self._is_expired(info):
                # Drop the stale entry together with its lock.
                self._pool.pop(entity_pk, None)
                self._pool_locks.pop(entity_pk, None)
                return None

            self._pool.move_to_end(entity_pk)

            return info.entity

    async def invalidate(self, entity_pk: PK | None = None, **kwargs: Any) -> None:
        """Remove one entry, addressed by ``entity_pk`` or built from ``kwargs``."""
        if entity_pk is None:
            entity_pk = self.make_pk(**kwargs)

        async with self._pool_locks.setdefault(entity_pk, Lock()):
            self._pool.pop(entity_pk, None)
            self._pool_locks.pop(entity_pk, None)

    def _evict_if_needed(self) -> None:
        """Evict least-recently-used entries until the pool fits ``max_size``."""
        max_size: int = self._config.max_size

        if max_size <= 0:  # 0 (or negative) means "unlimited"
            return

        while len(self._pool) > max_size:
            # Bug fix: ``popitem`` returns a ``(key, value)`` pair.  The
            # original bound the whole tuple and passed it to
            # ``_pool_locks.pop``, which therefore never matched — evicted
            # keys' locks were never removed and ``_pool_locks`` grew
            # without bound.
            entity_pk, _info = self._pool.popitem(last=False)
            self._pool_locks.pop(entity_pk, None)

    def _is_expired(self, info: CacheInfo[T]) -> bool:
        ttl: float | None = self._config.ttl

        if ttl is None:  # no TTL configured -> entries never expire
            return False

        return (time.monotonic() - info.created_at) > ttl

    @abstractmethod
    def make_pk(self, **kwargs: Any) -> PK:
        """Build a hashable primary key from the call's keyword arguments."""
        ...
@@ -0,0 +1,131 @@
1
+ import asyncio
2
+ import time
3
+ from dataclasses import dataclass
4
+ from typing import Any
5
+
6
+ from pydantic import BaseModel
7
+
8
+ from . import EntityCache
9
+ from .caching import CacheDecorator
10
+ from .entity_cache import CachingConfig
11
+
12
+
13
async def main():
    """Run the bundled demonstrations: basic caching with TTL expiry, then
    the invalidation hook with automatic cache refresh."""
    print(f"\n{' Example caching and TTL ':=^70}\n")

    # Pydantic model representing an API response (frozen, so hashable).
    class Group(BaseModel):
        group_id: int
        name: str
        scope: str

        model_config = {"frozen": True}

    # Set up the `Group` entity's cache.

    # Primary key of a group.
    @dataclass(frozen=True, kw_only=True)
    class GroupPK:
        group_id: int

    # Implement the required abstract method.
    class GroupCache(EntityCache):
        def make_pk(self, group_id: int, **_kwargs: Any) -> GroupPK:
            # `_kwargs` keeps the signature compatible with arbitrary
            # decorated-function keyword arguments.
            return GroupPK(
                group_id=group_id
            )

    groups_cache: CacheDecorator[Group, GroupPK] = CacheDecorator(
        entity_cache=GroupCache(
            entity_name="group",
            model=Group,
            config=CachingConfig(
                ttl=3
            )
        )
    )

    # Arbitrary API contract (with caching added).
    @groups_cache.cache
    async def get_group_contract(*, group_id: int) -> Group:
        # Warning: a function decorated with `.cache` must accept the same
        # keyword arguments that `make_pk` uses to build the primary key.

        # Placeholder for real API/request work.
        ...

        await asyncio.sleep(1)

        # Return the "fetched" result.
        return Group(
            group_id=group_id,
            name="PMiK-16",
            scope="GLOBAL"
        )

    start: float = time.perf_counter()
    await get_group_contract(group_id=1)
    end: float = time.perf_counter()

    print(f"Before caching: {end - start:.6f}")

    start: float = time.perf_counter()
    await get_group_contract(group_id=1)
    end: float = time.perf_counter()

    print(f"After caching: {end - start:.6f}")

    # Wait slightly past the 3-second TTL so the entry expires.
    await asyncio.sleep(3.1)

    start: float = time.perf_counter()
    await get_group_contract(group_id=1)
    end: float = time.perf_counter()

    print(f"After ttl expire: {end - start:.6f}")

    print(f"\n{' Invalidation hook example ':=^70}\n")

    # New model and contract (invalidation hook with auto-refresh).
    class GroupSettings(BaseModel):
        group_id: int
        new_members: bool

        model_config = {"frozen": True}

    class GroupSettingsPK(GroupPK):
        ...

    class GroupSettingsCache(EntityCache):
        def make_pk(self, group_id: int, **_kwargs: Any) -> GroupPK:
            return GroupSettingsPK(
                group_id=group_id
            )

    groups_settings_cache: CacheDecorator[GroupSettings, GroupSettingsPK] = CacheDecorator(
        entity_cache=GroupSettingsCache(
            entity_name="group_settings",
            model=GroupSettings
        )
    )

    @groups_settings_cache.cache
    async def get_group_settings(*, group_id: int) -> GroupSettings:
        # Placeholder for real API/request work.
        ...

        print("Got group's settings")
        # Return the "fetched" result.
        return GroupSettings(
            group_id=group_id,
            new_members=True
        )

    # After updating, the cache entry is invalidated and then refreshed by
    # calling `get_group_settings` with the same arguments.
    @groups_settings_cache.refresh_after_by(get_group_settings)
    async def update_group_settings(*, group_id: int) -> None:
        # Placeholder for real API/request work.
        ...

        print(f"Group settings ({group_id=}) updated")

    await get_group_settings(group_id=1)
    await update_group_settings(group_id=1)
@@ -0,0 +1,37 @@
1
+ import inspect
2
+ from typing import TypeVar, Callable, Awaitable, Coroutine
3
+
4
+ T = TypeVar("T")
5
+
6
+
7
+ async def handle_maybe_async(func: Coroutine[..., ..., T] | Callable[..., T | Awaitable[T]] | T, *args, **kwargs) -> T:
8
+ """
9
+ Execute a value that may be synchronous, asynchronous, or already a result,
10
+ and return its resolved value
11
+
12
+ This utility normalizes mixed sync/async inputs into a single awaited result,
13
+ simplifying code that needs to transparently handle both execution models
14
+
15
+ Args:
16
+ func: A coroutine function, coroutine object, callable returning a value or awaitable,
17
+ or a direct value
18
+ *args: Positional arguments passed to the callable, if applicable
19
+ **kwargs: Keyword arguments passed to the callable, if applicable
20
+
21
+ Returns:
22
+ The resolved value of type `T` after executing or awaiting as necessary
23
+ """
24
+
25
+ if inspect.iscoroutinefunction(func):
26
+ return await func(*args, **kwargs)
27
+ if inspect.iscoroutine(func):
28
+ return await func
29
+ if not inspect.isfunction(func):
30
+ return func
31
+
32
+ result: T = func(*args, **kwargs)
33
+
34
+ if inspect.isawaitable(result):
35
+ return await result
36
+
37
+ return result
@@ -0,0 +1,178 @@
1
+ Metadata-Version: 2.4
2
+ Name: best-simple-caching
3
+ Version: 1.0.0
4
+ Summary: Asynchronous caching library with per-key locking, TTL, LRU and decorators
5
+ Author-email: Gorshipisk <bestdevelopment.work@gmail.com>
6
+ License: MIT
7
+ Project-URL: Homepage, https://github.com/Gorshipiskp/best-simple-cache
8
+ Classifier: Programming Language :: Python :: 3
9
+ Classifier: License :: OSI Approved :: MIT License
10
+ Classifier: Operating System :: OS Independent
11
+ Requires-Python: >=3.10
12
+ Description-Content-Type: text/markdown
13
+ License-File: LICENSE
14
+ Requires-Dist: pydantic>=2.0
15
+ Dynamic: license-file
16
+
17
+ ### Short Description
18
+
19
+ A lightweight, asynchronous caching library for Python with per‑key locking, TTL, LRU eviction, and convenient decorators. Built with `asyncio` and Pydantic.
20
+
21
+ ---
22
+
23
+ # Best Simple Cache
24
+
25
+ **Best Simple Cache** is an asynchronous caching library designed for modern Python applications that use `asyncio` and Pydantic models. It provides:
26
+
27
+ - **Per‑key locking** for safe concurrent access within a single asyncio event loop (the locks are `asyncio.Lock`, i.e. coroutine‑safe, not thread‑safe).
28
+ - **Time‑to‑live (TTL)** and **LRU eviction**.
29
+ - **Decorator‑based integration** – add caching to any async/sync function with minimal boilerplate.
30
+ - **Invalidation hooks** and **automatic cache refresh**.
31
+ - **Flexible primary key generation** – define your own key structure.
32
+
33
+ ## Features
34
+
35
+ - ✅ **Async‑first** – built on `asyncio` with per‑key locks to avoid race conditions.
36
+ - ✅ **TTL & LRU** – automatically evict stale or least‑recently used entries.
37
+ - ✅ **Decoupled storage** – implement your own cache backend by subclassing `EntityCache`.
38
+ - ✅ **Decorator suite** – `@cache`, `@invalidate_after`, and `@refresh_after_by` for common patterns.
39
+ - ✅ **Mixed sync/async support** – works with both synchronous and asynchronous functions.
40
+ - ✅ **Type hints** – fully typed for better IDE support.
41
+
42
+ ## Installation
43
+
44
+ ```bash
45
+ pip install best_simple_caching
46
+ ```
47
+
48
+ **Requirements:** Python 3.10+, Pydantic (v2).
49
+
50
+ ## Quick Start
51
+
52
+ ```python
53
+ import asyncio
54
+ from pydantic import BaseModel
55
+ from best_simple_cache import EntityCache, CacheDecorator, CachingConfig
56
+
57
+ # 1. Define your Pydantic model
58
+ class User(BaseModel):
59
+ id: int
60
+ name: str
61
+
62
+ # 2. Define the primary key (must be hashable)
63
+ class UserPK:
64
+ def __init__(self, user_id: int):
65
+ self.user_id = user_id
66
+
67
+ def __hash__(self):
68
+ return hash(self.user_id)
69
+
70
+ def __eq__(self, other):
71
+ return isinstance(other, UserPK) and self.user_id == other.user_id
72
+
73
+ # 3. Implement your cache class
74
+ class UserCache(EntityCache[User, UserPK]):
75
+ def make_pk(self, user_id: int, **_kwargs) -> UserPK:
76
+ return UserPK(user_id)
77
+
78
+ # 4. Create a decorator instance
79
+ user_cache = CacheDecorator(
80
+ entity_cache=UserCache(
81
+ entity_name="user",
82
+ model=User,
83
+ config=CachingConfig(ttl=60, max_size=1000)
84
+ )
85
+ )
86
+
87
+ # 5. Decorate your function
88
+ @user_cache.cache
89
+ async def get_user(*, user_id: int) -> User:
90
+ # Simulate an expensive API call
91
+ await asyncio.sleep(1)
92
+ return User(id=user_id, name="Alice")
93
+
94
+ async def main():
95
+ # First call – runs the function, stores result
96
+ user = await get_user(user_id=42)
97
+ print(user)
98
+
99
+ # Second call – returns from cache
100
+ user = await get_user(user_id=42)
101
+ print(user) # Instant
102
+
103
+ asyncio.run(main())
104
+ ```
105
+
106
+ ## Advanced Usage
107
+
108
+ ### Invalidation
109
+
110
+ ```python
111
+ @user_cache.invalidate_after
112
+ async def update_user(*, user_id: int) -> User:
113
+ # This function updates the user and then invalidates the cache
114
+ updated_user = ... # some update logic
115
+ return updated_user
116
+ ```
117
+
118
+ ### Refresh After Write
119
+
120
+ ```python
121
+ @user_cache.refresh_after_by(get_user) # get_user is a cached function
122
+ async def update_user(*, user_id: int) -> None:
123
+ # Perform update, then the cache will be invalidated and refreshed
124
+ ...
125
+ ```
126
+
127
+ ### Custom Key Generation
128
+
129
+ The `make_pk` method receives the same keyword arguments as the decorated function. Use it to build a hashable key.
130
+
131
+ ```python
132
+ class MyCache(EntityCache[MyModel, MyPK]):
133
+ def make_pk(self, id: int, region: str = "default", **_kwargs) -> MyPK:
134
+ return MyPK(id, region)
135
+ ```
136
+
137
+ ## API Reference
138
+
139
+ ### `EntityCache(ABC, Generic[T, PK])`
140
+
141
+ Abstract base class for a cache backend.
142
+
143
+ - `__init__(entity_name: str, model: type[T], config: CachingConfig | None)`
144
+ - `async set(entity: T, **kwargs) -> None` – store an entity.
145
+ - `async get(**kwargs) -> T | None` – retrieve an entity by keyword arguments.
146
+ - `async get_by_pk(pk: PK) -> T | None` – retrieve directly by primary key.
147
+ - `async invalidate(entity_pk: PK | None = None, **kwargs) -> None` – remove an entry.
148
+ - `abstractmethod make_pk(**kwargs) -> PK` – build a primary key from arguments.
149
+
150
+ ### `CacheDecorator(Generic[T, PK])`
151
+
152
+ Creates decorators bound to a specific `EntityCache`.
153
+
154
+ - `cache` – decorator that caches the function result.
155
+ - `invalidate_after` – decorator that invalidates the cache after the function runs.
156
+ - `refresh_after_by(refresh_func)` – decorator that invalidates and then calls `refresh_func` to repopulate the cache.
157
+
158
+ ### `CachingConfig`
159
+
160
+ Configuration for a cache.
161
+
162
+ - `ttl: float | None` – seconds after which an entry is considered stale (default `None` = never expires).
163
+ - `max_size: int` – maximum number of entries; `0` means unlimited (default `0`).
164
+ - `disabled: bool` – if `True`, caching is disabled (default `False`).
165
+
166
+ ## Limitations
167
+
168
+ - **Keyword arguments only** – the decorated function must use only keyword arguments (or at least those used in `make_pk` must be named). Positional arguments are not supported.
169
+ - **Pydantic models** – the library assumes cached entities are subclasses of `BaseModel`. You can extend it to work with other types by overriding type checks.
170
+ - **In‑memory only** – the default implementation stores data in an `OrderedDict`. For distributed caching, implement your own backend.
171
+
172
+ ## Contributing
173
+
174
+ Contributions are welcome! Please open an issue or submit a pull request.
175
+
176
+ ## License
177
+
178
+ MIT
@@ -0,0 +1,14 @@
1
+ LICENSE
2
+ README.md
3
+ pyproject.toml
4
+ best_simple_cache/__init__.py
5
+ best_simple_cache/__main__.py
6
+ best_simple_cache/caching.py
7
+ best_simple_cache/entity_cache.py
8
+ best_simple_cache/example.py
9
+ best_simple_cache/misc.py
10
+ best_simple_caching.egg-info/PKG-INFO
11
+ best_simple_caching.egg-info/SOURCES.txt
12
+ best_simple_caching.egg-info/dependency_links.txt
13
+ best_simple_caching.egg-info/requires.txt
14
+ best_simple_caching.egg-info/top_level.txt
@@ -0,0 +1 @@
1
+ best_simple_cache
@@ -0,0 +1,25 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "best-simple-caching"
7
+ version = "1.0.0"
8
+ description = "Asynchronous caching library with per-key locking, TTL, LRU and decorators"
9
+ readme = "README.md"
10
+ authors = [
11
+ { name = "Gorshipisk", email = "bestdevelopment.work@gmail.com" },
12
+ ]
13
+ license = { text = "MIT" }
14
+ classifiers = [
15
+ "Programming Language :: Python :: 3",
16
+ "License :: OSI Approved :: MIT License",
17
+ "Operating System :: OS Independent",
18
+ ]
19
+ requires-python = ">=3.10"
20
+ dependencies = [
21
+ "pydantic>=2.0",
22
+ ]
23
+
24
+ [project.urls]
25
+ Homepage = "https://github.com/Gorshipiskp/best-simple-cache"
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+