purecache 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- purecache/__init__.py +8 -0
- purecache/backends/__init__.py +3 -0
- purecache/backends/lru.py +38 -0
- purecache/decorators.py +40 -0
- purecache/protocols.py +6 -0
- purecache-0.1.0.dist-info/METADATA +171 -0
- purecache-0.1.0.dist-info/RECORD +9 -0
- purecache-0.1.0.dist-info/WHEEL +5 -0
- purecache-0.1.0.dist-info/top_level.txt +1 -0
purecache/__init__.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
"""LRU Cache โ O(1) implementation.
|
|
2
|
+
|
|
3
|
+
Based on the stack algorithm framework from:
|
|
4
|
+
"Evaluation Techniques for Storage Hierarchies"
|
|
5
|
+
R. L. Mattson, J. Gecsei, D. R. Slutz, I. L. Traiger
|
|
6
|
+
IBM Systems Journal, 9(2):78-117, 1970
|
|
7
|
+
|
|
8
|
+
https://dl.acm.org/doi/10.1147/sj.92.0078
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import asyncio
|
|
12
|
+
from collections import OrderedDict
|
|
13
|
+
from typing import Any
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class LRUCache:
|
|
17
|
+
def __init__(self, capacity: int):
|
|
18
|
+
self._capacity = capacity
|
|
19
|
+
self.container: OrderedDict[str, Any] = OrderedDict()
|
|
20
|
+
self._lock = asyncio.Lock()
|
|
21
|
+
|
|
22
|
+
def _is_full(self) -> bool:
|
|
23
|
+
return len(self.container) == self._capacity
|
|
24
|
+
|
|
25
|
+
async def get(self, key: str) -> Any | None:
|
|
26
|
+
async with self._lock:
|
|
27
|
+
if key in self.container:
|
|
28
|
+
self.container.move_to_end(key, True)
|
|
29
|
+
return self.container.get(key)
|
|
30
|
+
return None
|
|
31
|
+
|
|
32
|
+
async def put(self, key: str, value: Any) -> None:
|
|
33
|
+
async with self._lock:
|
|
34
|
+
if (key not in self.container) and (len(self.container) == self._capacity):
|
|
35
|
+
self.container.popitem(last=False) # remove first item
|
|
36
|
+
|
|
37
|
+
self.container[key] = value
|
|
38
|
+
self.container.move_to_end(key, True) # move to tail
|
purecache/decorators.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import functools
|
|
2
|
+
import hashlib
|
|
3
|
+
import pickle
|
|
4
|
+
from collections.abc import Callable
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from .protocols import ICacheBackend
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def generate_key(args: tuple[Any, ...], kwargs: dict[str, Any]) -> str:
    """Derive a deterministic cache key for a call signature.

    Positional arguments are kept in call order (order is significant);
    keyword arguments are normalized by sorting on name, so the key does
    not depend on the order they were passed in. The canonical form is
    serialized with pickle and hashed with SHA-256, giving a
    fixed-length hex digest.
    """
    normalized_kwargs = tuple(sorted(kwargs.items()))
    payload = pickle.dumps((args, normalized_kwargs), protocol=pickle.HIGHEST_PROTOCOL)
    digest = hashlib.sha256(payload)
    return digest.hexdigest()
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def cache(
    func: Callable[..., Any] | None = None,
    backend: Callable[..., ICacheBackend] | None = None,
    **kwargs: Any,
):
    """Cache an async function's results in a pluggable backend.

    Supports both call styles (the package README documents the
    decorator-factory form, which the previous required-``func``
    signature rejected with a TypeError):

    - direct:   ``wrapped = cache(fn, backend=LRUCache, capacity=128)``
    - factory:  ``@cache(backend=LRUCache, capacity=128)``

    ``backend`` is a callable (typically a backend class) that produces
    an ``ICacheBackend``; ``**kwargs`` are forwarded to it. A single
    backend instance is shared by every call to the wrapped function.

    NOTE: a result of ``None`` is treated as a miss and is never cached,
    because the hit check is ``is not None``.
    """
    if backend is None:
        raise TypeError("cache() missing required argument: 'backend'")

    cache_backend = backend(**kwargs)

    def decorate(fn: Callable[..., Any]) -> Callable[..., Any]:
        @functools.wraps(fn)
        async def wrapper(*args: Any, **call_kwargs: Any) -> Any:
            key = generate_key(args, call_kwargs)
            hit = await cache_backend.get(key)
            if hit is not None:
                return hit
            result = await fn(*args, **call_kwargs)
            await cache_backend.put(key, result)
            return result

        return wrapper

    # ``cache(fn, backend=...)`` direct call vs. ``@cache(backend=...)`` factory.
    return decorate(func) if func is not None else decorate
|
purecache/protocols.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: purecache
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Async-native in-memory cache with pluggable eviction backends. Pure Python, zero dependencies.
|
|
5
|
+
Author-email: Maksim Smirnoff <smirnoffmg@gmail.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/pure-python-system-design/purecache
|
|
8
|
+
Project-URL: Documentation, https://pure-python-system-design.github.io/purecache/
|
|
9
|
+
Project-URL: Repository, https://github.com/pure-python-system-design/purecache
|
|
10
|
+
Project-URL: Bug Tracker, https://github.com/pure-python-system-design/purecache/issues
|
|
11
|
+
Project-URL: Changelog, https://github.com/pure-python-system-design/purecache/releases
|
|
12
|
+
Keywords: cache,lru,lfu,ttl,asyncio,async,in-memory,system-design
|
|
13
|
+
Classifier: Development Status :: 3 - Alpha
|
|
14
|
+
Classifier: Intended Audience :: Developers
|
|
15
|
+
Classifier: Intended Audience :: Education
|
|
16
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
17
|
+
Classifier: Programming Language :: Python :: 3
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
20
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
21
|
+
Classifier: Topic :: System :: Distributed Computing
|
|
22
|
+
Classifier: Framework :: AsyncIO
|
|
23
|
+
Classifier: Typing :: Typed
|
|
24
|
+
Requires-Python: >=3.12
|
|
25
|
+
Description-Content-Type: text/markdown
|
|
26
|
+
|
|
27
|
+
# ๐๏ธ purecache
|
|
28
|
+
|
|
29
|
+
Async-native in-memory cache with pluggable eviction backends — pure Python 3.12+, zero dependencies.
|
|
30
|
+
|
|
31
|
+
Just `asyncio`, `collections.OrderedDict`, and the irrational urge to understand what happens inside the black box.
|
|
32
|
+
|
|
33
|
+
Part of the [pure-python-system-design](https://github.com/pure-python-system-design) project.
|
|
34
|
+
|
|
35
|
+
---
|
|
36
|
+
|
|
37
|
+
## ๐ฆ Installation
|
|
38
|
+
|
|
39
|
+
```bash
|
|
40
|
+
pip install purecache
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
Or with uv:
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
uv add purecache
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
Python 3.12+ required.
|
|
50
|
+
|
|
51
|
+
---
|
|
52
|
+
|
|
53
|
+
## โก Quick Start
|
|
54
|
+
|
|
55
|
+
### Direct backend usage
|
|
56
|
+
|
|
57
|
+
```python
|
|
58
|
+
from purecache.backends.lru import LRUCache
|
|
59
|
+
|
|
60
|
+
cache = LRUCache(capacity=128)
|
|
61
|
+
|
|
62
|
+
await cache.put("user:42", {"name": "Alice"})
|
|
63
|
+
value = await cache.get("user:42") # {"name": "Alice"}
|
|
64
|
+
value = await cache.get("missing") # None
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
### Decorator
|
|
68
|
+
|
|
69
|
+
```python
|
|
70
|
+
from purecache.decorators import cache
|
|
71
|
+
from purecache.backends.lru import LRUCache
|
|
72
|
+
|
|
73
|
+
@cache(backend=LRUCache, capacity=128)
|
|
74
|
+
async def get_user(user_id: str) -> dict:
|
|
75
|
+
return await fetch_from_db(user_id)
|
|
76
|
+
|
|
77
|
+
# First call → executes get_user, caches result
|
|
78
|
+
user = await get_user("42")
|
|
79
|
+
|
|
80
|
+
# Second call → returns cached result, skips get_user
|
|
81
|
+
user = await get_user("42")
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
Cache keys are derived automatically from the function's arguments using `pickle` + SHA-256 — positional args keep their order, keyword args are sorted by name.
|
|
85
|
+
|
|
86
|
+
---
|
|
87
|
+
|
|
88
|
+
## ๐ง Backends
|
|
89
|
+
|
|
90
|
+
| Backend | Eviction Policy | Time | Memory | Best For |
|
|
91
|
+
| ------------- | --------------------- | ---- | ------ | ---------------------- |
|
|
92
|
+
| `LRUCache` | Least Recently Used | O(1) | O(n) | General purpose |
|
|
93
|
+
| `LFUCache` | Least Frequently Used | O(1) | O(n) | Skewed access patterns |
|
|
94
|
+
| `TTLCache` | Time-based expiry | O(1) | O(n) | Sessions, tokens |
|
|
95
|
+
| `LRUTTLCache` | LRU + TTL combined | O(1) | O(n) | Production default |
|
|
96
|
+
|
|
97
|
+
All backends implement the `ICacheBackend` protocol — swap them without touching your application code.
|
|
98
|
+
|
|
99
|
+
---
|
|
100
|
+
|
|
101
|
+
## ๐ Framework Examples
|
|
102
|
+
|
|
103
|
+
The decorator integrates naturally with any async framework:
|
|
104
|
+
|
|
105
|
+
```python
|
|
106
|
+
# FastAPI
|
|
107
|
+
from fastapi import FastAPI
|
|
108
|
+
from purecache.decorators import cache
|
|
109
|
+
from purecache.backends.lru import LRUCache
|
|
110
|
+
|
|
111
|
+
app = FastAPI()
|
|
112
|
+
|
|
113
|
+
@app.get("/user/{user_id}")
|
|
114
|
+
@cache(backend=LRUCache, capacity=512)
|
|
115
|
+
async def get_user(user_id: str):
|
|
116
|
+
return await fetch_user_from_db(user_id)
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
TODO: Add more examples for aiohttp, Django, Flask, Litestar, and Sanic in [`examples/`](examples/).
|
|
120
|
+
|
|
121
|
+
---
|
|
122
|
+
|
|
123
|
+
## ๐ Architecture
|
|
124
|
+
|
|
125
|
+
```
|
|
126
|
+
cache() decorator
|
|
127
|
+
└── ICacheBackend (protocol)
|
|
128
|
+
    ├── LRUCache → OrderedDict + move_to_end
|
|
129
|
+
    ├── LFUCache → key_map + freq_map + min_freq pointer
|
|
130
|
+
    ├── TTLCache → dict + expiry timestamps
|
|
131
|
+
    └── LRUTTLCache → LRU + TTL combined
|
|
132
|
+
```
|
|
133
|
+
|
|
134
|
+
The `cache()` decorator handles key generation and cache lookup. The backend handles storage and eviction. Swap the backend, keep everything else.
|
|
135
|
+
|
|
136
|
+
---
|
|
137
|
+
|
|
138
|
+
## โ ๏ธ Known Limitations
|
|
139
|
+
|
|
140
|
+
- **Caching `None`**: The decorator uses `if cached_res is not None` as the cache-hit check. Functions that legitimately return `None` will always miss — the value won't be cached. Use a sentinel-aware backend or wrap the return value if needed.
|
|
141
|
+
|
|
142
|
+
---
|
|
143
|
+
|
|
144
|
+
## ๐ Requirements
|
|
145
|
+
|
|
146
|
+
- Python 3.12+
|
|
147
|
+
- Courage
|
|
148
|
+
|
|
149
|
+
---
|
|
150
|
+
|
|
151
|
+
## ๐งช Development
|
|
152
|
+
|
|
153
|
+
```bash
|
|
154
|
+
uv sync
|
|
155
|
+
pre-commit install
|
|
156
|
+
|
|
157
|
+
uv run pytest
|
|
158
|
+
uv run ruff check .
|
|
159
|
+
uv run mypy src/
|
|
160
|
+
uv run mkdocs serve
|
|
161
|
+
```
|
|
162
|
+
|
|
163
|
+
---
|
|
164
|
+
|
|
165
|
+
## ๐ Documentation
|
|
166
|
+
|
|
167
|
+
Full docs at **https://pure-python-system-design.github.io/purecache/**
|
|
168
|
+
|
|
169
|
+
---
|
|
170
|
+
|
|
171
|
+
More designs to come, if the pizza supply holds.
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
purecache/__init__.py,sha256=6YkeKXzH-0onsoLThPb660FJT0dc5FJCNxBu4rQUVBw,134
|
|
2
|
+
purecache/decorators.py,sha256=y3ZQna-P7gdP58M1bqsr-U4OuT2EbDfCHsnc9j7bfVc,1149
|
|
3
|
+
purecache/protocols.py,sha256=Lp2RrTlTuirHY5MLEpcVGkvSlEjD_fOH4xAx4PegmRk,170
|
|
4
|
+
purecache/backends/__init__.py,sha256=rt3kc87jtFzXa_PG9wFA9OtvIXchAOH0M1EQaY3YL7U,50
|
|
5
|
+
purecache/backends/lru.py,sha256=AncdPMFNL6Lksha13xxwyJ11p_5VBzPL4j9KjucpUlI,1223
|
|
6
|
+
purecache-0.1.0.dist-info/METADATA,sha256=X1paAn-dOlb_E9PCSNdwWY3XnQJuBgAABdvWGOnFiJA,4847
|
|
7
|
+
purecache-0.1.0.dist-info/WHEEL,sha256=YCfwYGOYMi5Jhw2fU4yNgwErybb2IX5PEwBKV4ZbdBo,91
|
|
8
|
+
purecache-0.1.0.dist-info/top_level.txt,sha256=T7kG2VnJCUFpcRzjNKTrw7Jx178_u0sNbYL6r5FVVfQ,10
|
|
9
|
+
purecache-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
purecache
|