purecache 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- purecache-0.1.0/PKG-INFO +171 -0
- purecache-0.1.0/README.md +145 -0
- purecache-0.1.0/pyproject.toml +87 -0
- purecache-0.1.0/setup.cfg +4 -0
- purecache-0.1.0/src/purecache/__init__.py +8 -0
- purecache-0.1.0/src/purecache/backends/__init__.py +3 -0
- purecache-0.1.0/src/purecache/backends/lru.py +38 -0
- purecache-0.1.0/src/purecache/decorators.py +40 -0
- purecache-0.1.0/src/purecache/protocols.py +6 -0
- purecache-0.1.0/src/purecache.egg-info/PKG-INFO +171 -0
- purecache-0.1.0/src/purecache.egg-info/SOURCES.txt +13 -0
- purecache-0.1.0/src/purecache.egg-info/dependency_links.txt +1 -0
- purecache-0.1.0/src/purecache.egg-info/top_level.txt +1 -0
- purecache-0.1.0/tests/test_decorators.py +324 -0
- purecache-0.1.0/tests/test_lru.py +294 -0
purecache-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: purecache
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Async-native in-memory cache with pluggable eviction backends. Pure Python, zero dependencies.
|
|
5
|
+
Author-email: Maksim Smirnoff <smirnoffmg@gmail.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/pure-python-system-design/purecache
|
|
8
|
+
Project-URL: Documentation, https://pure-python-system-design.github.io/purecache/
|
|
9
|
+
Project-URL: Repository, https://github.com/pure-python-system-design/purecache
|
|
10
|
+
Project-URL: Bug Tracker, https://github.com/pure-python-system-design/purecache/issues
|
|
11
|
+
Project-URL: Changelog, https://github.com/pure-python-system-design/purecache/releases
|
|
12
|
+
Keywords: cache,lru,lfu,ttl,asyncio,async,in-memory,system-design
|
|
13
|
+
Classifier: Development Status :: 3 - Alpha
|
|
14
|
+
Classifier: Intended Audience :: Developers
|
|
15
|
+
Classifier: Intended Audience :: Education
|
|
16
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
17
|
+
Classifier: Programming Language :: Python :: 3
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
20
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
21
|
+
Classifier: Topic :: System :: Distributed Computing
|
|
22
|
+
Classifier: Framework :: AsyncIO
|
|
23
|
+
Classifier: Typing :: Typed
|
|
24
|
+
Requires-Python: >=3.12
|
|
25
|
+
Description-Content-Type: text/markdown
|
|
26
|
+
|
|
27
|
+
# 🗄️ purecache
|
|
28
|
+
|
|
29
|
+
Async-native in-memory cache with pluggable eviction backends — pure Python 3.12+, zero dependencies.
|
|
30
|
+
|
|
31
|
+
Just `asyncio`, `collections.OrderedDict`, and the irrational urge to understand what happens inside the black box.
|
|
32
|
+
|
|
33
|
+
Part of the [pure-python-system-design](https://github.com/pure-python-system-design) project.
|
|
34
|
+
|
|
35
|
+
---
|
|
36
|
+
|
|
37
|
+
## 📦 Installation
|
|
38
|
+
|
|
39
|
+
```bash
|
|
40
|
+
pip install purecache
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
Or with uv:
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
uv add purecache
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
Python 3.12+ required.
|
|
50
|
+
|
|
51
|
+
---
|
|
52
|
+
|
|
53
|
+
## ⚡ Quick Start
|
|
54
|
+
|
|
55
|
+
### Direct backend usage
|
|
56
|
+
|
|
57
|
+
```python
|
|
58
|
+
from purecache.backends.lru import LRUCache
|
|
59
|
+
|
|
60
|
+
cache = LRUCache(capacity=128)
|
|
61
|
+
|
|
62
|
+
await cache.put("user:42", {"name": "Alice"})
|
|
63
|
+
value = await cache.get("user:42") # {"name": "Alice"}
|
|
64
|
+
value = await cache.get("missing") # None
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
### Decorator
|
|
68
|
+
|
|
69
|
+
```python
|
|
70
|
+
from purecache.decorators import cache
|
|
71
|
+
from purecache.backends.lru import LRUCache
|
|
72
|
+
|
|
73
|
+
@cache(backend=LRUCache, capacity=128)
|
|
74
|
+
async def get_user(user_id: str) -> dict:
|
|
75
|
+
return await fetch_from_db(user_id)
|
|
76
|
+
|
|
77
|
+
# First call — executes get_user, caches result
|
|
78
|
+
user = await get_user("42")
|
|
79
|
+
|
|
80
|
+
# Second call — returns cached result, skips get_user
|
|
81
|
+
user = await get_user("42")
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
Cache keys are derived automatically from the function's arguments using `pickle` + SHA-256 — positional args keep their order, keyword args are sorted by name.
|
|
85
|
+
|
|
86
|
+
---
|
|
87
|
+
|
|
88
|
+
## 🧠 Backends
|
|
89
|
+
|
|
90
|
+
| Backend | Eviction Policy | Time | Memory | Best For |
|
|
91
|
+
| ------------- | --------------------- | ---- | ------ | ---------------------- |
|
|
92
|
+
| `LRUCache` | Least Recently Used | O(1) | O(n) | General purpose |
|
|
93
|
+
| `LFUCache` | Least Frequently Used | O(1) | O(n) | Skewed access patterns |
|
|
94
|
+
| `TTLCache` | Time-based expiry | O(1) | O(n) | Sessions, tokens |
|
|
95
|
+
| `LRUTTLCache` | LRU + TTL combined | O(1) | O(n) | Production default |
|
|
96
|
+
|
|
97
|
+
All backends implement the `ICacheBackend` protocol — swap them without touching your application code.
|
|
98
|
+
|
|
99
|
+
---
|
|
100
|
+
|
|
101
|
+
## 🔌 Framework Examples
|
|
102
|
+
|
|
103
|
+
The decorator integrates naturally with any async framework:
|
|
104
|
+
|
|
105
|
+
```python
|
|
106
|
+
# FastAPI
|
|
107
|
+
from fastapi import FastAPI
|
|
108
|
+
from purecache.decorators import cache
|
|
109
|
+
from purecache.backends.lru import LRUCache
|
|
110
|
+
|
|
111
|
+
app = FastAPI()
|
|
112
|
+
|
|
113
|
+
@app.get("/user/{user_id}")
|
|
114
|
+
@cache(backend=LRUCache, capacity=512)
|
|
115
|
+
async def get_user(user_id: str):
|
|
116
|
+
return await fetch_user_from_db(user_id)
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
TODO: Add more examples for aiohttp, Django, Flask, Litestar, and Sanic in [`examples/`](examples/).
|
|
120
|
+
|
|
121
|
+
---
|
|
122
|
+
|
|
123
|
+
## 📐 Architecture
|
|
124
|
+
|
|
125
|
+
```
|
|
126
|
+
cache() decorator
|
|
127
|
+
└── ICacheBackend (protocol)
|
|
128
|
+
├── LRUCache — OrderedDict + move_to_end
|
|
129
|
+
├── LFUCache — key_map + freq_map + min_freq pointer
|
|
130
|
+
├── TTLCache — dict + expiry timestamps
|
|
131
|
+
└── LRUTTLCache — LRU + TTL combined
|
|
132
|
+
```
|
|
133
|
+
|
|
134
|
+
The `cache()` decorator handles key generation and cache lookup. The backend handles storage and eviction. Swap the backend, keep everything else.
|
|
135
|
+
|
|
136
|
+
---
|
|
137
|
+
|
|
138
|
+
## ⚠️ Known Limitations
|
|
139
|
+
|
|
140
|
+
- **Caching `None`**: The decorator uses `if cached_res is not None` as the cache-hit check. Functions that legitimately return `None` will always miss — the value won't be cached. Use a sentinel-aware backend or wrap the return value if needed.
|
|
141
|
+
|
|
142
|
+
---
|
|
143
|
+
|
|
144
|
+
## 📋 Requirements
|
|
145
|
+
|
|
146
|
+
- Python 3.12+
|
|
147
|
+
- Courage
|
|
148
|
+
|
|
149
|
+
---
|
|
150
|
+
|
|
151
|
+
## 🧪 Development
|
|
152
|
+
|
|
153
|
+
```bash
|
|
154
|
+
uv sync
|
|
155
|
+
pre-commit install
|
|
156
|
+
|
|
157
|
+
uv run pytest
|
|
158
|
+
uv run ruff check .
|
|
159
|
+
uv run mypy src/
|
|
160
|
+
uv run mkdocs serve
|
|
161
|
+
```
|
|
162
|
+
|
|
163
|
+
---
|
|
164
|
+
|
|
165
|
+
## 📖 Documentation
|
|
166
|
+
|
|
167
|
+
Full docs at **https://pure-python-system-design.github.io/purecache/**
|
|
168
|
+
|
|
169
|
+
---
|
|
170
|
+
|
|
171
|
+
More designs to come, if the pizza supply holds.
|
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
# 🗄️ purecache
|
|
2
|
+
|
|
3
|
+
Async-native in-memory cache with pluggable eviction backends — pure Python 3.12+, zero dependencies.
|
|
4
|
+
|
|
5
|
+
Just `asyncio`, `collections.OrderedDict`, and the irrational urge to understand what happens inside the black box.
|
|
6
|
+
|
|
7
|
+
Part of the [pure-python-system-design](https://github.com/pure-python-system-design) project.
|
|
8
|
+
|
|
9
|
+
---
|
|
10
|
+
|
|
11
|
+
## 📦 Installation
|
|
12
|
+
|
|
13
|
+
```bash
|
|
14
|
+
pip install purecache
|
|
15
|
+
```
|
|
16
|
+
|
|
17
|
+
Or with uv:
|
|
18
|
+
|
|
19
|
+
```bash
|
|
20
|
+
uv add purecache
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
Python 3.12+ required.
|
|
24
|
+
|
|
25
|
+
---
|
|
26
|
+
|
|
27
|
+
## ⚡ Quick Start
|
|
28
|
+
|
|
29
|
+
### Direct backend usage
|
|
30
|
+
|
|
31
|
+
```python
|
|
32
|
+
from purecache.backends.lru import LRUCache
|
|
33
|
+
|
|
34
|
+
cache = LRUCache(capacity=128)
|
|
35
|
+
|
|
36
|
+
await cache.put("user:42", {"name": "Alice"})
|
|
37
|
+
value = await cache.get("user:42") # {"name": "Alice"}
|
|
38
|
+
value = await cache.get("missing") # None
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
### Decorator
|
|
42
|
+
|
|
43
|
+
```python
|
|
44
|
+
from purecache.decorators import cache
|
|
45
|
+
from purecache.backends.lru import LRUCache
|
|
46
|
+
|
|
47
|
+
@cache(backend=LRUCache, capacity=128)
|
|
48
|
+
async def get_user(user_id: str) -> dict:
|
|
49
|
+
return await fetch_from_db(user_id)
|
|
50
|
+
|
|
51
|
+
# First call — executes get_user, caches result
|
|
52
|
+
user = await get_user("42")
|
|
53
|
+
|
|
54
|
+
# Second call — returns cached result, skips get_user
|
|
55
|
+
user = await get_user("42")
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
Cache keys are derived automatically from the function's arguments using `pickle` + SHA-256 — positional args keep their order, keyword args are sorted by name.
|
|
59
|
+
|
|
60
|
+
---
|
|
61
|
+
|
|
62
|
+
## 🧠 Backends
|
|
63
|
+
|
|
64
|
+
| Backend | Eviction Policy | Time | Memory | Best For |
|
|
65
|
+
| ------------- | --------------------- | ---- | ------ | ---------------------- |
|
|
66
|
+
| `LRUCache` | Least Recently Used | O(1) | O(n) | General purpose |
|
|
67
|
+
| `LFUCache` | Least Frequently Used | O(1) | O(n) | Skewed access patterns |
|
|
68
|
+
| `TTLCache` | Time-based expiry | O(1) | O(n) | Sessions, tokens |
|
|
69
|
+
| `LRUTTLCache` | LRU + TTL combined | O(1) | O(n) | Production default |
|
|
70
|
+
|
|
71
|
+
All backends implement the `ICacheBackend` protocol — swap them without touching your application code.
|
|
72
|
+
|
|
73
|
+
---
|
|
74
|
+
|
|
75
|
+
## 🔌 Framework Examples
|
|
76
|
+
|
|
77
|
+
The decorator integrates naturally with any async framework:
|
|
78
|
+
|
|
79
|
+
```python
|
|
80
|
+
# FastAPI
|
|
81
|
+
from fastapi import FastAPI
|
|
82
|
+
from purecache.decorators import cache
|
|
83
|
+
from purecache.backends.lru import LRUCache
|
|
84
|
+
|
|
85
|
+
app = FastAPI()
|
|
86
|
+
|
|
87
|
+
@app.get("/user/{user_id}")
|
|
88
|
+
@cache(backend=LRUCache, capacity=512)
|
|
89
|
+
async def get_user(user_id: str):
|
|
90
|
+
return await fetch_user_from_db(user_id)
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
TODO: Add more examples for aiohttp, Django, Flask, Litestar, and Sanic in [`examples/`](examples/).
|
|
94
|
+
|
|
95
|
+
---
|
|
96
|
+
|
|
97
|
+
## 📐 Architecture
|
|
98
|
+
|
|
99
|
+
```
|
|
100
|
+
cache() decorator
|
|
101
|
+
└── ICacheBackend (protocol)
|
|
102
|
+
├── LRUCache — OrderedDict + move_to_end
|
|
103
|
+
├── LFUCache — key_map + freq_map + min_freq pointer
|
|
104
|
+
├── TTLCache — dict + expiry timestamps
|
|
105
|
+
└── LRUTTLCache — LRU + TTL combined
|
|
106
|
+
```
|
|
107
|
+
|
|
108
|
+
The `cache()` decorator handles key generation and cache lookup. The backend handles storage and eviction. Swap the backend, keep everything else.
|
|
109
|
+
|
|
110
|
+
---
|
|
111
|
+
|
|
112
|
+
## ⚠️ Known Limitations
|
|
113
|
+
|
|
114
|
+
- **Caching `None`**: The decorator uses `if cached_res is not None` as the cache-hit check. Functions that legitimately return `None` will always miss — the value won't be cached. Use a sentinel-aware backend or wrap the return value if needed.
|
|
115
|
+
|
|
116
|
+
---
|
|
117
|
+
|
|
118
|
+
## 📋 Requirements
|
|
119
|
+
|
|
120
|
+
- Python 3.12+
|
|
121
|
+
- Courage
|
|
122
|
+
|
|
123
|
+
---
|
|
124
|
+
|
|
125
|
+
## 🧪 Development
|
|
126
|
+
|
|
127
|
+
```bash
|
|
128
|
+
uv sync
|
|
129
|
+
pre-commit install
|
|
130
|
+
|
|
131
|
+
uv run pytest
|
|
132
|
+
uv run ruff check .
|
|
133
|
+
uv run mypy src/
|
|
134
|
+
uv run mkdocs serve
|
|
135
|
+
```
|
|
136
|
+
|
|
137
|
+
---
|
|
138
|
+
|
|
139
|
+
## 📖 Documentation
|
|
140
|
+
|
|
141
|
+
Full docs at **https://pure-python-system-design.github.io/purecache/**
|
|
142
|
+
|
|
143
|
+
---
|
|
144
|
+
|
|
145
|
+
More designs to come, if the pizza supply holds.
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=61.0"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "purecache"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "Async-native in-memory cache with pluggable eviction backends. Pure Python, zero dependencies."
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = { text = "MIT" }
|
|
11
|
+
requires-python = ">=3.12"
|
|
12
|
+
dependencies = []
|
|
13
|
+
|
|
14
|
+
authors = [{ name = "Maksim Smirnoff", email = "smirnoffmg@gmail.com" }]
|
|
15
|
+
|
|
16
|
+
keywords = [
|
|
17
|
+
"cache",
|
|
18
|
+
"lru",
|
|
19
|
+
"lfu",
|
|
20
|
+
"ttl",
|
|
21
|
+
"asyncio",
|
|
22
|
+
"async",
|
|
23
|
+
"in-memory",
|
|
24
|
+
"system-design",
|
|
25
|
+
]
|
|
26
|
+
|
|
27
|
+
classifiers = [
|
|
28
|
+
"Development Status :: 3 - Alpha",
|
|
29
|
+
"Intended Audience :: Developers",
|
|
30
|
+
"Intended Audience :: Education",
|
|
31
|
+
"License :: OSI Approved :: MIT License",
|
|
32
|
+
"Programming Language :: Python :: 3",
|
|
33
|
+
"Programming Language :: Python :: 3.12",
|
|
34
|
+
"Programming Language :: Python :: 3.13",
|
|
35
|
+
"Topic :: Software Development :: Libraries :: Python Modules",
|
|
36
|
+
"Topic :: System :: Distributed Computing",
|
|
37
|
+
"Framework :: AsyncIO",
|
|
38
|
+
"Typing :: Typed",
|
|
39
|
+
]
|
|
40
|
+
|
|
41
|
+
[project.urls]
|
|
42
|
+
Homepage = "https://github.com/pure-python-system-design/purecache"
|
|
43
|
+
Documentation = "https://pure-python-system-design.github.io/purecache/"
|
|
44
|
+
Repository = "https://github.com/pure-python-system-design/purecache"
|
|
45
|
+
"Bug Tracker" = "https://github.com/pure-python-system-design/purecache/issues"
|
|
46
|
+
Changelog = "https://github.com/pure-python-system-design/purecache/releases"
|
|
47
|
+
|
|
48
|
+
[tool.setuptools.packages.find]
|
|
49
|
+
where = ["src"]
|
|
50
|
+
|
|
51
|
+
[dependency-groups]
|
|
52
|
+
dev = [
|
|
53
|
+
"pytest-asyncio",
|
|
54
|
+
"ruff",
|
|
55
|
+
"mypy",
|
|
56
|
+
"pytest",
|
|
57
|
+
"pytest-cov",
|
|
58
|
+
"pre-commit",
|
|
59
|
+
"mkdocs",
|
|
60
|
+
"mkdocs-material",
|
|
61
|
+
"mkdocstrings[python]",
|
|
62
|
+
"pytest-xdist>=3.8.0",
|
|
63
|
+
"pytest-timeout>=2.4.0",
|
|
64
|
+
"pytest-benchmark>=5.2.3",
|
|
65
|
+
]
|
|
66
|
+
|
|
67
|
+
[tool.ruff]
|
|
68
|
+
target-version = "py312"
|
|
69
|
+
line-length = 88
|
|
70
|
+
src = ["src", "tests"]
|
|
71
|
+
|
|
72
|
+
[tool.ruff.lint]
|
|
73
|
+
select = ["E", "F", "I", "UP"]
|
|
74
|
+
ignore = []
|
|
75
|
+
|
|
76
|
+
[tool.mypy]
|
|
77
|
+
python_version = "3.12"
|
|
78
|
+
warn_return_any = true
|
|
79
|
+
warn_unused_ignores = true
|
|
80
|
+
disallow_untyped_defs = false
|
|
81
|
+
packages = ["src/purecache"]
|
|
82
|
+
|
|
83
|
+
[tool.pytest.ini_options]
|
|
84
|
+
testpaths = ["tests"]
|
|
85
|
+
addopts = "-v"
|
|
86
|
+
pythonpath = ["src"]
|
|
87
|
+
asyncio_mode = "auto"
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
"""LRU Cache — O(1) implementation.
|
|
2
|
+
|
|
3
|
+
Based on the stack algorithm framework from:
|
|
4
|
+
"Evaluation Techniques for Storage Hierarchies"
|
|
5
|
+
R. L. Mattson, J. Gecsei, D. R. Slutz, I. L. Traiger
|
|
6
|
+
IBM Systems Journal, 9(2):78-117, 1970
|
|
7
|
+
|
|
8
|
+
https://dl.acm.org/doi/10.1147/sj.92.0078
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import asyncio
|
|
12
|
+
from collections import OrderedDict
|
|
13
|
+
from typing import Any
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class LRUCache:
|
|
17
|
+
def __init__(self, capacity: int):
|
|
18
|
+
self._capacity = capacity
|
|
19
|
+
self.container: OrderedDict[str, Any] = OrderedDict()
|
|
20
|
+
self._lock = asyncio.Lock()
|
|
21
|
+
|
|
22
|
+
def _is_full(self) -> bool:
|
|
23
|
+
return len(self.container) == self._capacity
|
|
24
|
+
|
|
25
|
+
async def get(self, key: str) -> Any | None:
|
|
26
|
+
async with self._lock:
|
|
27
|
+
if key in self.container:
|
|
28
|
+
self.container.move_to_end(key, True)
|
|
29
|
+
return self.container.get(key)
|
|
30
|
+
return None
|
|
31
|
+
|
|
32
|
+
async def put(self, key: str, value: Any) -> None:
|
|
33
|
+
async with self._lock:
|
|
34
|
+
if (key not in self.container) and (len(self.container) == self._capacity):
|
|
35
|
+
self.container.popitem(last=False) # remove first item
|
|
36
|
+
|
|
37
|
+
self.container[key] = value
|
|
38
|
+
self.container.move_to_end(key, True) # move to tail
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import functools
|
|
2
|
+
import hashlib
|
|
3
|
+
import pickle
|
|
4
|
+
from collections.abc import Callable
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from .protocols import ICacheBackend
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def generate_key(args: tuple[Any, ...], kwargs: dict[str, Any]) -> str:
|
|
11
|
+
"""Build a stable cache key from function args and kwargs.
|
|
12
|
+
|
|
13
|
+
- Positional args keep their order (order matters).
|
|
14
|
+
- Keyword args are sorted by name so call order does not change the key.
|
|
15
|
+
- Uses pickle to serialize and SHA-256 for a fixed-length key.
|
|
16
|
+
"""
|
|
17
|
+
canonical = (args, tuple(sorted(kwargs.items())))
|
|
18
|
+
raw = pickle.dumps(canonical, protocol=pickle.HIGHEST_PROTOCOL)
|
|
19
|
+
return hashlib.sha256(raw).hexdigest()
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def cache(
|
|
23
|
+
func: Callable[..., Any],
|
|
24
|
+
backend: Callable[..., ICacheBackend],
|
|
25
|
+
**kwargs: Any,
|
|
26
|
+
):
|
|
27
|
+
cache_backend = backend(**kwargs)
|
|
28
|
+
|
|
29
|
+
@functools.wraps(func)
|
|
30
|
+
async def wrapper(*args, **kwargs):
|
|
31
|
+
key = generate_key(args, kwargs)
|
|
32
|
+
cached_res = await cache_backend.get(key)
|
|
33
|
+
if cached_res is not None:
|
|
34
|
+
return cached_res
|
|
35
|
+
|
|
36
|
+
res = await func(*args, **kwargs)
|
|
37
|
+
await cache_backend.put(key, res)
|
|
38
|
+
return res
|
|
39
|
+
|
|
40
|
+
return wrapper
|
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: purecache
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Async-native in-memory cache with pluggable eviction backends. Pure Python, zero dependencies.
|
|
5
|
+
Author-email: Maksim Smirnoff <smirnoffmg@gmail.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/pure-python-system-design/purecache
|
|
8
|
+
Project-URL: Documentation, https://pure-python-system-design.github.io/purecache/
|
|
9
|
+
Project-URL: Repository, https://github.com/pure-python-system-design/purecache
|
|
10
|
+
Project-URL: Bug Tracker, https://github.com/pure-python-system-design/purecache/issues
|
|
11
|
+
Project-URL: Changelog, https://github.com/pure-python-system-design/purecache/releases
|
|
12
|
+
Keywords: cache,lru,lfu,ttl,asyncio,async,in-memory,system-design
|
|
13
|
+
Classifier: Development Status :: 3 - Alpha
|
|
14
|
+
Classifier: Intended Audience :: Developers
|
|
15
|
+
Classifier: Intended Audience :: Education
|
|
16
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
17
|
+
Classifier: Programming Language :: Python :: 3
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
20
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
21
|
+
Classifier: Topic :: System :: Distributed Computing
|
|
22
|
+
Classifier: Framework :: AsyncIO
|
|
23
|
+
Classifier: Typing :: Typed
|
|
24
|
+
Requires-Python: >=3.12
|
|
25
|
+
Description-Content-Type: text/markdown
|
|
26
|
+
|
|
27
|
+
# 🗄️ purecache
|
|
28
|
+
|
|
29
|
+
Async-native in-memory cache with pluggable eviction backends — pure Python 3.12+, zero dependencies.
|
|
30
|
+
|
|
31
|
+
Just `asyncio`, `collections.OrderedDict`, and the irrational urge to understand what happens inside the black box.
|
|
32
|
+
|
|
33
|
+
Part of the [pure-python-system-design](https://github.com/pure-python-system-design) project.
|
|
34
|
+
|
|
35
|
+
---
|
|
36
|
+
|
|
37
|
+
## 📦 Installation
|
|
38
|
+
|
|
39
|
+
```bash
|
|
40
|
+
pip install purecache
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
Or with uv:
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
uv add purecache
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
Python 3.12+ required.
|
|
50
|
+
|
|
51
|
+
---
|
|
52
|
+
|
|
53
|
+
## ⚡ Quick Start
|
|
54
|
+
|
|
55
|
+
### Direct backend usage
|
|
56
|
+
|
|
57
|
+
```python
|
|
58
|
+
from purecache.backends.lru import LRUCache
|
|
59
|
+
|
|
60
|
+
cache = LRUCache(capacity=128)
|
|
61
|
+
|
|
62
|
+
await cache.put("user:42", {"name": "Alice"})
|
|
63
|
+
value = await cache.get("user:42") # {"name": "Alice"}
|
|
64
|
+
value = await cache.get("missing") # None
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
### Decorator
|
|
68
|
+
|
|
69
|
+
```python
|
|
70
|
+
from purecache.decorators import cache
|
|
71
|
+
from purecache.backends.lru import LRUCache
|
|
72
|
+
|
|
73
|
+
@cache(backend=LRUCache, capacity=128)
|
|
74
|
+
async def get_user(user_id: str) -> dict:
|
|
75
|
+
return await fetch_from_db(user_id)
|
|
76
|
+
|
|
77
|
+
# First call — executes get_user, caches result
|
|
78
|
+
user = await get_user("42")
|
|
79
|
+
|
|
80
|
+
# Second call — returns cached result, skips get_user
|
|
81
|
+
user = await get_user("42")
|
|
82
|
+
```
|
|
83
|
+
|
|
84
|
+
Cache keys are derived automatically from the function's arguments using `pickle` + SHA-256 — positional args keep their order, keyword args are sorted by name.
|
|
85
|
+
|
|
86
|
+
---
|
|
87
|
+
|
|
88
|
+
## 🧠 Backends
|
|
89
|
+
|
|
90
|
+
| Backend | Eviction Policy | Time | Memory | Best For |
|
|
91
|
+
| ------------- | --------------------- | ---- | ------ | ---------------------- |
|
|
92
|
+
| `LRUCache` | Least Recently Used | O(1) | O(n) | General purpose |
|
|
93
|
+
| `LFUCache` | Least Frequently Used | O(1) | O(n) | Skewed access patterns |
|
|
94
|
+
| `TTLCache` | Time-based expiry | O(1) | O(n) | Sessions, tokens |
|
|
95
|
+
| `LRUTTLCache` | LRU + TTL combined | O(1) | O(n) | Production default |
|
|
96
|
+
|
|
97
|
+
All backends implement the `ICacheBackend` protocol — swap them without touching your application code.
|
|
98
|
+
|
|
99
|
+
---
|
|
100
|
+
|
|
101
|
+
## 🔌 Framework Examples
|
|
102
|
+
|
|
103
|
+
The decorator integrates naturally with any async framework:
|
|
104
|
+
|
|
105
|
+
```python
|
|
106
|
+
# FastAPI
|
|
107
|
+
from fastapi import FastAPI
|
|
108
|
+
from purecache.decorators import cache
|
|
109
|
+
from purecache.backends.lru import LRUCache
|
|
110
|
+
|
|
111
|
+
app = FastAPI()
|
|
112
|
+
|
|
113
|
+
@app.get("/user/{user_id}")
|
|
114
|
+
@cache(backend=LRUCache, capacity=512)
|
|
115
|
+
async def get_user(user_id: str):
|
|
116
|
+
return await fetch_user_from_db(user_id)
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
TODO: Add more examples for aiohttp, Django, Flask, Litestar, and Sanic in [`examples/`](examples/).
|
|
120
|
+
|
|
121
|
+
---
|
|
122
|
+
|
|
123
|
+
## 📐 Architecture
|
|
124
|
+
|
|
125
|
+
```
|
|
126
|
+
cache() decorator
|
|
127
|
+
└── ICacheBackend (protocol)
|
|
128
|
+
├── LRUCache — OrderedDict + move_to_end
|
|
129
|
+
├── LFUCache — key_map + freq_map + min_freq pointer
|
|
130
|
+
├── TTLCache — dict + expiry timestamps
|
|
131
|
+
└── LRUTTLCache — LRU + TTL combined
|
|
132
|
+
```
|
|
133
|
+
|
|
134
|
+
The `cache()` decorator handles key generation and cache lookup. The backend handles storage and eviction. Swap the backend, keep everything else.
|
|
135
|
+
|
|
136
|
+
---
|
|
137
|
+
|
|
138
|
+
## ⚠️ Known Limitations
|
|
139
|
+
|
|
140
|
+
- **Caching `None`**: The decorator uses `if cached_res is not None` as the cache-hit check. Functions that legitimately return `None` will always miss — the value won't be cached. Use a sentinel-aware backend or wrap the return value if needed.
|
|
141
|
+
|
|
142
|
+
---
|
|
143
|
+
|
|
144
|
+
## 📋 Requirements
|
|
145
|
+
|
|
146
|
+
- Python 3.12+
|
|
147
|
+
- Courage
|
|
148
|
+
|
|
149
|
+
---
|
|
150
|
+
|
|
151
|
+
## 🧪 Development
|
|
152
|
+
|
|
153
|
+
```bash
|
|
154
|
+
uv sync
|
|
155
|
+
pre-commit install
|
|
156
|
+
|
|
157
|
+
uv run pytest
|
|
158
|
+
uv run ruff check .
|
|
159
|
+
uv run mypy src/
|
|
160
|
+
uv run mkdocs serve
|
|
161
|
+
```
|
|
162
|
+
|
|
163
|
+
---
|
|
164
|
+
|
|
165
|
+
## 📖 Documentation
|
|
166
|
+
|
|
167
|
+
Full docs at **https://pure-python-system-design.github.io/purecache/**
|
|
168
|
+
|
|
169
|
+
---
|
|
170
|
+
|
|
171
|
+
More designs to come, if the pizza supply holds.
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
src/purecache/__init__.py
|
|
4
|
+
src/purecache/decorators.py
|
|
5
|
+
src/purecache/protocols.py
|
|
6
|
+
src/purecache.egg-info/PKG-INFO
|
|
7
|
+
src/purecache.egg-info/SOURCES.txt
|
|
8
|
+
src/purecache.egg-info/dependency_links.txt
|
|
9
|
+
src/purecache.egg-info/top_level.txt
|
|
10
|
+
src/purecache/backends/__init__.py
|
|
11
|
+
src/purecache/backends/lru.py
|
|
12
|
+
tests/test_decorators.py
|
|
13
|
+
tests/test_lru.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
purecache
|
|
@@ -0,0 +1,324 @@
|
|
|
1
|
+
"""High-quality tests for purecache.decorators.
|
|
2
|
+
|
|
3
|
+
Follows Arrange-Act-Assert and the testing pyramid.
|
|
4
|
+
Uses a fake in-memory backend to test decorator behavior without depending on LRUCache.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import pytest
|
|
8
|
+
|
|
9
|
+
from purecache.decorators import cache, generate_key
|
|
10
|
+
|
|
11
|
+
# --- Fake backend for decorator tests ---
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class FakeBackend:
|
|
15
|
+
"""In-memory backend that records get/put and stores by key."""
|
|
16
|
+
|
|
17
|
+
_instances: list["FakeBackend"] = []
|
|
18
|
+
|
|
19
|
+
def __init__(self, **kwargs: object) -> None:
|
|
20
|
+
FakeBackend._instances.append(self)
|
|
21
|
+
self._store: dict[str, object] = {}
|
|
22
|
+
self.get_calls: list[str] = []
|
|
23
|
+
self.put_calls: list[tuple[str, object]] = []
|
|
24
|
+
|
|
25
|
+
async def get(self, key: str) -> object | None:
|
|
26
|
+
self.get_calls.append(key)
|
|
27
|
+
return self._store.get(key)
|
|
28
|
+
|
|
29
|
+
async def put(self, key: str, value: object) -> None:
|
|
30
|
+
self.put_calls.append((key, value))
|
|
31
|
+
self._store[key] = value
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
@pytest.fixture(autouse=True)
def clear_fake_backend_instances() -> None:
    """Reset so each test gets a clean list of created backends."""
    # autouse: wraps every test in this module, so backends created by one
    # test can never leak into another test's _get_backend() lookup.
    FakeBackend._instances.clear()
    yield
    # Clear again on teardown so the last test's backends don't linger
    # (keeps objects collectable between tests).
    FakeBackend._instances.clear()
    # NOTE(review): for a yield-fixture the precise return annotation would
    # be Iterator[None]; left as-is to avoid a new import.
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _get_backend() -> FakeBackend:
    """Return the backend instance most recently created by the decorator.

    Fails loudly (AssertionError) when no FakeBackend exists yet, which
    means the test forgot to apply/invoke the cache decorator.
    """
    instances = FakeBackend._instances
    assert instances, "No FakeBackend instance created (cache not used?)"
    return instances[-1]
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
# --- Unit: generate_key ---
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def test_generate_key_returns_str() -> None:
    """Key is a 64-character lowercase-hex string (a SHA-256 digest)."""
    # Act
    key = generate_key((1, 2), {"a": 3})

    # Assert
    assert isinstance(key, str)
    assert len(key) == 64
    assert all(c in "0123456789abcdef" for c in key)


def test_generate_key_same_args_same_key() -> None:
    """Identical args/kwargs hash to the same key (determinism)."""
    # Arrange
    args = (1, "x")
    kwargs = {"a": 10, "b": 20}

    # Act
    key1 = generate_key(args, kwargs)
    key2 = generate_key(args, kwargs)

    # Assert
    assert key1 == key2


def test_generate_key_different_args_different_key() -> None:
    """Distinct positional args should not collide."""
    # Act
    key1 = generate_key((1, 2), {})
    key2 = generate_key((2, 1), {})

    # Assert
    assert key1 != key2


def test_generate_key_different_kwargs_different_key() -> None:
    """Changing a kwarg value changes the key."""
    # Act
    key1 = generate_key((), {"a": 1})
    key2 = generate_key((), {"a": 2})

    # Assert
    assert key1 != key2


def test_generate_key_kwargs_order_independent() -> None:
    """Same kwargs in different order produce the same key."""
    # Act
    key1 = generate_key((), {"a": 1, "b": 2})
    key2 = generate_key((), {"b": 2, "a": 1})

    # Assert
    assert key1 == key2


def test_generate_key_positional_order_matters() -> None:
    """Swapping positional args yields a different key."""
    # Act
    key1 = generate_key((1, 2), {})
    key2 = generate_key((2, 1), {})

    # Assert
    assert key1 != key2


def test_generate_key_empty_args_empty_kwargs() -> None:
    """A zero-argument call still produces a well-formed key."""
    # Act
    key = generate_key((), {})

    # Assert
    assert isinstance(key, str)
    assert len(key) == 64


def test_generate_key_empty_stable() -> None:
    """Empty args and kwargs produce stable key across calls."""
    # Act
    key1 = generate_key((), {})
    key2 = generate_key((), {})

    # Assert
    assert key1 == key2


def test_generate_key_handles_none_value() -> None:
    """None is picklable, so it participates in keys like any value."""
    # Act
    key = generate_key((None,), {"x": None})

    # Assert
    assert isinstance(key, str)
    assert len(key) == 64


def test_generate_key_handles_list_and_dict() -> None:
    """Unhashable (but picklable) containers are valid key material."""
    # Act
    key = generate_key(([1, 2],), {"k": {"nested": True}})

    # Assert
    assert isinstance(key, str)
    assert len(key) == 64


def test_generate_key_same_nested_structure_same_key() -> None:
    """Structurally equal nested arguments hash deterministically."""
    # Act
    key1 = generate_key(({"a": [1, 2]},), {})
    key2 = generate_key(({"a": [1, 2]},), {})

    # Assert
    assert key1 == key2
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
# --- Unit: cache decorator (with FakeBackend) ---
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
async def test_cache_miss_returns_func_result() -> None:
|
|
161
|
+
"""On cache miss, wrapper returns the result of the wrapped function."""
|
|
162
|
+
|
|
163
|
+
# Arrange
|
|
164
|
+
async def fn(x: int) -> int:
|
|
165
|
+
return x + 1
|
|
166
|
+
|
|
167
|
+
wrapped = cache(fn, FakeBackend)
|
|
168
|
+
|
|
169
|
+
# Act
|
|
170
|
+
result = await wrapped(10)
|
|
171
|
+
|
|
172
|
+
# Assert
|
|
173
|
+
assert result == 11
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
async def test_cache_miss_calls_backend_get_then_put() -> None:
|
|
177
|
+
"""On cache miss, backend get is called, then put with key and result."""
|
|
178
|
+
|
|
179
|
+
# Arrange
|
|
180
|
+
async def fn(x: int) -> int:
|
|
181
|
+
return x + 1
|
|
182
|
+
|
|
183
|
+
wrapped = cache(fn, FakeBackend)
|
|
184
|
+
|
|
185
|
+
# Act
|
|
186
|
+
await wrapped(5)
|
|
187
|
+
|
|
188
|
+
# Assert
|
|
189
|
+
backend = _get_backend()
|
|
190
|
+
assert len(backend.get_calls) == 1
|
|
191
|
+
assert len(backend.put_calls) == 1
|
|
192
|
+
assert backend.put_calls[0][1] == 6
|
|
193
|
+
|
|
194
|
+
|
|
195
|
+
async def test_cache_hit_returns_cached_value_without_calling_func() -> None:
    """A hit serves the stored value; the coroutine body runs only once."""
    calls = 0

    async def double(x: int) -> int:
        nonlocal calls
        calls += 1
        return x * 2

    cached = cache(double, FakeBackend)
    await cached(7)  # prime the cache

    result = await cached(7)

    assert result == 14
    assert calls == 1
async def test_cache_hit_calls_backend_get_only() -> None:
    """Two identical calls: two backend gets, but only a single put."""

    async def identity(x: int) -> int:
        return x

    cached = cache(identity, FakeBackend)
    await cached(1)
    await cached(1)

    backend = _get_backend()
    assert len(backend.get_calls) == 2
    assert len(backend.put_calls) == 1
async def test_cache_different_args_different_keys() -> None:
    """Distinct arguments map to distinct keys and separate cache entries."""

    async def identity(x: int) -> int:
        return x

    cached = cache(identity, FakeBackend)
    await cached(1)
    await cached(2)

    backend = _get_backend()
    assert len(backend.put_calls) == 2
    first_key = backend.put_calls[0][0]
    second_key = backend.put_calls[1][0]
    assert first_key != second_key
async def test_cache_same_args_same_key() -> None:
    """The decorator derives its key exactly as generate_key does."""

    async def identity(x: int) -> int:
        return x

    cached = cache(identity, FakeBackend)
    expected_key = generate_key((1,), {})
    await cached(1)

    stored_key = _get_backend().put_calls[0][0]

    assert stored_key == expected_key
async def test_cache_preserves_function_name() -> None:
    """functools.wraps keeps the original __name__ on the wrapper."""

    async def my_async_func() -> str:
        return "ok"

    wrapped = cache(my_async_func, FakeBackend)

    assert wrapped.__name__ == "my_async_func"
async def test_cache_with_kwargs_uses_key_from_both_args_and_kwargs() -> None:
    """Positional vs keyword spelling of the same call produce distinct keys."""

    async def add(a: int, b: int) -> int:
        return a + b

    cached = cache(add, FakeBackend)
    await cached(1, 2)
    await cached(1, b=2)

    # (1, 2) with empty kwargs differs from (1,) with {"b": 2}
    stored_keys = [entry[0] for entry in _get_backend().put_calls]
    assert len(stored_keys) == 2
    assert stored_keys[0] != stored_keys[1]
299
|
+
# --- Integration: cache with falsy values ---
|
|
300
|
+
|
|
301
|
+
|
|
302
|
+
async def test_cache_stores_falsy_value_then_returns_on_hit() -> None:
    """Falsy results such as 0 are cached: the second call must be a hit.

    Guards against a truthiness bug in the decorator's hit check — it must
    use ``is not None`` (or an explicit sentinel), not ``if cached:``.
    """
    calls = 0

    async def zero() -> int:
        nonlocal calls
        calls += 1
        return 0

    cached = cache(zero, FakeBackend)
    first = await cached()

    second = await cached()

    assert first == 0
    assert second == 0
    assert calls == 1
@@ -0,0 +1,294 @@
|
|
|
1
|
+
"""High-quality specification tests for LRUCache.
|
|
2
|
+
|
|
3
|
+
Follows Arrange-Act-Assert and the testing pyramid:
|
|
4
|
+
- Unit: single operation, one assertion focus.
|
|
5
|
+
- Integration: multiple operations, eviction + ordering behavior.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import pytest
|
|
9
|
+
|
|
10
|
+
from purecache.backends.lru import LRUCache
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@pytest.fixture
def cache() -> LRUCache:
    """Provide a brand-new three-slot LRUCache for each test."""
    return LRUCache(3)
|
19
|
+
# --- Unit: get behavior ---
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
async def test_get_missing_returns_none(cache: LRUCache) -> None:
    """A key that was never stored reads back as None."""
    result = await cache.get("missing")

    assert result is None
|
|
33
|
+
async def test_get_after_put_returns_value(cache: LRUCache) -> None:
    """A stored value is retrievable under its key."""
    await cache.put("a", 42)

    assert await cache.get("a") == 42
|
|
45
|
+
async def test_get_after_overwrite_returns_latest_value(cache: LRUCache) -> None:
    """Re-putting a key replaces its value; the newest write wins."""
    await cache.put("a", 1)
    await cache.put("a", 2)

    assert await cache.get("a") == 2
|
58
|
+
# --- Unit: put behavior (no eviction) ---
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
async def test_put_stores_value_by_key(cache: LRUCache) -> None:
    """put followed by get round-trips the value."""
    await cache.put("x", 10)

    assert await cache.get("x") == 10
|
|
72
|
+
async def test_put_multiple_keys_stores_independently(cache: LRUCache) -> None:
    """Entries under distinct keys do not interfere with one another."""
    for key, value in (("x", 10), ("y", 20), ("z", 30)):
        await cache.put(key, value)

    x_val = await cache.get("x")
    y_val = await cache.get("y")
    z_val = await cache.get("z")

    assert x_val == 10
    assert y_val == 20
    assert z_val == 30
+
|
|
90
|
+
# --- Unit: value types (Any) ---
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
async def test_put_get_int_value(cache: LRUCache) -> None:
    """Integer values round-trip unchanged."""
    await cache.put("k", 1)

    assert await cache.get("k") == 1
|
|
104
|
+
async def test_put_get_str_value(cache: LRUCache) -> None:
    """String values round-trip unchanged."""
    await cache.put("k", "hello")

    assert await cache.get("k") == "hello"
|
|
115
|
+
async def test_put_get_dict_value(cache: LRUCache) -> None:
    """Dict values round-trip unchanged."""
    await cache.put("k", {"nested": True})

    assert await cache.get("k") == {"nested": True}
|
|
126
|
+
async def test_put_get_none_value(cache: LRUCache) -> None:
    """A stored None reads back as None (indistinguishable from a miss)."""
    await cache.put("k", None)

    assert await cache.get("k") is None
|
|
137
|
+
async def test_put_get_object_value(cache: LRUCache) -> None:
    """Arbitrary objects are stored by reference — identity is preserved."""
    sentinel = object()
    await cache.put("k", sentinel)

    assert await cache.get("k") is sentinel
|
|
149
|
+
async def test_put_get_empty_string_key(cache: LRUCache) -> None:
    """The empty string is a valid key."""
    await cache.put("", "empty")

    assert await cache.get("") == "empty"
+
|
|
161
|
+
# --- Unit: capacity 1 ---
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
async def test_capacity_one_put_get_returns_value() -> None:
    """A single-slot cache serves the one entry it holds."""
    tiny = LRUCache(1)
    await tiny.put("first", 1)

    assert await tiny.get("first") == 1
|
|
177
|
+
async def test_capacity_one_second_put_evicts_first() -> None:
    """With capacity 1, inserting a second key pushes out the first."""
    tiny = LRUCache(1)
    await tiny.put("first", 1)
    await tiny.put("second", 2)

    assert await tiny.get("first") is None
    assert await tiny.get("second") == 2
192
|
+
|
|
193
|
+
# --- Integration: eviction and LRU order ---
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
async def test_eviction_removes_oldest_when_at_capacity(cache: LRUCache) -> None:
    """Inserting beyond capacity drops the least recently used entry."""
    for key, value in (("a", 1), ("b", 2), ("c", 3)):
        await cache.put(key, value)

    await cache.put("d", 4)  # overflow: "a" is the LRU entry

    assert await cache.get("a") is None
    assert await cache.get("b") == 2
    assert await cache.get("c") == 3
    assert await cache.get("d") == 4
|
|
213
|
+
async def test_get_touches_and_protects_from_eviction(cache: LRUCache) -> None:
    """Reading a key promotes it, so the next eviction targets another entry."""
    await cache.put("a", 1)
    await cache.put("b", 2)
    await cache.put("c", 3)
    await cache.get("a")  # promote "a"; "b" becomes the LRU entry

    await cache.put("d", 4)

    assert await cache.get("a") == 1
    assert await cache.get("b") is None
    assert await cache.get("c") == 3
    assert await cache.get("d") == 4
|
|
231
|
+
async def test_put_refreshes_order(cache: LRUCache) -> None:
    """Overwriting a key also promotes it in the recency order."""
    await cache.put("a", 1)
    await cache.put("b", 2)
    await cache.put("c", 3)
    await cache.put("a", 10)  # overwrite promotes "a"; "b" is now LRU

    await cache.put("d", 4)

    assert await cache.get("a") == 10
    assert await cache.get("b") is None
    assert await cache.get("c") == 3
    assert await cache.get("d") == 4
|
|
249
|
+
async def test_repeated_put_same_key_keeps_latest_until_evicted(
    cache: LRUCache,
) -> None:
    """Hammering one key keeps only the last value; fresh keys then evict it."""
    for value in range(10):
        await cache.put("same", value)

    # latest write wins while the key is resident
    assert await cache.get("same") == 9

    # three new keys fill capacity 3 and push "same" out
    await cache.put("b", 2)
    await cache.put("c", 3)
    await cache.put("d", 4)

    assert await cache.get("same") is None
    assert await cache.get("b") == 2
    assert await cache.get("c") == 3
    assert await cache.get("d") == 4
|
|
272
|
+
async def test_sequential_put_get_ordering(cache: LRUCache) -> None:
    """Interleaved put/get awaits stay consistent; overflow evicts the LRU key."""
    await cache.put("k1", 1)
    first = await cache.get("k1")
    await cache.put("k2", 2)
    second = await cache.get("k2")
    await cache.put("k3", 3)
    third = await cache.get("k3")

    assert (first, second, third) == (1, 2, 3)

    await cache.put("k4", 4)  # overflow: "k1" is oldest

    assert await cache.get("k1") is None
    assert await cache.get("k2") == 2
    assert await cache.get("k3") == 3
    assert await cache.get("k4") == 4