tiercache 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tiercache-0.1.0/.gitignore +33 -0
- tiercache-0.1.0/PKG-INFO +40 -0
- tiercache-0.1.0/README.md +298 -0
- tiercache-0.1.0/design.txt +289 -0
- tiercache-0.1.0/example/app.py +117 -0
- tiercache-0.1.0/example/config.yaml +27 -0
- tiercache-0.1.0/example/config_memcached.yaml +38 -0
- tiercache-0.1.0/example/config_ram.yaml +32 -0
- tiercache-0.1.0/pyproject.toml +57 -0
- tiercache-0.1.0/src/smartcache/__init__.py +41 -0
- tiercache-0.1.0/src/smartcache/backends/__init__.py +0 -0
- tiercache-0.1.0/src/smartcache/backends/base.py +29 -0
- tiercache-0.1.0/src/smartcache/backends/dry/__init__.py +0 -0
- tiercache-0.1.0/src/smartcache/backends/dry/local.py +109 -0
- tiercache-0.1.0/src/smartcache/backends/dry/mongodb.py +103 -0
- tiercache-0.1.0/src/smartcache/backends/dry/s3.py +119 -0
- tiercache-0.1.0/src/smartcache/backends/memcached.py +115 -0
- tiercache-0.1.0/src/smartcache/backends/ram.py +101 -0
- tiercache-0.1.0/src/smartcache/config.py +150 -0
- tiercache-0.1.0/src/smartcache/manager.py +189 -0
- tiercache-0.1.0/src/smartcache/serializer.py +18 -0
- tiercache-0.1.0/src/smartcache/tracking/__init__.py +0 -0
- tiercache-0.1.0/src/smartcache/tracking/base.py +29 -0
- tiercache-0.1.0/src/smartcache/tracking/mongodb.py +88 -0
- tiercache-0.1.0/src/smartcache/tracking/postgres.py +98 -0
- tiercache-0.1.0/src/smartcache/tracking/redis.py +80 -0
- tiercache-0.1.0/src/smartcache/tracking/sqlite.py +91 -0
- tiercache-0.1.0/tests/__init__.py +0 -0
- tiercache-0.1.0/tests/test_backends/__init__.py +0 -0
- tiercache-0.1.0/tests/test_backends/test_ram.py +63 -0
- tiercache-0.1.0/tests/test_manager.py +90 -0
- tiercache-0.1.0/tests/test_tracking/__init__.py +0 -0
- tiercache-0.1.0/tests/test_tracking/test_sqlite.py +40 -0
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
# Python
|
|
2
|
+
__pycache__/
|
|
3
|
+
*.py[cod]
|
|
4
|
+
*.pyo
|
|
5
|
+
*.pyd
|
|
6
|
+
*.egg-info/
|
|
7
|
+
*.egg
|
|
8
|
+
|
|
9
|
+
# Virtual environment
|
|
10
|
+
.venv/
|
|
11
|
+
venv/
|
|
12
|
+
env/
|
|
13
|
+
|
|
14
|
+
# Build / dist
|
|
15
|
+
dist/
|
|
16
|
+
build/
|
|
17
|
+
|
|
18
|
+
# Tests
|
|
19
|
+
.pytest_cache/
|
|
20
|
+
.coverage
|
|
21
|
+
htmlcov/
|
|
22
|
+
|
|
23
|
+
# IDE / tools
|
|
24
|
+
.claude/
|
|
25
|
+
.idea/
|
|
26
|
+
.vscode/
|
|
27
|
+
|
|
28
|
+
# Cache data
|
|
29
|
+
/tmp/smartcache/
|
|
30
|
+
|
|
31
|
+
# Project specific
|
|
32
|
+
media/
|
|
33
|
+
notes.txt
|
tiercache-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: tiercache
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: RAM-first three-tier cache with swappable backends
|
|
5
|
+
Project-URL: Homepage, https://github.com/madtunebk/tiercache
|
|
6
|
+
Project-URL: Repository, https://github.com/madtunebk/tiercache
|
|
7
|
+
Project-URL: Issues, https://github.com/madtunebk/tiercache/issues
|
|
8
|
+
Author-email: Cornea Valentin <valicornea84@gmail.com>
|
|
9
|
+
License: MIT
|
|
10
|
+
Keywords: async,cache,django,fastapi,flask,memcached,ram,tiered-cache
|
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
|
12
|
+
Classifier: Framework :: AsyncIO
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Topic :: Software Development :: Libraries
|
|
19
|
+
Classifier: Topic :: System :: Distributed Computing
|
|
20
|
+
Requires-Python: >=3.11
|
|
21
|
+
Requires-Dist: aiofiles>=23.0
|
|
22
|
+
Requires-Dist: aiosqlite>=0.19
|
|
23
|
+
Requires-Dist: msgpack>=1.0
|
|
24
|
+
Requires-Dist: pyyaml>=6.0
|
|
25
|
+
Provides-Extra: all
|
|
26
|
+
Requires-Dist: aioboto3>=12.0; extra == 'all'
|
|
27
|
+
Requires-Dist: aiomcache>=0.8; extra == 'all'
|
|
28
|
+
Requires-Dist: asyncpg>=0.29; extra == 'all'
|
|
29
|
+
Requires-Dist: motor>=3.0; extra == 'all'
|
|
30
|
+
Requires-Dist: redis>=5.0; extra == 'all'
|
|
31
|
+
Provides-Extra: memcached
|
|
32
|
+
Requires-Dist: aiomcache>=0.8; extra == 'memcached'
|
|
33
|
+
Provides-Extra: mongodb
|
|
34
|
+
Requires-Dist: motor>=3.0; extra == 'mongodb'
|
|
35
|
+
Provides-Extra: postgres
|
|
36
|
+
Requires-Dist: asyncpg>=0.29; extra == 'postgres'
|
|
37
|
+
Provides-Extra: redis
|
|
38
|
+
Requires-Dist: redis>=5.0; extra == 'redis'
|
|
39
|
+
Provides-Extra: s3
|
|
40
|
+
Requires-Dist: aioboto3>=12.0; extra == 's3'
|
|
@@ -0,0 +1,298 @@
|
|
|
1
|
+
# SmartCache
|
|
2
|
+
|
|
3
|
+
RAM-first three-tier cache for Python. Designed to keep your SSD/HDD out of the hot path.
|
|
4
|
+
|
|
5
|
+
```
|
|
6
|
+
pip install tiercache
|
|
7
|
+
```
|
|
8
|
+
|
|
9
|
+
---
|
|
10
|
+
|
|
11
|
+
## How it works
|
|
12
|
+
|
|
13
|
+
Every request walks down the tier chain until a hit is found:
|
|
14
|
+
|
|
15
|
+
```
|
|
16
|
+
GET request
|
|
17
|
+
│
|
|
18
|
+
├─ Hot cache (RAM, 2GB, 4h TTL) ──── HIT → serve, reset TTL
|
|
19
|
+
│ MISS ↓
|
|
20
|
+
├─ Cold cache (RAM, 10GB, 24h TTL) ──── HIT → promote to hot → serve
|
|
21
|
+
│ MISS ↓
|
|
22
|
+
└─ Dry cache (Disk / S3 / MongoDB) ─── HIT → promote to hot → serve
|
|
23
|
+
MISS → return None (fetch from origin)
|
|
24
|
+
|
|
25
|
+
SET request
|
|
26
|
+
└─ Writes to hot only (zero disk I/O)
|
|
27
|
+
│
|
|
28
|
+
└─ When hot evicts or expires → auto-demote to dry (failsafe, background)
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
Both hot and cold live entirely in RAM. Dry is only hit on a true cache miss.
|
|
32
|
+
After a server restart, the first GET recovers each item from dry back into hot.
|
|
33
|
+
|
|
34
|
+
---
|
|
35
|
+
|
|
36
|
+
## Installation
|
|
37
|
+
|
|
38
|
+
```bash
|
|
39
|
+
# Base (RAM + local filesystem + SQLite tracking)
|
|
40
|
+
pip install tiercache
|
|
41
|
+
|
|
42
|
+
# With Memcached backends (multi-process / multi-server)
|
|
43
|
+
pip install "tiercache[memcached]"
|
|
44
|
+
|
|
45
|
+
# With Redis tracking
|
|
46
|
+
pip install "tiercache[redis]"
|
|
47
|
+
|
|
48
|
+
# With S3 dry cache
|
|
49
|
+
pip install "tiercache[s3]"
|
|
50
|
+
|
|
51
|
+
# With MongoDB
|
|
52
|
+
pip install "tiercache[mongodb]"
|
|
53
|
+
|
|
54
|
+
# With PostgreSQL tracking
|
|
55
|
+
pip install "tiercache[postgres]"
|
|
56
|
+
|
|
57
|
+
# Everything
|
|
58
|
+
pip install "tiercache[all]"
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
---
|
|
62
|
+
|
|
63
|
+
## Quick start
|
|
64
|
+
|
|
65
|
+
### From a config file
|
|
66
|
+
|
|
67
|
+
```python
|
|
68
|
+
from smartcache import CacheManager
|
|
69
|
+
|
|
70
|
+
cache = CacheManager.from_config("smartcache.yaml")
|
|
71
|
+
|
|
72
|
+
# Async (FastAPI, aiohttp, Sanic)
|
|
73
|
+
value = await cache.get("my-key")
|
|
74
|
+
await cache.set("my-key", data)
|
|
75
|
+
|
|
76
|
+
# Sync (Flask, Django)
|
|
77
|
+
value = cache.get_sync("my-key")
|
|
78
|
+
cache.set_sync("my-key", data)
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
### In code
|
|
82
|
+
|
|
83
|
+
```python
|
|
84
|
+
from smartcache import CacheManager
|
|
85
|
+
from smartcache.backends.ram import RamBackend
|
|
86
|
+
from smartcache.backends.dry.local import LocalBackend
|
|
87
|
+
from smartcache.tracking.sqlite import SQLiteTracking
|
|
88
|
+
|
|
89
|
+
cache = CacheManager(
|
|
90
|
+
hot=RamBackend(ttl_seconds=14400, max_size_bytes=2 * 1024**3),
|
|
91
|
+
cold=RamBackend(ttl_seconds=86400, max_size_bytes=10 * 1024**3),
|
|
92
|
+
dry=LocalBackend(base_path="/var/cache/myapp/dry", max_size_bytes=100 * 1024**3),
|
|
93
|
+
tracking=SQLiteTracking(path="/var/cache/myapp/index.db"),
|
|
94
|
+
)
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
---
|
|
98
|
+
|
|
99
|
+
## Configuration
|
|
100
|
+
|
|
101
|
+
```yaml
|
|
102
|
+
# smartcache.yaml
|
|
103
|
+
|
|
104
|
+
hot_cache:
|
|
105
|
+
backend: ram # ram | memcached
|
|
106
|
+
ttl_hours: 4
|
|
107
|
+
max_size_gb: 2
|
|
108
|
+
|
|
109
|
+
cold_cache:
|
|
110
|
+
backend: ram # ram | memcached
|
|
111
|
+
ttl_hours: 24
|
|
112
|
+
max_size_gb: 10
|
|
113
|
+
|
|
114
|
+
dry_cache:
|
|
115
|
+
backend: local # local | s3 | mongodb
|
|
116
|
+
max_size_gb: 100
|
|
117
|
+
path: /var/cache/myapp/dry
|
|
118
|
+
|
|
119
|
+
tracking:
|
|
120
|
+
backend: sqlite # sqlite | redis | postgres | mongodb
|
|
121
|
+
|
|
122
|
+
# Optional: TTL rules by tag
|
|
123
|
+
ttl_rules:
|
|
124
|
+
- tag: { type: thumbnail }
|
|
125
|
+
hot_ttl_hours: 1
|
|
126
|
+
cold_ttl_hours: 6
|
|
127
|
+
- tag: { type: raw }
|
|
128
|
+
hot_ttl_hours: 8
|
|
129
|
+
cold_ttl_hours: 48
|
|
130
|
+
```
|
|
131
|
+
|
|
132
|
+
### Memcached (multi-process / multi-server)
|
|
133
|
+
|
|
134
|
+
```yaml
|
|
135
|
+
hot_cache:
|
|
136
|
+
backend: memcached
|
|
137
|
+
ttl_hours: 4
|
|
138
|
+
max_size_gb: 2
|
|
139
|
+
|
|
140
|
+
cold_cache:
|
|
141
|
+
backend: memcached
|
|
142
|
+
ttl_hours: 24
|
|
143
|
+
max_size_gb: 10
|
|
144
|
+
|
|
145
|
+
memcached:
|
|
146
|
+
host: localhost
|
|
147
|
+
port: 11211
|
|
148
|
+
```
|
|
149
|
+
|
|
150
|
+
### S3 dry cache
|
|
151
|
+
|
|
152
|
+
```yaml
|
|
153
|
+
dry_cache:
|
|
154
|
+
backend: s3
|
|
155
|
+
|
|
156
|
+
s3:
|
|
157
|
+
endpoint_url: https://s3.amazonaws.com # or MinIO, Cloudflare R2, etc.
|
|
158
|
+
bucket: my-cache-bucket
|
|
159
|
+
access_key: ...
|
|
160
|
+
secret_key: ...
|
|
161
|
+
```
|
|
162
|
+
|
|
163
|
+
### MongoDB dry cache + tracking
|
|
164
|
+
|
|
165
|
+
```yaml
|
|
166
|
+
dry_cache:
|
|
167
|
+
backend: mongodb
|
|
168
|
+
|
|
169
|
+
tracking:
|
|
170
|
+
backend: mongodb
|
|
171
|
+
|
|
172
|
+
mongodb:
|
|
173
|
+
uri: mongodb://localhost:27017
|
|
174
|
+
database: smartcache
|
|
175
|
+
```
|
|
176
|
+
|
|
177
|
+
### Redis tracking
|
|
178
|
+
|
|
179
|
+
```yaml
|
|
180
|
+
tracking:
|
|
181
|
+
backend: redis
|
|
182
|
+
|
|
183
|
+
redis:
|
|
184
|
+
host: localhost
|
|
185
|
+
port: 6379
|
|
186
|
+
db: 0
|
|
187
|
+
```
|
|
188
|
+
|
|
189
|
+
---
|
|
190
|
+
|
|
191
|
+
## API
|
|
192
|
+
|
|
193
|
+
```python
|
|
194
|
+
# Fetch a value (returns None on miss)
|
|
195
|
+
value = await cache.get("key")
|
|
196
|
+
|
|
197
|
+
# Store a value using tier default TTL
|
|
198
|
+
await cache.set("key", data)
|
|
199
|
+
|
|
200
|
+
# Override TTL for this key only
|
|
201
|
+
await cache.set("key", data, ttl_hours=2)
|
|
202
|
+
|
|
203
|
+
# Tag-based TTL (matched against ttl_rules in config)
|
|
204
|
+
await cache.set("key", data, tags={"type": "thumbnail"})
|
|
205
|
+
|
|
206
|
+
# Delete from all tiers
|
|
207
|
+
await cache.delete("key")
|
|
208
|
+
|
|
209
|
+
# Flush a specific tier or all
|
|
210
|
+
await cache.flush(tier="hot") # hot | cold | dry | all
|
|
211
|
+
|
|
212
|
+
# Hit/miss stats + tier sizes
|
|
213
|
+
stats = await cache.stats()
|
|
214
|
+
# {
|
|
215
|
+
# "hot_hits": 120, "cold_hits": 30, "dry_hits": 5, "misses": 2,
|
|
216
|
+
# "hot_size_bytes": 1048576, "cold_size_bytes": 0, "dry_size_bytes": 4096
|
|
217
|
+
# }
|
|
218
|
+
|
|
219
|
+
# Sync equivalents (Flask, Django)
|
|
220
|
+
cache.get_sync("key")
|
|
221
|
+
cache.set_sync("key", data, ttl_hours=2, tags={"type": "thumbnail"})
|
|
222
|
+
cache.delete_sync("key")
|
|
223
|
+
cache.flush_sync(tier="hot")
|
|
224
|
+
cache.stats_sync()
|
|
225
|
+
|
|
226
|
+
# Always close on shutdown
|
|
227
|
+
await cache.close()
|
|
228
|
+
```
|
|
229
|
+
|
|
230
|
+
### TTL priority (highest wins)
|
|
231
|
+
|
|
232
|
+
| Priority | Example |
|
|
233
|
+
|---|---|
|
|
234
|
+
| 1. Per-key override | `cache.set("k", v, ttl_hours=1)` |
|
|
235
|
+
| 2. Tag rule | `cache.set("k", v, tags={"type": "thumbnail"})` → matched in config |
|
|
236
|
+
| 3. Tier default | `hot_cache.ttl_hours` in yaml |
|
|
237
|
+
| 4. Global default | hot: 4h, cold: 24h, dry: no expiry |
|
|
238
|
+
|
|
239
|
+
---
|
|
240
|
+
|
|
241
|
+
## Backends
|
|
242
|
+
|
|
243
|
+
| Tier | Backend | Notes |
|
|
244
|
+
|---|---|---|
|
|
245
|
+
| Hot / Cold | `ram` | In-process, single server |
|
|
246
|
+
| Hot / Cold | `memcached` | Shared pool, multi-process/server |
|
|
247
|
+
| Dry | `local` | Local filesystem, SSD/HDD |
|
|
248
|
+
| Dry | `s3` | AWS S3, MinIO, Cloudflare R2 |
|
|
249
|
+
| Dry | `mongodb` | GridFS + native TTL indexes |
|
|
250
|
+
| Tracking | `sqlite` | Zero deps, single machine |
|
|
251
|
+
| Tracking | `redis` | In-memory, fast, recommended |
|
|
252
|
+
| Tracking | `postgres` | Production relational |
|
|
253
|
+
| Tracking | `mongodb` | Flexible schema, TTL indexes |
|
|
254
|
+
|
|
255
|
+
---
|
|
256
|
+
|
|
257
|
+
## Example HTTP app (FastAPI)
|
|
258
|
+
|
|
259
|
+
```bash
|
|
260
|
+
pip install fastapi uvicorn
|
|
261
|
+
|
|
262
|
+
# Single process (RAM)
|
|
263
|
+
uvicorn example.app:app --port 8989
|
|
264
|
+
|
|
265
|
+
# Multi-process (Memcached — shared cache across all workers)
|
|
266
|
+
SMARTCACHE_CONFIG=example/config_memcached.yaml \
|
|
267
|
+
uvicorn example.app:app --workers 4 --port 8989
|
|
268
|
+
```
|
|
269
|
+
|
|
270
|
+
```bash
|
|
271
|
+
# Store an image
|
|
272
|
+
curl -X PUT "http://localhost:8989/cache/photo.png?tag_type=thumbnail" \
|
|
273
|
+
-H "Content-Type: image/png" \
|
|
274
|
+
--data-binary @photo.png
|
|
275
|
+
|
|
276
|
+
# Fetch it (opens directly in browser)
|
|
277
|
+
curl "http://localhost:8989/cache/photo.png" -o out.png
|
|
278
|
+
|
|
279
|
+
# Stats
|
|
280
|
+
curl "http://localhost:8989/stats"
|
|
281
|
+
```
|
|
282
|
+
|
|
283
|
+
---
|
|
284
|
+
|
|
285
|
+
## Why not just use Redis for everything?
|
|
286
|
+
|
|
287
|
+
Redis is great but it is a network service — every cache hit is a round trip.
|
|
288
|
+
SmartCache's RAM backend (`ram`) stores values directly in the Python process
|
|
289
|
+
memory, making hot-path lookups **microsecond-range** with zero network overhead.
|
|
290
|
+
|
|
291
|
+
Use `memcached` when you need shared cache across multiple processes or servers.
|
|
292
|
+
Use `redis` for tracking metadata (tiny footprint, fast, persistent).
|
|
293
|
+
|
|
294
|
+
---
|
|
295
|
+
|
|
296
|
+
## License
|
|
297
|
+
|
|
298
|
+
MIT
|
|
@@ -0,0 +1,289 @@
|
|
|
1
|
+
SmartCache — Design Document
|
|
2
|
+
============================
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
CORE PHILOSOPHY
|
|
6
|
+
---------------
|
|
7
|
+
RAM-first. Hit disk as rarely as possible.
|
|
8
|
+
|
|
9
|
+
Hot and cold tiers live entirely in memory — no SSD, no HDD.
|
|
10
|
+
Dry is the only disk tier and should be a last resort.
|
|
11
|
+
The goal is to serve 95%+ of requests from RAM.
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
OVERVIEW
|
|
15
|
+
--------
|
|
16
|
+
A Python pip package implementing a three-tier hierarchical cache with
|
|
17
|
+
swappable backends per tier. Designed to be dropped into any project.
|
|
18
|
+
|
|
19
|
+
pip install tiercache
|
|
20
|
+
pip install tiercache[memcached]
|
|
21
|
+
pip install tiercache[s3]
|
|
22
|
+
pip install tiercache[mongodb]
|
|
23
|
+
pip install tiercache[postgres]
|
|
24
|
+
pip install tiercache[all]
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
TIERS
|
|
28
|
+
-----
|
|
29
|
+
|
|
30
|
+
Hot Cache
|
|
31
|
+
Purpose : Fastest access, short-lived. Serves the majority of hits.
|
|
32
|
+
Storage : RAM only
|
|
33
|
+
TTL : 4 hours (configurable)
|
|
34
|
+
Default : RAM (in-process)
|
|
35
|
+
Options : RAM, Memcached
|
|
36
|
+
Max size : 2 GB (configurable)
|
|
37
|
+
|
|
38
|
+
Cold Cache
|
|
39
|
+
Purpose : Larger RAM pool, longer-lived. Fallback when hot misses.
|
|
40
|
+
Storage : RAM only
|
|
41
|
+
TTL : 24 hours (configurable)
|
|
42
|
+
Default : RAM (in-process)
|
|
43
|
+
Options : RAM, Memcached
|
|
44
|
+
Max size : 10 GB (configurable)
|
|
45
|
+
|
|
46
|
+
Dry Cache
|
|
47
|
+
Purpose : Disk-based last resort. Only hit when both RAM tiers miss.
|
|
48
|
+
Storage : Disk / object storage
|
|
49
|
+
TTL : None by default (configurable)
|
|
50
|
+
Default : Local filesystem
|
|
51
|
+
Options : Local filesystem, S3-compatible, MongoDB (GridFS)
|
|
52
|
+
Max size : 100 GB (configurable)
|
|
53
|
+
|
|
54
|
+
Note:
|
|
55
|
+
Hot and cold use the same backend options. The difference is size and TTL.
|
|
56
|
+
Hot = small + short-lived (most frequent hits).
|
|
57
|
+
Cold = larger + longer-lived (warm data that didn't fit or expired from hot).
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
TTL PRIORITY (highest wins)
|
|
61
|
+
---------------------------
|
|
62
|
+
TTLs are resolved in this order:
|
|
63
|
+
|
|
64
|
+
1. Per-key override → cache.set("key", data, ttl_hours=1)
|
|
65
|
+
2. Per-call tag → cache.set("key", data, tags={"type": "thumbnail"})
|
|
66
|
+
matched against tag rules in config
|
|
67
|
+
3. Tier default → hot_cache.ttl_hours in smartcache.yaml
|
|
68
|
+
4. Global default → hot: 4h, cold: 24h, dry: no expiry
|
|
69
|
+
|
|
70
|
+
This means the user can:
|
|
71
|
+
- Set a global default in the config file
|
|
72
|
+
- Override per tag/category (e.g. thumbnails expire faster than raw files)
|
|
73
|
+
- Override per individual key at set time
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
LOOKUP BEHAVIOR
|
|
77
|
+
---------------
|
|
78
|
+
On every cache.get(key):
|
|
79
|
+
|
|
80
|
+
1. Check hot cache (RAM)
|
|
81
|
+
HIT → serve it, reset TTL, return value
|
|
82
|
+
MISS → continue
|
|
83
|
+
|
|
84
|
+
2. Check cold cache (RAM)
|
|
85
|
+
HIT → promote to hot cache, return value
|
|
86
|
+
MISS → continue
|
|
87
|
+
|
|
88
|
+
3. Check dry cache (Disk — last resort)
|
|
89
|
+
HIT → promote to hot cache, return value
|
|
90
|
+
MISS → return None (caller handles origin fetch)
|
|
91
|
+
|
|
92
|
+
On cache.set(key, value, ttl_hours=None, tags=None):
|
|
93
|
+
- Writes to hot cache only
|
|
94
|
+
- ttl_hours overrides the tier default for this key only
|
|
95
|
+
- Dry cache is populated automatically on eviction (failsafe), not on write
|
|
96
|
+
- On hot/cold LRU evict or TTL expiry → value is demoted to dry
|
|
97
|
+
- On server restart all RAM is lost, but dry still holds everything
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
TRACKING / METADATA
|
|
101
|
+
-------------------
|
|
102
|
+
Keeps a record of all cached keys, timestamps, hit counts, tier location.
|
|
103
|
+
Should be fast — lives in memory where possible.
|
|
104
|
+
|
|
105
|
+
Default : SQLite (zero dependencies; Redis recommended when available —
          in-memory, fast key lookups, tiny metadata footprint)
|
|
106
|
+
Options : Redis, SQLite, PostgreSQL, MongoDB
|
|
107
|
+
|
|
108
|
+
Used for:
|
|
109
|
+
- Knowing which tier holds a key without probing all three
|
|
110
|
+
- Hit/miss statistics
|
|
111
|
+
- Manual inspection and cache management
|
|
112
|
+
|
|
113
|
+
Why Redis is recommended for tracking:
|
|
114
|
+
Metadata per key is tiny (timestamps, tier, hit count). Redis holds this
|
|
115
|
+
entirely in memory with near-zero overhead. No disk I/O for lookups,
|
|
116
|
+
which fits the RAM-first philosophy.
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
BACKENDS (per tier)
|
|
120
|
+
-------------------
|
|
121
|
+
|
|
122
|
+
Hot / Cold (RAM only)
|
|
123
|
+
ram — Python dict with TTL and LRU eviction, single process
|
|
124
|
+
memcached — Distributed in-memory, great for multi-process or multi-server
|
|
125
|
+
|
|
126
|
+
Dry (Disk — last resort)
|
|
127
|
+
local — Files on SSD/HDD, size-based cleanup
|
|
128
|
+
s3 — S3-compatible object storage (AWS S3, MinIO, Cloudflare R2)
|
|
129
|
+
mongodb — GridFS for large binary/blob storage, flexible metadata
|
|
130
|
+
|
|
131
|
+
Tracking
|
|
132
|
+
redis — Recommended. In-memory, fast, fits RAM-first philosophy
|
|
133
|
+
sqlite — Default. Zero-dependency, single .db file on disk
|
|
134
|
+
postgres — Production relational option, strong querying
|
|
135
|
+
mongodb — Native TTL indexes, flexible schema, good if already used for dry
|
|
136
|
+
|
|
137
|
+
Note on MongoDB (dry + tracking):
|
|
138
|
+
- TTL indexes: MongoDB handles key expiry automatically, no cron needed
|
|
139
|
+
- GridFS: native large file storage split into chunks
|
|
140
|
+
- A single MongoDB instance can serve as dry backend AND tracking store
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
CONFIGURATION
|
|
144
|
+
-------------
|
|
145
|
+
Configured via a YAML file or passed as a dict in code.
|
|
146
|
+
|
|
147
|
+
Example smartcache.yaml:
|
|
148
|
+
|
|
149
|
+
hot_cache:
|
|
150
|
+
backend: ram # ram | memcached
|
|
151
|
+
ttl_hours: 4 # default TTL — overridable per key or per tag
|
|
152
|
+
max_size_gb: 2
|
|
153
|
+
|
|
154
|
+
cold_cache:
|
|
155
|
+
backend: ram # ram | memcached
|
|
156
|
+
ttl_hours: 24 # default TTL — overridable per key or per tag
|
|
157
|
+
max_size_gb: 10
|
|
158
|
+
|
|
159
|
+
dry_cache:
|
|
160
|
+
backend: local # local | s3 | mongodb
|
|
161
|
+
ttl_hours: null # null = no expiry (default)
|
|
162
|
+
max_size_gb: 100
|
|
163
|
+
path: /var/cache/smartcache/dry
|
|
164
|
+
|
|
165
|
+
tracking:
|
|
166
|
+
backend: redis # redis | sqlite | postgres | mongodb
|
|
167
|
+
|
|
168
|
+
# Optional: TTL rules by tag
|
|
169
|
+
# Applied when cache.set() is called with a matching tag
|
|
170
|
+
ttl_rules:
|
|
171
|
+
- tag: { type: thumbnail }
|
|
172
|
+
hot_ttl_hours: 1
|
|
173
|
+
cold_ttl_hours: 12
|
|
174
|
+
- tag: { type: raw }
|
|
175
|
+
hot_ttl_hours: 8
|
|
176
|
+
cold_ttl_hours: 48
|
|
177
|
+
dry_ttl_hours: 720 # 30 days
|
|
178
|
+
|
|
179
|
+
redis: # only needed if tracking backend is redis
|
|
180
|
+
host: localhost
|
|
181
|
+
port: 6379
|
|
182
|
+
db: 0
|
|
183
|
+
|
|
184
|
+
memcached: # only needed if hot/cold backend is memcached
|
|
185
|
+
host: localhost
|
|
186
|
+
port: 11211
|
|
187
|
+
|
|
188
|
+
s3: # only needed if dry backend is s3
|
|
189
|
+
endpoint_url: ...
|
|
190
|
+
bucket: smartcache
|
|
191
|
+
access_key: ...
|
|
192
|
+
secret_key: ...
|
|
193
|
+
|
|
194
|
+
mongodb: # only needed if dry or tracking uses mongodb
|
|
195
|
+
uri: mongodb://localhost:27017
|
|
196
|
+
database: smartcache
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
PUBLIC API
|
|
200
|
+
----------
|
|
201
|
+
|
|
202
|
+
from smartcache import CacheManager
|
|
203
|
+
|
|
204
|
+
# Load from config file
|
|
205
|
+
cache = CacheManager.from_config("smartcache.yaml")
|
|
206
|
+
|
|
207
|
+
# Or configure in code
|
|
208
|
+
cache = CacheManager(
|
|
209
|
+
hot=RamBackend(ttl_hours=4, max_size_gb=2),
|
|
210
|
+
cold=RamBackend(ttl_hours=24, max_size_gb=10),
|
|
211
|
+
dry=S3Backend(bucket="my-bucket"),
|
|
212
|
+
tracking=RedisTracking(host="localhost", port=6379),
|
|
213
|
+
)
|
|
214
|
+
|
|
215
|
+
# Basic usage
|
|
216
|
+
value = await cache.get("my-key")
|
|
217
|
+
|
|
218
|
+
# Use tier default TTL
|
|
219
|
+
await cache.set("my-key", data)
|
|
220
|
+
|
|
221
|
+
# Override TTL for this key only
|
|
222
|
+
await cache.set("my-key", data, ttl_hours=2)
|
|
223
|
+
|
|
224
|
+
# Tag-based TTL (matches ttl_rules in config)
|
|
225
|
+
await cache.set("my-key", data, tags={"type": "thumbnail"})
|
|
226
|
+
|
|
227
|
+
# No expiry for this key
|
|
228
|
+
await cache.set("my-key", data, ttl_hours=None)
|
|
229
|
+
|
|
230
|
+
await cache.delete("my-key")
|
|
231
|
+
await cache.flush(tier="hot") # clear a specific tier
|
|
232
|
+
|
|
233
|
+
# Stats
|
|
234
|
+
stats = await cache.stats()
|
|
235
|
+
# { "hot_hits": 120, "cold_hits": 30, "dry_hits": 5, "misses": 2 }
|
|
236
|
+
|
|
237
|
+
|
|
238
|
+
PROJECT STRUCTURE
|
|
239
|
+
-----------------
|
|
240
|
+
|
|
241
|
+
smartcache/
|
|
242
|
+
├── pyproject.toml
|
|
243
|
+
├── src/
|
|
244
|
+
│ └── smartcache/
|
|
245
|
+
│ ├── __init__.py ← public API surface
|
|
246
|
+
│ ├── manager.py ← CacheManager, lookup chain logic
|
|
247
|
+
│ ├── config.py ← YAML loading, validation
|
|
248
|
+
│ ├── backends/
|
|
249
|
+
│ │ ├── base.py ← AbstractBackend interface
|
|
250
|
+
│ │ ├── ram.py ← shared by hot and cold
|
|
251
|
+
│ │ ├── memcached.py ← shared by hot and cold
|
|
252
|
+
│ │ └── dry/
|
|
253
|
+
│ │ ├── local.py
|
|
254
|
+
│ │ ├── s3.py
|
|
255
|
+
│ │ └── mongodb.py ← GridFS + TTL indexes
|
|
256
|
+
│ └── tracking/
|
|
257
|
+
│ ├── base.py ← AbstractTracking interface
|
|
258
|
+
│ ├── redis.py ← default
|
|
259
|
+
│ ├── sqlite.py
|
|
260
|
+
│ ├── postgres.py
|
|
261
|
+
│ └── mongodb.py
|
|
262
|
+
└── tests/
|
|
263
|
+
├── test_manager.py
|
|
264
|
+
├── test_backends/
|
|
265
|
+
└── test_tracking/
|
|
266
|
+
|
|
267
|
+
Note: hot and cold share the same backend implementations (ram.py, memcached.py)
|
|
268
|
+
— they are just two instances with different size and TTL config.
|
|
269
|
+
|
|
270
|
+
|
|
271
|
+
OPTIONAL DEPENDENCIES (extras)
|
|
272
|
+
-------------------------------
|
|
273
|
+
|
|
274
|
+
pip install tiercache → base only (RAM + local + SQLite tracking)
|
|
275
|
+
pip install tiercache[memcached] → adds Memcached hot/cold backends
|
|
276
|
+
pip install tiercache[s3] → adds S3 dry backend
|
|
277
|
+
pip install tiercache[mongodb] → adds MongoDB dry backend + tracking
|
|
278
|
+
pip install tiercache[postgres] → adds PostgreSQL tracking
|
|
279
|
+
pip install tiercache[all] → everything
|
|
280
|
+
|
|
281
|
+
|
|
282
|
+
PACKAGING STANDARDS
|
|
283
|
+
-------------------
|
|
284
|
+
- pyproject.toml (PEP 517/518, modern standard — no setup.py)
|
|
285
|
+
- src/ layout (prevents accidental imports during development)
|
|
286
|
+
- Semantic versioning (1.0.0)
|
|
287
|
+
- Type hints on all public methods (PEP 484)
|
|
288
|
+
- Async-first API (asyncio, with optional sync wrappers)
|
|
289
|
+
- Lazy imports for optional backends (clear error if dep not installed)
|