fastapi-cachex 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fastapi_cachex/__init__.py +7 -0
- fastapi_cachex/backends/__init__.py +13 -0
- fastapi_cachex/backends/base.py +70 -0
- fastapi_cachex/backends/memcached.py +239 -0
- fastapi_cachex/backends/memory.py +198 -0
- fastapi_cachex/backends/redis.py +300 -0
- fastapi_cachex/cache.py +301 -0
- fastapi_cachex/dependencies.py +16 -0
- fastapi_cachex/directives.py +21 -0
- fastapi_cachex/exceptions.py +17 -0
- fastapi_cachex/proxy.py +36 -0
- fastapi_cachex/py.typed +0 -0
- fastapi_cachex/routes.py +311 -0
- fastapi_cachex/types.py +25 -0
- fastapi_cachex-0.2.1.dist-info/METADATA +242 -0
- fastapi_cachex-0.2.1.dist-info/RECORD +17 -0
- fastapi_cachex-0.2.1.dist-info/WHEEL +4 -0
fastapi_cachex/routes.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
1
|
+
"""Optional routes for cache monitoring and management."""
|
|
2
|
+
|
|
3
|
+
import time
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
from typing import TYPE_CHECKING
|
|
6
|
+
|
|
7
|
+
from fastapi_cachex.backends import BaseCacheBackend
|
|
8
|
+
from fastapi_cachex.exceptions import BackendNotFoundError
|
|
9
|
+
from fastapi_cachex.proxy import BackendProxy
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from fastapi import FastAPI
|
|
13
|
+
|
|
14
|
+
# Constants
# A cache key has the shape ``method:host:path:query_params``.  Parsing
# requires at least the first three components (MIN_PARTS); the key is
# split at most three times (MAX_PARTS) so any ":" inside the query
# string stays intact in the fourth component.
CACHE_KEY_MIN_PARTS = 3
CACHE_KEY_MAX_PARTS = 3
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass
class CacheHitRecord:
    """Record for a single cache hit.

    Describes one entry currently stored in the cache backend, parsed
    from its ``method:host:path:query_params`` key.
    """

    cache_key: str  # Full raw backend key.
    method: str  # HTTP method component of the key.
    host: str  # Host component of the key.
    path: str  # Path component of the key.
    query_params: str  # Query-string component ("" when absent).
    etag: str  # ETag stored alongside the cached content.
    is_expired: bool  # True when the entry's expiry timestamp has passed.
    ttl_remaining: float | None  # Seconds until expiry; None = never expires.
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
@dataclass
class CacheHitSummary:
    """Summary of cache hit statistics."""

    total_cached_entries: int  # All entries found, expired or not.
    active_entries: int  # Entries whose TTL has not yet elapsed.
    frequently_cached_routes: list[str]  # Sorted distinct paths of active entries.
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
@dataclass
class CacheHitsResponse:
    """Response for cached hits endpoint."""

    cached_hits: list[CacheHitRecord]  # One record per parseable cache entry.
    total_hits: int  # len(cached_hits).
    valid_hits: int  # Count of non-expired records.
    expired_hits: int  # total_hits - valid_hits.
    unique_routes: int  # Distinct paths among the valid records.
    summary: CacheHitSummary  # Aggregated view of the counters above.
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
@dataclass
class CachedRecord:
    """Record for a single cached item.

    Extends the key-derived fields with details about the stored payload.
    """

    cache_key: str  # Full raw backend key.
    method: str  # HTTP method component of the key.
    host: str  # Host component of the key.
    path: str  # Path component of the key.
    query_params: str  # Query-string component ("" when absent).
    etag: str  # ETag stored alongside the cached content.
    content_type: str  # Python type name of the stored payload.
    content_size: int  # len() of bytes/str payloads; 0 for other types.
    is_expired: bool  # True when the entry's expiry timestamp has passed.
    ttl_remaining: float | None  # Seconds until expiry; None = never expires.
    content_preview: str  # First 100 chars of the payload (bytes decoded lossily).
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
@dataclass
class CacheSummary:
    """Summary of cached records."""

    total_entries: int  # All records found, expired or not.
    valid_entries: int  # Records whose TTL has not yet elapsed.
    estimated_cache_size_kb: float  # Sum of content sizes, converted to KiB.
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
@dataclass
class CachedRecordsResponse:
    """Response for cached records endpoint."""

    cached_records: list[CachedRecord]  # One record per parseable cache entry.
    total_records: int  # len(cached_records).
    active_records: int  # Count of non-expired records.
    expired_records: int  # total_records - active_records.
    total_cache_size_bytes: int  # Sum of content_size over all records.
    summary: CacheSummary  # Aggregated view of the counters above.
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def _parse_cache_key(cache_key: str) -> tuple[str, str, str, str]:
    """Split a cache key of the form ``method:host:path:query_params``.

    Args:
        cache_key: Cache key in format method:host:path:query_params

    Returns:
        Tuple of (method, host, path, query_params).  All four values are
        empty strings when the key lacks the minimum number of components.
    """
    # NOTE(review): a host that itself contains ":" (e.g. "localhost:8000")
    # would shift these fields — confirm keys never embed a port this way.
    parts = cache_key.split(":", CACHE_KEY_MAX_PARTS)
    if len(parts) < CACHE_KEY_MIN_PARTS:
        return "", "", "", ""
    method, host, path, *rest = parts
    return method, host, path, rest[0] if rest else ""
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
async def _get_cached_hits_handler(backend: BaseCacheBackend) -> CacheHitsResponse:
    """Build cache-hit statistics from the backend's current contents.

    Args:
        backend: The cache backend instance

    Returns:
        CacheHitsResponse with one record per parseable cache entry plus
        aggregate counters.
    """
    cache_data = await backend.get_cache_data()
    now = time.time()

    hits: list[CacheHitRecord] = []
    for cache_key, (etag_content, expiry) in cache_data.items():
        method, host, path, query_params = _parse_cache_key(cache_key)
        if not method:
            # Skip entries whose key does not match the expected format.
            continue

        hits.append(
            CacheHitRecord(
                cache_key=cache_key,
                method=method,
                host=host,
                path=path,
                query_params=query_params,
                etag=etag_content.etag,
                # An entry with no expiry (None) never expires.
                is_expired=expiry is not None and expiry <= now,
                ttl_remaining=None if expiry is None else round(expiry - now, 2),
            )
        )

    # Aggregate statistics over the non-expired subset.
    still_valid = [record for record in hits if not record.is_expired]
    distinct_paths = {record.path for record in still_valid}

    return CacheHitsResponse(
        cached_hits=hits,
        total_hits=len(hits),
        valid_hits=len(still_valid),
        expired_hits=len(hits) - len(still_valid),
        unique_routes=len(distinct_paths),
        summary=CacheHitSummary(
            total_cached_entries=len(hits),
            active_entries=len(still_valid),
            frequently_cached_routes=sorted(distinct_paths),
        ),
    )
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
async def _get_cached_records_handler(
    backend: BaseCacheBackend,
) -> CachedRecordsResponse:
    """Build a listing of every record currently held by the backend.

    Args:
        backend: The cache backend instance

    Returns:
        CachedRecordsResponse with per-entry payload details and size totals.
    """
    cache_data = await backend.get_cache_data()
    now = time.time()

    records: list[CachedRecord] = []
    for cache_key, (etag_content, expiry) in cache_data.items():
        method, host, path, query_params = _parse_cache_key(cache_key)
        if not method:
            # Skip entries whose key does not match the expected format.
            continue

        content = etag_content.content
        # Size is only meaningful for bytes/str payloads; others report 0.
        size = len(content) if isinstance(content, (bytes, str)) else 0

        # First 100 characters of the payload, decoded lossily for bytes.
        if isinstance(content, bytes):
            preview = content[:100].decode("utf-8", errors="ignore")
        else:
            preview = str(content)[:100]

        records.append(
            CachedRecord(
                cache_key=cache_key,
                method=method,
                host=host,
                path=path,
                query_params=query_params,
                etag=etag_content.etag,
                content_type=type(content).__name__,
                content_size=size,
                # An entry with no expiry (None) never expires.
                is_expired=expiry is not None and expiry <= now,
                ttl_remaining=None if expiry is None else round(expiry - now, 2),
                content_preview=preview,
            )
        )

    active = sum(1 for record in records if not record.is_expired)
    total_bytes = sum(record.content_size for record in records)

    return CachedRecordsResponse(
        cached_records=records,
        total_records=len(records),
        active_records=active,
        expired_records=len(records) - active,
        total_cache_size_bytes=total_bytes,
        summary=CacheSummary(
            total_entries=len(records),
            valid_entries=active,
            estimated_cache_size_kb=round(total_bytes / 1024, 2),
        ),
    )
|
|
228
|
+
|
|
229
|
+
|
|
230
|
+
def add_routes(
    app: "FastAPI", prefix: str = "", include_in_schema: bool = False
) -> None:
    """Add cache monitoring routes to the FastAPI application.

    Registers two GET endpoints — ``{prefix}/cached-hits`` and
    ``{prefix}/cached-records`` — that expose the cache backend's current
    contents for monitoring.  Both endpoints degrade gracefully to an
    empty payload when no backend has been configured.

    Args:
        app: FastAPI application instance
        prefix: URL prefix for the routes (e.g., "/api/cache", "/admin/cache").
            Defaults to "" (no prefix).
        include_in_schema: Whether to include routes in OpenAPI schema.
            Defaults to False.

    Example:
        from fastapi import FastAPI
        from fastapi_cachex import add_routes

        app = FastAPI()
        add_routes(app)  # Routes at /cached-hits and /cached-records

        # Or with prefix
        add_routes(app, prefix="/api/cache")  # Routes at /api/cache/cached-hits and /api/cache/cached-records
    """

    def _empty_hits_response() -> CacheHitsResponse:
        # Fallback payload when no cache backend has been registered.
        return CacheHitsResponse(
            cached_hits=[],
            total_hits=0,
            valid_hits=0,
            expired_hits=0,
            unique_routes=0,
            summary=CacheHitSummary(
                total_cached_entries=0,
                active_entries=0,
                frequently_cached_routes=[],
            ),
        )

    def _empty_records_response() -> CachedRecordsResponse:
        # Fallback payload when no cache backend has been registered.
        return CachedRecordsResponse(
            cached_records=[],
            total_records=0,
            active_records=0,
            expired_records=0,
            total_cache_size_bytes=0,
            summary=CacheSummary(
                total_entries=0,
                valid_entries=0,
                estimated_cache_size_kb=0.0,
            ),
        )

    @app.get(f"{prefix}/cached-hits", include_in_schema=include_in_schema)
    async def get_cached_hits() -> CacheHitsResponse:
        """Return cached hit records.

        Shows cache statistics including which routes are frequently being cached,
        hit counts, and cache key information.

        Returns:
            CacheHitsResponse containing cache hit records and statistics
        """
        try:
            backend: BaseCacheBackend = BackendProxy.get_backend()
        except BackendNotFoundError:
            return _empty_hits_response()
        return await _get_cached_hits_handler(backend)

    @app.get(f"{prefix}/cached-records", include_in_schema=include_in_schema)
    async def get_cached_records() -> CachedRecordsResponse:
        """Display currently cached records.

        Returns all currently cached records in the cache backend with their
        content information and expiry details.

        Returns:
            CachedRecordsResponse containing cached records and statistics
        """
        try:
            backend: BaseCacheBackend = BackendProxy.get_backend()
        except BackendNotFoundError:
            return _empty_records_response()
        return await _get_cached_records_handler(backend)
|
fastapi_cachex/types.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
"""Type definitions and type aliases for FastAPI-CacheX."""
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
@dataclass
class ETagContent:
    """ETag and content for cache items."""

    etag: str  # ETag value used for If-None-Match validation.
    content: Any  # The cached response payload (type depends on the backend).
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass
class CacheItem:
    """Cache item with optional expiry time.

    Args:
        value: The cached ETagContent
        expiry: Epoch timestamp when this cache item expires (None = never expires)
    """

    value: ETagContent  # ETag plus payload stored under one cache key.
    expiry: float | None = None  # Absolute epoch time; None means no expiry.
|
|
@@ -0,0 +1,242 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: fastapi-cachex
|
|
3
|
+
Version: 0.2.1
|
|
4
|
+
Summary: A caching library for FastAPI with support for Cache-Control, ETag, and multiple backends.
|
|
5
|
+
Keywords: fastapi,cache,etag,cache-control,redis,memcached,in-memory
|
|
6
|
+
Author: Allen
|
|
7
|
+
Author-email: Allen <s96016641@gmail.com>
|
|
8
|
+
License-Expression: Apache-2.0
|
|
9
|
+
Classifier: Development Status :: 3 - Alpha
|
|
10
|
+
Classifier: Intended Audience :: Developers
|
|
11
|
+
Classifier: Programming Language :: Python
|
|
12
|
+
Classifier: Programming Language :: Python :: 3
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.14
|
|
18
|
+
Classifier: Programming Language :: Python :: 3 :: Only
|
|
19
|
+
Classifier: Framework :: FastAPI
|
|
20
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
21
|
+
Classifier: Topic :: Internet :: WWW/HTTP :: HTTP Servers
|
|
22
|
+
Requires-Dist: fastapi
|
|
23
|
+
Requires-Dist: pymemcache ; extra == 'memcache'
|
|
24
|
+
Requires-Dist: redis[hiredis] ; extra == 'redis'
|
|
25
|
+
Requires-Dist: orjson ; extra == 'redis'
|
|
26
|
+
Requires-Python: >=3.10
|
|
27
|
+
Project-URL: Homepage, https://github.com/allen0099/FastAPI-CacheX
|
|
28
|
+
Project-URL: Issues, https://github.com/allen0099/FastAPI-CacheX/issues
|
|
29
|
+
Project-URL: Repository, https://github.com/allen0099/FastAPI-CacheX.git
|
|
30
|
+
Provides-Extra: memcache
|
|
31
|
+
Provides-Extra: redis
|
|
32
|
+
Description-Content-Type: text/markdown
|
|
33
|
+
|
|
34
|
+
# FastAPI-Cache X
|
|
35
|
+
|
|
36
|
+
[](https://github.com/astral-sh/uv)
|
|
37
|
+
[](https://github.com/astral-sh/ruff)
|
|
38
|
+
[](https://github.com/allen0099/FastAPI-CacheX/actions/workflows/test.yml)
|
|
39
|
+
[](https://github.com/allen0099/FastAPI-CacheX/actions/workflows/coverage.yml)
|
|
40
|
+
|
|
41
|
+
[](https://pepy.tech/project/fastapi-cachex)
|
|
42
|
+
[](https://pepy.tech/project/fastapi-cachex)
|
|
43
|
+
[](https://pepy.tech/project/fastapi-cachex)
|
|
44
|
+
|
|
45
|
+
[](https://pypi.org/project/fastapi-cachex)
|
|
46
|
+
[](https://pypi.org/project/fastapi-cachex/)
|
|
47
|
+
|
|
48
|
+
[English](README.md) | [繁體中文](docs/README.zh-TW.md)
|
|
49
|
+
|
|
50
|
+
A high-performance caching extension for FastAPI, providing comprehensive HTTP caching support.
|
|
51
|
+
|
|
52
|
+
## Features
|
|
53
|
+
|
|
54
|
+
- Support for HTTP caching headers
|
|
55
|
+
- `Cache-Control`
|
|
56
|
+
- `ETag`
|
|
57
|
+
- `If-None-Match`
|
|
58
|
+
- Multiple backend cache support
|
|
59
|
+
- Redis
|
|
60
|
+
- Memcached
|
|
61
|
+
- In-memory cache
|
|
62
|
+
- Complete Cache-Control directive implementation
|
|
63
|
+
- Easy-to-use `@cache` decorator
|
|
64
|
+
|
|
65
|
+
### Cache-Control Directives
|
|
66
|
+
|
|
67
|
+
| Directive | Supported | Description |
|
|
68
|
+
|--------------------------|--------------------|---------------------------------------------------------------------------------------------------------|
|
|
69
|
+
| `max-age` | :white_check_mark: | Specifies the maximum amount of time a resource is considered fresh. |
|
|
70
|
+
| `s-maxage` | :x: | Specifies the maximum amount of time a resource is considered fresh for shared caches. |
|
|
71
|
+
| `no-cache` | :white_check_mark: | Forces caches to submit the request to the origin server for validation before releasing a cached copy. |
|
|
72
|
+
| `no-store` | :white_check_mark: | Instructs caches not to store any part of the request or response. |
|
|
73
|
+
| `no-transform` | :x: | Instructs caches not to transform the response content. |
|
|
74
|
+
| `must-revalidate` | :white_check_mark: | Forces caches to revalidate the response with the origin server after it becomes stale. |
|
|
75
|
+
| `proxy-revalidate` | :x: | Similar to `must-revalidate`, but only for shared caches. |
|
|
76
|
+
| `must-understand` | :x: | Indicates that the recipient must understand the directive or treat it as an error. |
|
|
77
|
+
| `private` | :white_check_mark: | Indicates that the response is intended for a single user and should not be stored by shared caches. |
|
|
78
|
+
| `public` | :white_check_mark: | Indicates that the response may be cached by any cache, even if it is normally non-cacheable. |
|
|
79
|
+
| `immutable` | :white_check_mark: | Indicates that the response body will not change over time, allowing for longer caching. |
|
|
80
|
+
| `stale-while-revalidate` | :white_check_mark: | Indicates that a cache can serve a stale response while it revalidates the response in the background. |
|
|
81
|
+
| `stale-if-error` | :white_check_mark: | Indicates that a cache can serve a stale response if the origin server is unavailable. |
|
|
82
|
+
|
|
83
|
+
## Installation
|
|
84
|
+
|
|
85
|
+
```bash
|
|
86
|
+
uv add fastapi-cachex
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
## Quick Start
|
|
90
|
+
|
|
91
|
+
```python
|
|
92
|
+
from fastapi import FastAPI
|
|
93
|
+
from fastapi_cachex import cache
|
|
94
|
+
from fastapi_cachex import CacheBackend
|
|
95
|
+
|
|
96
|
+
app = FastAPI()
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
@app.get("/")
|
|
100
|
+
@cache(ttl=60) # Cache for 60 seconds
|
|
101
|
+
async def read_root():
|
|
102
|
+
return {"Hello": "World"}
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
@app.get("/no-cache")
|
|
106
|
+
@cache(no_cache=True)  # Force revalidation with the origin server on every request
|
|
107
|
+
async def non_cache_endpoint():
|
|
108
|
+
return {"Hello": "World"}
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
@app.get("/no-store")
|
|
112
|
+
@cache(no_store=True) # Mark this endpoint as non-cacheable
|
|
113
|
+
async def non_store_endpoint():
|
|
114
|
+
return {"Hello": "World"}
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
@app.get("/clear_cache")
|
|
118
|
+
async def remove_cache(cache: CacheBackend):
|
|
119
|
+
await cache.clear_path("/path/to/clear") # Clear cache for a specific path
|
|
120
|
+
await cache.clear_pattern("/path/to/clear/*") # Clear cache for a specific pattern
|
|
121
|
+
```
|
|
122
|
+
|
|
123
|
+
## Backend Configuration
|
|
124
|
+
|
|
125
|
+
FastAPI-CacheX supports multiple caching backends. You can easily switch between them using the `BackendProxy`.
|
|
126
|
+
|
|
127
|
+
### Cache Key Format
|
|
128
|
+
|
|
129
|
+
Cache keys are generated in the following format to avoid collisions:
|
|
130
|
+
|
|
131
|
+
```
|
|
132
|
+
{method}:{host}:{path}:{query_params}
|
|
133
|
+
```
|
|
134
|
+
|
|
135
|
+
This ensures that:
|
|
136
|
+
- Different HTTP methods (GET, POST, etc.) don't share cache
|
|
137
|
+
- Different hosts don't share cache (useful for multi-tenant scenarios)
|
|
138
|
+
- Different query parameters get separate cache entries
|
|
139
|
+
- The same endpoint with different parameters can be cached independently
|
|
140
|
+
|
|
141
|
+
All backends automatically namespace keys with a prefix (e.g., `fastapi_cachex:`) to avoid conflicts with other applications.
|
|
142
|
+
|
|
143
|
+
### Cache Hit Behavior
|
|
144
|
+
|
|
145
|
+
When a cached entry is valid (within TTL):
|
|
146
|
+
- **Default behavior**: Returns the cached content with HTTP 200 status code directly without re-executing the endpoint handler
|
|
147
|
+
- **With `If-None-Match` header**: Returns HTTP 304 Not Modified if the ETag matches
|
|
148
|
+
- **With `no-cache` directive**: Forces revalidation with fresh content before deciding on 304
|
|
149
|
+
|
|
150
|
+
This means **cached hits are extremely fast** - the endpoint handler function is never executed.
|
|
151
|
+
|
|
152
|
+
### In-Memory Cache (default)
|
|
153
|
+
|
|
154
|
+
If you don't specify a backend, FastAPI-CacheX will use the in-memory cache by default.
|
|
155
|
+
This is suitable for development and testing purposes. The backend automatically runs
|
|
156
|
+
a cleanup task to remove expired entries every 60 seconds.
|
|
157
|
+
|
|
158
|
+
```python
|
|
159
|
+
from fastapi_cachex.backends import MemoryBackend
|
|
160
|
+
from fastapi_cachex import BackendProxy
|
|
161
|
+
|
|
162
|
+
backend = MemoryBackend()
|
|
163
|
+
BackendProxy.set_backend(backend)
|
|
164
|
+
```
|
|
165
|
+
|
|
166
|
+
**Note**: In-memory cache is not suitable for production with multiple processes.
|
|
167
|
+
Each process maintains its own separate cache.
|
|
168
|
+
|
|
169
|
+
### Memcached
|
|
170
|
+
|
|
171
|
+
```python
|
|
172
|
+
from fastapi_cachex.backends import MemcachedBackend
|
|
173
|
+
from fastapi_cachex import BackendProxy
|
|
174
|
+
|
|
175
|
+
backend = MemcachedBackend(servers=["localhost:11211"])
|
|
176
|
+
BackendProxy.set_backend(backend)
|
|
177
|
+
```
|
|
178
|
+
|
|
179
|
+
**Limitations**:
|
|
180
|
+
- Pattern-based key clearing (`clear_pattern`) is not supported by the Memcached protocol
|
|
181
|
+
- Keys are namespaced with `fastapi_cachex:` prefix to avoid conflicts
|
|
182
|
+
- Consider using Redis backend if you need pattern-based cache clearing
|
|
183
|
+
|
|
184
|
+
### Redis
|
|
185
|
+
|
|
186
|
+
```python
|
|
187
|
+
from fastapi_cachex.backends import AsyncRedisCacheBackend
|
|
188
|
+
from fastapi_cachex import BackendProxy
|
|
189
|
+
|
|
190
|
+
backend = AsyncRedisCacheBackend(host="127.0.0.1", port=6379, db=0)
|
|
191
|
+
BackendProxy.set_backend(backend)
|
|
192
|
+
```
|
|
193
|
+
|
|
194
|
+
**Features**:
|
|
195
|
+
- Fully async implementation
|
|
196
|
+
- Supports pattern-based key clearing
|
|
197
|
+
- Uses SCAN instead of KEYS for safe production use (non-blocking)
|
|
198
|
+
- Namespaced with `fastapi_cachex:` prefix by default
|
|
199
|
+
- Optional custom key prefix for multi-tenant scenarios
|
|
200
|
+
|
|
201
|
+
**Example with custom prefix**:
|
|
202
|
+
|
|
203
|
+
```python
|
|
204
|
+
backend = AsyncRedisCacheBackend(
|
|
205
|
+
host="127.0.0.1",
|
|
206
|
+
port=6379,
|
|
207
|
+
key_prefix="myapp:cache:",
|
|
208
|
+
)
|
|
209
|
+
BackendProxy.set_backend(backend)
|
|
210
|
+
```
|
|
211
|
+
|
|
212
|
+
## Performance Considerations
|
|
213
|
+
|
|
214
|
+
### Cache Hit Performance
|
|
215
|
+
|
|
216
|
+
When a cache hit occurs (within TTL), the response is returned directly without executing your endpoint handler. This is extremely fast:
|
|
217
|
+
|
|
218
|
+
```python
|
|
219
|
+
@app.get("/expensive")
|
|
220
|
+
@cache(ttl=3600) # Cache for 1 hour
|
|
221
|
+
async def expensive_operation():
|
|
222
|
+
# This is ONLY executed when cache misses
|
|
223
|
+
# On cache hits, this function is never called
|
|
224
|
+
result = perform_expensive_calculation()
|
|
225
|
+
return result
|
|
226
|
+
```
|
|
227
|
+
|
|
228
|
+
### Backend Selection
|
|
229
|
+
|
|
230
|
+
- **MemoryBackend**: Fastest for single-process development; not suitable for production
|
|
231
|
+
- **Memcached**: Good for distributed systems; has limitations on pattern clearing
|
|
232
|
+
- **Redis**: Best for production; fully async, supports all features, non-blocking operations
|
|
233
|
+
|
|
234
|
+
## Documentation
|
|
235
|
+
|
|
236
|
+
- [Cache Flow Explanation](docs/CACHE_FLOW.md)
|
|
237
|
+
- [Development Guide](docs/DEVELOPMENT.md)
|
|
238
|
+
- [Contributing Guidelines](docs/CONTRIBUTING.md)
|
|
239
|
+
|
|
240
|
+
## License
|
|
241
|
+
|
|
242
|
+
This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details.
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
fastapi_cachex/__init__.py,sha256=T8KTNtlTxqYUzUZTbDutL9QErRFc-Ev7yh406u578e4,325
|
|
2
|
+
fastapi_cachex/backends/__init__.py,sha256=9dZr4l-Ozca9PC1Qsnbjnks8-r6A22fFWL8WP9DOR-I,414
|
|
3
|
+
fastapi_cachex/backends/base.py,sha256=7nQ15GQ_c8r7OnKYAeK8DqCEnrT30Qt1OaCfXl28vZw,2083
|
|
4
|
+
fastapi_cachex/backends/memcached.py,sha256=HHreoCLYtNYaz_wuK9h3St5AOS7LvItnNXjazwvKvy8,8103
|
|
5
|
+
fastapi_cachex/backends/memory.py,sha256=djQbj-jeTd9MIu_G-HUcfDauoNhu9wxynfak-mR5nYY,6950
|
|
6
|
+
fastapi_cachex/backends/redis.py,sha256=uFNrHjFoshGmFMmMBb6ooVLS34GHz_Xl7WA8POE3sJk,9977
|
|
7
|
+
fastapi_cachex/cache.py,sha256=W1nr-sUjTsFluRfXpiZjOeKRz9sIT0J_fX5ZGB81bDU,11284
|
|
8
|
+
fastapi_cachex/dependencies.py,sha256=RZfsO9U9BDdbhmZ7QEWd178vwQ_qb4vUvU6Z_fsmy4M,450
|
|
9
|
+
fastapi_cachex/directives.py,sha256=0W9_rRbxF1YioII7DNCa498ets3sHpqny0JLm-EUw5s,585
|
|
10
|
+
fastapi_cachex/exceptions.py,sha256=64Ub9pOa4w_jCc4DEjIngYYLXPuzVYEkn5vh_CLS69I,432
|
|
11
|
+
fastapi_cachex/proxy.py,sha256=iEKuLX3Qc8z4NcokMANdigDuZmXrgYbcZPBH75BtRkk,1047
|
|
12
|
+
fastapi_cachex/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
13
|
+
fastapi_cachex/routes.py,sha256=A8KvOsFQniwoo2rS4sSnVzujTicHFbM38e2619nxDG0,9363
|
|
14
|
+
fastapi_cachex/types.py,sha256=DkNyZYl4bIe3Vrlz_FZnxjHyMbmniwrAVwL0OVgccaM,498
|
|
15
|
+
fastapi_cachex-0.2.1.dist-info/WHEEL,sha256=ZyFSCYkV2BrxH6-HRVRg3R9Fo7MALzer9KiPYqNxSbo,79
|
|
16
|
+
fastapi_cachex-0.2.1.dist-info/METADATA,sha256=fEaMiWm77W-Mih0mDhm-EyvcpcfMczx2uzVuzlgUb2U,10310
|
|
17
|
+
fastapi_cachex-0.2.1.dist-info/RECORD,,
|