fastapi-cachex 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fastapi_cachex/__init__.py +7 -0
- fastapi_cachex/backends/__init__.py +13 -0
- fastapi_cachex/backends/base.py +70 -0
- fastapi_cachex/backends/memcached.py +239 -0
- fastapi_cachex/backends/memory.py +198 -0
- fastapi_cachex/backends/redis.py +300 -0
- fastapi_cachex/cache.py +301 -0
- fastapi_cachex/dependencies.py +16 -0
- fastapi_cachex/directives.py +21 -0
- fastapi_cachex/exceptions.py +17 -0
- fastapi_cachex/proxy.py +36 -0
- fastapi_cachex/py.typed +0 -0
- fastapi_cachex/routes.py +311 -0
- fastapi_cachex/types.py +25 -0
- fastapi_cachex-0.2.1.dist-info/METADATA +242 -0
- fastapi_cachex-0.2.1.dist-info/RECORD +17 -0
- fastapi_cachex-0.2.1.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,300 @@
|
|
|
1
|
+
"""Redis cache backend implementation."""
|
|
2
|
+
|
|
3
|
+
from typing import TYPE_CHECKING
|
|
4
|
+
from typing import Any
|
|
5
|
+
from typing import Literal
|
|
6
|
+
|
|
7
|
+
from fastapi_cachex.backends.base import BaseCacheBackend
|
|
8
|
+
from fastapi_cachex.exceptions import CacheXError
|
|
9
|
+
from fastapi_cachex.types import ETagContent
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from redis.asyncio import Redis as AsyncRedis
|
|
13
|
+
|
|
14
|
+
try:
|
|
15
|
+
import orjson as json
|
|
16
|
+
|
|
17
|
+
except ImportError: # pragma: no cover
|
|
18
|
+
import json # type: ignore[no-redef] # pragma: no cover
|
|
19
|
+
|
|
20
|
+
# Default Redis key prefix for fastapi-cachex
|
|
21
|
+
DEFAULT_REDIS_PREFIX = "fastapi_cachex:"
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class AsyncRedisCacheBackend(BaseCacheBackend):
    """Async Redis cache backend implementation.

    This backend uses Redis with a key prefix to avoid conflicts with other
    applications. Keys are namespaced with 'fastapi_cachex:' by default.

    All bulk operations use SCAN (never KEYS) so they do not block the Redis
    server, and deletes are issued in bounded batches to keep command sizes
    small.
    """

    client: "AsyncRedis[str]"
    key_prefix: str

    # Keys fetched per SCAN iteration and deleted per DEL command.
    _BATCH_SIZE = 100

    def __init__(
        self,
        host: str = "127.0.0.1",
        port: int = 6379,
        password: str | None = None,
        db: int = 0,
        encoding: str = "utf-8",
        decode_responses: Literal[True] = True,
        socket_timeout: float = 1.0,
        socket_connect_timeout: float = 1.0,
        key_prefix: str = DEFAULT_REDIS_PREFIX,
        **kwargs: Any,
    ) -> None:
        """Initialize async Redis cache backend.

        Args:
            host: Redis host
            port: Redis port
            password: Redis password
            db: Redis database number
            encoding: Character encoding to use
            decode_responses: Whether to decode response automatically
            socket_timeout: Timeout for socket operations (in seconds)
            socket_connect_timeout: Timeout for socket connection (in seconds)
            key_prefix: Prefix for all cache keys (default: 'fastapi_cachex:')
            **kwargs: Additional arguments to pass to Redis client

        Raises:
            CacheXError: If the optional ``redis`` dependency is not installed.
        """
        # Imported lazily so the package works without the redis extra.
        try:
            from redis.asyncio import Redis as AsyncRedis
        except ImportError as err:
            msg = "redis[hiredis] is not installed. Please install it with 'pip install \"redis[hiredis]\"' "
            raise CacheXError(msg) from err

        self.client = AsyncRedis(
            host=host,
            port=port,
            password=password,
            db=db,
            encoding=encoding,
            decode_responses=decode_responses,
            socket_timeout=socket_timeout,
            socket_connect_timeout=socket_connect_timeout,
            **kwargs,
        )
        self.key_prefix = key_prefix

    def _make_key(self, key: str) -> str:
        """Add prefix to cache key."""
        return f"{self.key_prefix}{key}"

    def _serialize(self, value: ETagContent) -> str:
        """Serialize ETagContent to a JSON string."""
        if isinstance(value.content, bytes):
            content = value.content.decode()
        else:
            content = value.content

        serialized: str | bytes = json.dumps(
            {
                "etag": value.etag,
                "content": content,
            },
        )

        # orjson returns bytes, stdlib json returns str
        return serialized.decode() if isinstance(serialized, bytes) else serialized

    def _deserialize(self, value: str | None) -> ETagContent | None:
        """Deserialize a JSON string to ETagContent.

        Returns None for missing values and for payloads that are not valid
        JSON or lack the expected keys (treated as a cache miss).
        """
        if value is None:
            return None
        try:
            data = json.loads(value)
            return ETagContent(
                etag=data["etag"],
                content=data["content"].encode()
                if isinstance(data["content"], str)
                else data["content"],
            )
        except (json.JSONDecodeError, KeyError):
            return None

    async def _scan_keys(self, pattern: str) -> list[str]:
        """Collect every key matching *pattern* via SCAN.

        SCAN is used instead of KEYS so the server is never blocked by a
        single long-running command.
        """
        cursor = 0
        matched: list[str] = []
        while True:
            cursor, keys = await self.client.scan(
                cursor,
                match=pattern,
                count=self._BATCH_SIZE,
            )
            if keys:
                matched.extend(keys)
            if cursor == 0:
                break
        return matched

    async def _delete_batched(self, keys: list[str]) -> int:
        """Delete *keys* in batches to avoid oversized DEL commands.

        Returns:
            Number of keys actually deleted.
        """
        deleted = 0
        for start in range(0, len(keys), self._BATCH_SIZE):
            batch = keys[start : start + self._BATCH_SIZE]
            if batch:
                deleted += await self.client.delete(*batch)
        return deleted

    async def get(self, key: str) -> ETagContent | None:
        """Retrieve a cached response."""
        result = await self.client.get(self._make_key(key))
        return self._deserialize(result)

    async def set(self, key: str, value: ETagContent, ttl: int | None = None) -> None:
        """Store a response in the cache.

        Args:
            key: Cache key (un-prefixed; the prefix is added internally)
            value: The ETag/content pair to store
            ttl: Expiry in seconds, or None for no expiry
        """
        serialized = self._serialize(value)
        prefixed_key = self._make_key(key)
        if ttl is not None:
            await self.client.setex(prefixed_key, ttl, serialized)
        else:
            await self.client.set(prefixed_key, serialized)

    async def delete(self, key: str) -> None:
        """Remove a response from the cache."""
        await self.client.delete(self._make_key(key))

    async def clear(self) -> None:
        """Clear all cached responses for this namespace.

        Uses SCAN instead of KEYS to avoid blocking in production.
        Only deletes keys within this backend's prefix.
        """
        keys_to_delete = await self._scan_keys(f"{self.key_prefix}*")
        await self._delete_batched(keys_to_delete)

    async def clear_path(self, path: str, include_params: bool = False) -> int:
        """Clear cached responses for a specific path.

        Uses SCAN instead of KEYS to avoid blocking in production.

        Args:
            path: The path to clear cache for
            include_params: Whether to clear all parameter variations

        Returns:
            Number of cache entries cleared
        """
        # Cache keys look like <prefix><method>:<host>:<path>:<query>; the
        # leading '*' matches the method/host components.
        if include_params:
            # Clear all variations: *:path:*
            pattern = f"{self.key_prefix}*:{path}:*"
        else:
            # Clear only exact path (no query params): *:path
            pattern = f"{self.key_prefix}*:{path}"

        keys_to_delete = await self._scan_keys(pattern)
        return await self._delete_batched(keys_to_delete)

    async def clear_pattern(self, pattern: str) -> int:
        """Clear cached responses matching a pattern.

        Uses SCAN instead of KEYS to avoid blocking in production.

        Args:
            pattern: A glob pattern to match cache keys against

        Returns:
            Number of cache entries cleared
        """
        # Ensure pattern includes the key prefix
        if not pattern.startswith(self.key_prefix):
            full_pattern = f"{self.key_prefix}{pattern}"
        else:
            full_pattern = pattern

        keys_to_delete = await self._scan_keys(full_pattern)
        return await self._delete_batched(keys_to_delete)

    async def get_all_keys(self) -> list[str]:
        """Get all cache keys in the backend.

        Returns:
            List of all (prefixed) cache keys currently stored in the backend
        """
        return await self._scan_keys(f"{self.key_prefix}*")

    async def get_cache_data(self) -> dict[str, tuple[ETagContent, float | None]]:
        """Get all cache data with expiry information.

        Returns:
            Dictionary mapping cache keys to (ETagContent, expiry) tuples.
            Note: Redis stores TTL but not absolute expiry time, so this
            returns None for expiry (no expiry tracking in Redis backend).
        """
        all_keys = await self.get_all_keys()
        cache_data: dict[str, tuple[ETagContent, float | None]] = {}

        for prefixed_key in all_keys:
            # Remove prefix to get the original cache key
            original_key = prefixed_key.removeprefix(self.key_prefix)

            # Get the value using the original key (get() adds prefix internally)
            value = await self.get(original_key)
            if value is not None:
                cache_data[original_key] = (value, None)

        return cache_data
fastapi_cachex/cache.py
ADDED
|
@@ -0,0 +1,301 @@
|
|
|
1
|
+
"""Core caching functionality and decorators."""
|
|
2
|
+
|
|
3
|
+
import hashlib
|
|
4
|
+
import inspect
|
|
5
|
+
from collections.abc import Awaitable
|
|
6
|
+
from collections.abc import Callable
|
|
7
|
+
from functools import update_wrapper
|
|
8
|
+
from functools import wraps
|
|
9
|
+
from inspect import Parameter
|
|
10
|
+
from inspect import Signature
|
|
11
|
+
from typing import TYPE_CHECKING
|
|
12
|
+
from typing import Any
|
|
13
|
+
from typing import Literal
|
|
14
|
+
from typing import TypeVar
|
|
15
|
+
from typing import Union
|
|
16
|
+
|
|
17
|
+
from fastapi import Request
|
|
18
|
+
from fastapi import Response
|
|
19
|
+
from fastapi.datastructures import DefaultPlaceholder
|
|
20
|
+
from starlette.status import HTTP_304_NOT_MODIFIED
|
|
21
|
+
|
|
22
|
+
from fastapi_cachex.backends import MemoryBackend
|
|
23
|
+
from fastapi_cachex.directives import DirectiveType
|
|
24
|
+
from fastapi_cachex.exceptions import BackendNotFoundError
|
|
25
|
+
from fastapi_cachex.exceptions import CacheXError
|
|
26
|
+
from fastapi_cachex.exceptions import RequestNotFoundError
|
|
27
|
+
from fastapi_cachex.proxy import BackendProxy
|
|
28
|
+
from fastapi_cachex.types import ETagContent
|
|
29
|
+
|
|
30
|
+
if TYPE_CHECKING:
|
|
31
|
+
from fastapi.routing import APIRoute
|
|
32
|
+
|
|
33
|
+
# Response subtype produced by a decorated endpoint handler.
T = TypeVar("T", bound=Response)
# An async endpoint handler: returns an awaitable of the response.
AsyncCallable = Callable[..., Awaitable[T]]
# A plain synchronous endpoint handler.
SyncCallable = Callable[..., T]
# Either flavour of handler accepted by the @cache decorator.
AnyCallable = Union[AsyncCallable[T], SyncCallable[T]]  # noqa: UP007
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class CacheControl:
    """Builder for the Cache-Control response header.

    Directives are accumulated in insertion order and rendered as a single
    comma-separated header value via ``str()``.
    """

    def __init__(self) -> None:
        """Start with no directives."""
        self.directives: list[str] = []

    def add(self, directive: DirectiveType, value: int | None = None) -> None:
        """Append one Cache-Control directive.

        Args:
            directive: The directive type to add
            value: Optional numeric argument (e.g. seconds for max-age)
        """
        rendered = (
            directive.value if value is None else f"{directive.value}={value}"
        )
        self.directives.append(rendered)

    def __str__(self) -> str:
        """Render the accumulated directives as a header value."""
        return ", ".join(self.directives)
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
async def get_response(
    __func: AnyCallable[Response],
    __request: Request,
    /,
    *args: Any,
    **kwargs: Any,
) -> Response:
    """Get the response from the function."""
    # Invoke the handler, awaiting it only when it is a coroutine function.
    outcome = (
        await __func(*args, **kwargs)
        if inspect.iscoroutinefunction(__func)
        else __func(*args, **kwargs)
    )

    # Handlers may already produce a full Response; pass it through untouched.
    if isinstance(outcome, Response):
        return outcome

    # Otherwise wrap the raw return value using the route's response class.
    route: APIRoute | None = __request.scope.get("route")
    if route is None:  # pragma: no cover
        msg = "Route not found in request scope"
        raise CacheXError(msg)

    # FastAPI wraps the default response class in a DefaultPlaceholder;
    # unwrap it to get the concrete Response subclass.
    response_cls = route.response_class
    if isinstance(response_cls, DefaultPlaceholder):
        response_cls = response_cls.value

    return response_cls(content=outcome)
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def cache(  # noqa: C901
    ttl: int | None = None,
    stale_ttl: int | None = None,
    stale: Literal["error", "revalidate"] | None = None,
    no_cache: bool = False,
    no_store: bool = False,
    public: bool = False,
    private: bool = False,
    immutable: bool = False,
    must_revalidate: bool = False,
) -> Callable[[AnyCallable[Response]], AsyncCallable[Response]]:
    """Cache decorator for FastAPI route handlers.

    Args:
        ttl: Time-to-live in seconds for cache entries
        stale_ttl: Additional time-to-live for stale cache entries
        stale: Stale response handling strategy ('error' or 'revalidate')
        no_cache: Whether to disable caching
        no_store: Whether to prevent storing responses
        public: Whether responses can be cached by shared caches
        private: Whether responses are for single user only
        immutable: Whether cached responses never change
        must_revalidate: Whether to force revalidation when stale

    Returns:
        Decorator function that wraps route handlers with caching logic
    """

    def decorator(func: AnyCallable[Response]) -> AsyncCallable[Response]:  # noqa: C901
        # Resolve the backend once at decoration time, not per request.
        try:
            cache_backend = BackendProxy.get_backend()
        except BackendNotFoundError:
            # Fallback to memory backend if no backend is set
            cache_backend = MemoryBackend()
            BackendProxy.set_backend(cache_backend)

        # Analyze the original function's signature
        sig: Signature = inspect.signature(func)
        params: list[Parameter] = list(sig.parameters.values())

        # Check if Request is already in the parameters
        found_request: Parameter | None = next(
            (param for param in params if param.annotation == Request),
            None,
        )

        # Add Request parameter if it's not present, so FastAPI injects the
        # Request object into the wrapper even when the handler did not ask
        # for it.
        if not found_request:
            request_name: str = "__cachex_request"

            request_param = inspect.Parameter(
                request_name,
                inspect.Parameter.KEYWORD_ONLY,
                annotation=Request,
            )

            sig = sig.replace(parameters=[*params, request_param])

        else:
            request_name = found_request.name

        async def get_cache_control(cache_control: CacheControl) -> str:  # noqa: C901
            # Set Cache-Control headers
            if no_cache:
                cache_control.add(DirectiveType.NO_CACHE)
                if must_revalidate:
                    cache_control.add(DirectiveType.MUST_REVALIDATE)
            else:
                # Handle normal cache control cases
                # 1. Access scope (public/private)
                if public:
                    cache_control.add(DirectiveType.PUBLIC)
                elif private:
                    cache_control.add(DirectiveType.PRIVATE)

                # 2. Cache time settings
                if ttl is not None:
                    cache_control.add(DirectiveType.MAX_AGE, ttl)

                # 3. Validation related
                if must_revalidate:
                    cache_control.add(DirectiveType.MUST_REVALIDATE)

                # 4. Stale response handling
                if stale is not None and stale_ttl is None:
                    msg = "stale_ttl must be set if stale is used"
                    raise CacheXError(msg)

                if stale == "revalidate":
                    cache_control.add(DirectiveType.STALE_WHILE_REVALIDATE, stale_ttl)
                elif stale == "error":
                    cache_control.add(DirectiveType.STALE_IF_ERROR, stale_ttl)

                # 5. Special flags
                if immutable:
                    cache_control.add(DirectiveType.IMMUTABLE)

            return str(cache_control)

        @wraps(func)
        async def wrapper(*args: Any, **kwargs: Any) -> Response:  # noqa: C901
            # If the handler declared the Request itself, leave it in kwargs;
            # otherwise pop our injected parameter so it never reaches func.
            if found_request:
                req: Request | None = kwargs.get(request_name)
            else:
                req = kwargs.pop(request_name, None)

            if not req:  # pragma: no cover
                # Skip coverage for this case, as it should not happen
                raise RequestNotFoundError

            # Only cache GET requests
            if req.method != "GET":
                return await get_response(func, req, *args, **kwargs)

            # Generate cache key: method:host:path:query_params[:vary]
            # Include host to avoid cross-host cache pollution
            cache_key = f"{req.method}:{req.headers.get('host', 'unknown')}:{req.url.path}:{req.query_params}"
            client_etag = req.headers.get("if-none-match")
            cache_control = await get_cache_control(CacheControl())

            # Handle special case: no-store (highest priority)
            if no_store:
                response = await get_response(func, req, *args, **kwargs)
                cc = CacheControl()
                cc.add(DirectiveType.NO_STORE)
                response.headers["Cache-Control"] = str(cc)
                return response

            # Check cache and handle ETag validation
            cached_data = await cache_backend.get(cache_key)

            current_response = None
            current_etag = None

            if client_etag:
                if no_cache:
                    # Get fresh response first if using no-cache
                    current_response = await get_response(func, req, *args, **kwargs)
                    # MD5 is used for cache validation only, not security.
                    current_etag = (
                        f'W/"{hashlib.md5(current_response.body).hexdigest()}"'  # noqa: S324
                    )

                    if client_etag == current_etag:
                        # For no-cache, compare fresh data with client's ETag
                        return Response(
                            status_code=HTTP_304_NOT_MODIFIED,
                            headers={
                                "ETag": current_etag,
                                "Cache-Control": cache_control,
                            },
                        )

                # Compare with cached ETag - if match, return 304
                elif (
                    cached_data and client_etag == cached_data.etag
                ):  # pragma: no branch
                    # Cache hit with matching ETag: return 304 Not Modified
                    return Response(
                        status_code=HTTP_304_NOT_MODIFIED,
                        headers={
                            "ETag": cached_data.etag,
                            "Cache-Control": cache_control,
                        },
                    )

            # If we don't have If-None-Match header, check if we have a valid cached copy
            # and can serve it directly (cache hit without ETag comparison)
            if cached_data and not no_cache and ttl is not None:
                # We have a cached entry and TTL-based caching is enabled
                # Return the cached content directly with 200 OK without revalidation
                return Response(
                    content=cached_data.content,
                    status_code=200,
                    headers={
                        "ETag": cached_data.etag,
                        "Cache-Control": cache_control,
                    },
                )

            if not current_response or not current_etag:
                # Retrieve the current response if not already done
                current_response = await get_response(func, req, *args, **kwargs)
                current_etag = f'W/"{hashlib.md5(current_response.body).hexdigest()}"'  # noqa: S324

            # Set ETag header
            current_response.headers["ETag"] = current_etag

            # Update cache if needed
            if not cached_data or cached_data.etag != current_etag:
                # Store in cache if data changed
                await cache_backend.set(
                    cache_key,
                    ETagContent(current_etag, current_response.body),
                    ttl=ttl,
                )

            current_response.headers["Cache-Control"] = cache_control
            return current_response

        # Update the wrapper with the new signature so FastAPI's dependency
        # injection sees the (possibly augmented) parameter list.
        update_wrapper(wrapper, func)
        wrapper.__signature__ = sig  # type: ignore[attr-defined]

        return wrapper

    return decorator
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
"""FastAPI dependency injection utilities for cache control."""
|
|
2
|
+
|
|
3
|
+
from typing import Annotated
|
|
4
|
+
|
|
5
|
+
from fastapi import Depends
|
|
6
|
+
|
|
7
|
+
from fastapi_cachex.backends.base import BaseCacheBackend
|
|
8
|
+
from fastapi_cachex.proxy import BackendProxy
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def get_cache_backend() -> BaseCacheBackend:
    """Dependency to get the current cache backend instance.

    Raises:
        BackendNotFoundError: If no backend has been configured
            (propagated from BackendProxy.get_backend).
    """
    return BackendProxy.get_backend()


# Annotated alias: declare a handler parameter as `backend: CacheBackend`
# and FastAPI injects the active backend via Depends.
CacheBackend = Annotated[BaseCacheBackend, Depends(get_cache_backend)]
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
"""Cache-Control directive types and enumerations."""
|
|
2
|
+
|
|
3
|
+
from enum import Enum
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class DirectiveType(Enum):
    """Cache-Control header directives.

    Member values are the exact directive tokens emitted in the
    Cache-Control response header.
    """

    # Freshness lifetimes
    MAX_AGE = "max-age"
    S_MAXAGE = "s-maxage"
    # Storage / transformation restrictions
    NO_CACHE = "no-cache"
    NO_STORE = "no-store"
    NO_TRANSFORM = "no-transform"
    # Revalidation requirements
    MUST_REVALIDATE = "must-revalidate"
    PROXY_REVALIDATE = "proxy-revalidate"
    MUST_UNDERSTAND = "must-understand"
    # Audience scope
    PRIVATE = "private"
    PUBLIC = "public"
    # Immutability flag
    IMMUTABLE = "immutable"
    # Stale-response handling extensions
    STALE_WHILE_REVALIDATE = "stale-while-revalidate"
    STALE_IF_ERROR = "stale-if-error"
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
"""Custom exception classes for FastAPI-CacheX."""
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class CacheXError(Exception):
    """Root of the FastAPI-CacheX exception hierarchy; catch this to handle any library error."""


class CacheError(CacheXError):
    """Raised for errors during cache operations."""


class BackendNotFoundError(CacheXError):
    """Raised when no cache backend has been configured."""


class RequestNotFoundError(CacheXError):
    """Raised when the Request object cannot be located."""
|
fastapi_cachex/proxy.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
"""Backend proxy for managing cache backend instances."""
|
|
2
|
+
|
|
3
|
+
from fastapi_cachex.backends import BaseCacheBackend
|
|
4
|
+
from fastapi_cachex.exceptions import BackendNotFoundError
|
|
5
|
+
|
|
6
|
+
# Module-level singleton holding the active backend. BackendProxy reads it
# directly and rebinds it via the `global` statement in set_backend().
_default_backend: BaseCacheBackend | None = None


class BackendProxy:
    """FastAPI CacheX Proxy for backend management.

    Thin static facade over the module-level `_default_backend` singleton.
    """

    @staticmethod
    def get_backend() -> BaseCacheBackend:
        """Get the current cache backend instance.

        Returns:
            The current cache backend

        Raises:
            BackendNotFoundError: If no backend has been set
        """
        if _default_backend is None:
            msg = "Backend is not set. Please set the backend first."
            raise BackendNotFoundError(msg)

        return _default_backend

    @staticmethod
    def set_backend(backend: BaseCacheBackend | None) -> None:
        """Set the backend for caching.

        Args:
            backend: The backend to use for caching, or None to clear the current backend
        """
        # Intentional module-level mutation: all callers share one backend.
        global _default_backend
        _default_backend = backend
|
fastapi_cachex/py.typed
ADDED
|
File without changes
|