fastapi-cachex 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fastapi_cachex/__init__.py +7 -0
- fastapi_cachex/backends/__init__.py +13 -0
- fastapi_cachex/backends/base.py +70 -0
- fastapi_cachex/backends/memcached.py +239 -0
- fastapi_cachex/backends/memory.py +198 -0
- fastapi_cachex/backends/redis.py +300 -0
- fastapi_cachex/cache.py +301 -0
- fastapi_cachex/dependencies.py +16 -0
- fastapi_cachex/directives.py +21 -0
- fastapi_cachex/exceptions.py +17 -0
- fastapi_cachex/proxy.py +36 -0
- fastapi_cachex/py.typed +0 -0
- fastapi_cachex/routes.py +311 -0
- fastapi_cachex/types.py +25 -0
- fastapi_cachex-0.2.1.dist-info/METADATA +242 -0
- fastapi_cachex-0.2.1.dist-info/RECORD +17 -0
- fastapi_cachex-0.2.1.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
"""FastAPI-CacheX: A powerful and flexible caching extension for FastAPI."""
|
|
2
|
+
|
|
3
|
+
from .cache import cache as cache
|
|
4
|
+
from .dependencies import CacheBackend as CacheBackend
|
|
5
|
+
from .dependencies import get_cache_backend as get_cache_backend
|
|
6
|
+
from .proxy import BackendProxy as BackendProxy
|
|
7
|
+
from .routes import add_routes as add_routes
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
"""Cache backend implementations for FastAPI-CacheX."""
|
|
2
|
+
|
|
3
|
+
from fastapi_cachex.backends.base import BaseCacheBackend
|
|
4
|
+
from fastapi_cachex.backends.memcached import MemcachedBackend
|
|
5
|
+
from fastapi_cachex.backends.memory import MemoryBackend
|
|
6
|
+
from fastapi_cachex.backends.redis import AsyncRedisCacheBackend
|
|
7
|
+
|
|
8
|
+
# Public API of the backends subpackage, re-exported for star-imports.
__all__ = [
    "AsyncRedisCacheBackend",
    "BaseCacheBackend",
    "MemcachedBackend",
    "MemoryBackend",
]
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
"""Base cache backend interface and abstract implementation."""
|
|
2
|
+
|
|
3
|
+
from abc import ABC
|
|
4
|
+
from abc import abstractmethod
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from fastapi_cachex.types import ETagContent
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class BaseCacheBackend(ABC):
    """Abstract contract every cache backend must satisfy.

    Concrete implementations (memory, Redis, Memcached, ...) supply the
    actual storage; this class only fixes the async interface they share.
    """

    @abstractmethod
    async def get(self, key: str) -> ETagContent | None:
        """Look up a cached response by key, returning None on a miss."""

    @abstractmethod
    async def set(self, key: str, value: ETagContent, ttl: int | None = None) -> None:
        """Cache a response under *key*, optionally expiring after *ttl* seconds."""

    @abstractmethod
    async def delete(self, key: str) -> None:
        """Drop a single cached response."""

    @abstractmethod
    async def clear(self) -> None:
        """Drop every cached response held by this backend."""

    @abstractmethod
    async def clear_path(self, path: str, include_params: bool = False) -> int:
        """Drop cached responses for one path.

        Args:
            path: The path whose cache entries should be removed.
            include_params: When True, also remove every query-parameter
                variation of the path.

        Returns:
            How many cache entries were removed.
        """

    @abstractmethod
    async def clear_pattern(self, pattern: str) -> int:
        """Drop cached responses whose key matches a glob pattern.

        Args:
            pattern: Glob pattern matched against cache keys (e.g. "/users/*").

        Returns:
            How many cache entries were removed.
        """

    @abstractmethod
    async def get_all_keys(self) -> list[str]:
        """Return every cache key currently stored in the backend."""

    @abstractmethod
    async def get_cache_data(self) -> dict[str, tuple[Any, float | None]]:
        """Return all cache entries with their expiry information.

        Intended for cache monitoring / statistics endpoints.

        Returns:
            Mapping of cache key to a (value, expiry) tuple, where expiry
            is None for entries that never expire.
        """
|
|
@@ -0,0 +1,239 @@
|
|
|
1
|
+
"""Memcached cache backend implementation."""
|
|
2
|
+
|
|
3
|
+
import warnings
|
|
4
|
+
|
|
5
|
+
from fastapi_cachex.backends.base import BaseCacheBackend
|
|
6
|
+
from fastapi_cachex.exceptions import CacheXError
|
|
7
|
+
from fastapi_cachex.types import ETagContent
|
|
8
|
+
|
|
9
|
+
try:
|
|
10
|
+
import orjson as json
|
|
11
|
+
|
|
12
|
+
except ImportError: # pragma: no cover
|
|
13
|
+
import json # type: ignore[no-redef] # pragma: no cover
|
|
14
|
+
|
|
15
|
+
# Default Memcached key prefix for fastapi-cachex
|
|
16
|
+
DEFAULT_MEMCACHE_PREFIX = "fastapi_cachex:"
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class MemcachedBackend(BaseCacheBackend):
    """Memcached backend implementation.

    Note: This implementation uses synchronous pymemcache client but wraps it
    in async methods. For blocking concerns, consider using aiomcache for
    true async Memcached operations. Keys are namespaced with 'fastapi_cachex:'
    by default to avoid conflicts with other applications.

    Limitations:
    - Pattern-based clearing (clear_pattern) is not supported by Memcached protocol
    - Operations are wrapped to appear async but use blocking sync client internally
    """

    # Namespace prepended to every key before it reaches Memcached.
    key_prefix: str

    def __init__(
        self,
        servers: list[str],
        key_prefix: str = DEFAULT_MEMCACHE_PREFIX,
    ) -> None:
        """Initialize the Memcached backend.

        Args:
            servers: List of Memcached servers in format ["host:port", ...]
            key_prefix: Prefix for all cache keys (default: 'fastapi_cachex:')

        Raises:
            CacheXError: If pymemcache is not installed
        """
        try:
            from pymemcache import HashClient
        except ImportError as exc:
            msg = "pymemcache is not installed. Please install it with 'pip install pymemcache'"
            # Chain the ImportError explicitly so the root cause stays visible.
            raise CacheXError(msg) from exc

        self.client = HashClient(servers, connect_timeout=5, timeout=5)
        self.key_prefix = key_prefix

    def _make_key(self, key: str) -> str:
        """Add prefix to cache key."""
        return f"{self.key_prefix}{key}"

    async def get(self, key: str) -> ETagContent | None:
        """Get value from cache.

        Args:
            key: Cache key to retrieve

        Returns:
            Optional[ETagContent]: Cached value with ETag if exists, None otherwise
        """
        import base64

        prefixed_key = self._make_key(key)
        value = self.client.get(prefixed_key)
        if value is None:
            return None

        # Memcached stores raw bytes; both orjson and the stdlib json module
        # accept bytes input directly, so no explicit decode step is needed.
        try:
            data = json.loads(value)
            if not isinstance(data, dict):
                # Foreign/corrupt entry (e.g. written by another app sharing
                # the server): treat as a cache miss.
                return None
            if "content_b64" in data:
                # Binary body stored by set() because it was not valid UTF-8.
                content = base64.b64decode(data["content_b64"])
            else:
                content = (
                    data["content"].encode()
                    if isinstance(data["content"], str)
                    else data["content"]
                )
            return ETagContent(etag=data["etag"], content=content)
        except (json.JSONDecodeError, KeyError, ValueError):
            return None

    async def set(self, key: str, value: ETagContent, ttl: int | None = None) -> None:
        """Set value in cache.

        Args:
            key: Cache key
            value: ETagContent to store
            ttl: Time to live in seconds
        """
        import base64

        prefixed_key = self._make_key(key)

        # Prepare content for JSON serialization.  Response bodies are usually
        # UTF-8 text, but binary bodies (e.g. compressed or image responses)
        # cannot be decoded as UTF-8; fall back to base64 so set() never
        # raises UnicodeDecodeError.  get() understands both layouts, and the
        # plain-string layout is unchanged for backward compatibility.
        if isinstance(value.content, bytes):
            try:
                payload = {"etag": value.etag, "content": value.content.decode()}
            except UnicodeDecodeError:
                payload = {
                    "etag": value.etag,
                    "content_b64": base64.b64encode(value.content).decode("ascii"),
                }
        else:
            payload = {"etag": value.etag, "content": value.content}

        serialized_data: str | bytes = json.dumps(payload)

        # orjson returns bytes, stdlib json returns str
        serialized_bytes = (
            serialized_data
            if isinstance(serialized_data, bytes)
            else serialized_data.encode("utf-8")
        )

        self.client.set(
            prefixed_key,
            serialized_bytes,
            expire=ttl if ttl is not None else 0,
        )

    async def delete(self, key: str) -> None:
        """Delete value from cache.

        Args:
            key: Cache key to delete
        """
        self.client.delete(self._make_key(key))

    async def clear(self) -> None:
        """Clear all values from cache.

        Note: Memcached's flush_all affects the entire server.
        Consider using clear_path() with your specific keys instead.
        """
        warnings.warn(
            "Memcached.clear() flushes ALL cached data from the server, "
            "affecting other applications. Consider using clear_path() instead "
            "to selectively remove only this namespace's keys.",
            RuntimeWarning,
            stacklevel=2,
        )
        self.client.flush_all()

    async def clear_path(self, path: str, include_params: bool = False) -> int:
        """Clear cached responses for a specific path.

        Note: Memcached does not support pattern-based queries, so only an
        exact key match can be deleted; the include_params option has no
        effect. For pattern support, consider using the Redis backend.

        Args:
            path: The path to clear cache for
            include_params: Currently unsupported (Memcached limitation)

        Returns:
            Number of cache entries cleared (0 or 1 for exact match only)
        """
        if include_params:
            warnings.warn(
                "Memcached backend does not support pattern-based key clearing. "
                "Only exact key matches can be deleted. "
                "The include_params option has no effect. "
                "Consider using Redis backend for pattern support.",
                RuntimeWarning,
                stacklevel=2,
            )

        # Try to delete the prefixed key (exact match only)
        prefixed_key = self._make_key(path)
        try:
            result = self.client.delete(prefixed_key, noreply=False)
        except Exception:  # noqa: BLE001
            return 0
        else:
            return 1 if result else 0

    async def clear_pattern(self, pattern: str) -> int:  # noqa: ARG002
        """Clear cached responses matching a pattern.

        Memcached does not support pattern matching or key scanning.
        This operation is not available.

        Args:
            pattern: A glob pattern (not supported by Memcached)

        Returns:
            Always 0, as pattern matching is not supported
        """
        warnings.warn(
            "Memcached backend does not support pattern matching. "
            "Pattern-based cache clearing is not available with Memcached. "
            "Consider using Redis backend for pattern support, "
            "or track keys manually in your application logic.",
            RuntimeWarning,
            stacklevel=2,
        )
        return 0

    async def get_all_keys(self) -> list[str]:
        """Get all cache keys in the backend.

        Note: Memcached does not support key scanning directly.
        This returns an empty list as Memcached has no built-in way to enumerate keys.
        For key enumeration, consider using Redis backend or tracking keys
        manually in your application.

        Returns:
            Empty list (Memcached limitation)
        """
        warnings.warn(
            "Memcached backend does not support key enumeration. "
            "get_all_keys() returns an empty list. "
            "Consider using Redis backend if you need cache monitoring, "
            "or track keys manually in your application.",
            RuntimeWarning,
            stacklevel=2,
        )
        return []

    async def get_cache_data(self) -> dict[str, tuple[ETagContent, float | None]]:
        """Get all cache data with expiry information.

        Note: Memcached does not support key enumeration or pattern matching.
        This method returns an empty dictionary.

        Returns:
            Empty dictionary (Memcached limitation)
        """
        warnings.warn(
            "Memcached backend does not support key enumeration. "
            "get_cache_data() returns an empty dictionary. "
            "Consider using Redis backend if you need cache monitoring.",
            RuntimeWarning,
            stacklevel=2,
        )
        return {}
|
|
@@ -0,0 +1,198 @@
|
|
|
1
|
+
"""In-memory cache backend implementation."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import contextlib
|
|
5
|
+
import fnmatch
|
|
6
|
+
import time
|
|
7
|
+
|
|
8
|
+
from fastapi_cachex.types import CacheItem
|
|
9
|
+
from fastapi_cachex.types import ETagContent
|
|
10
|
+
|
|
11
|
+
from .base import BaseCacheBackend
|
|
12
|
+
|
|
13
|
+
# Cache keys are formatted as: method:host:path:query_params
|
|
14
|
+
# Minimum parts required to extract path component
|
|
15
|
+
_MIN_KEY_PARTS = 3
|
|
16
|
+
# Maximum parts to split (method, host, path, query_params)
|
|
17
|
+
_MAX_KEY_PARTS = 3
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class MemoryBackend(BaseCacheBackend):
    """Process-local, dictionary-backed cache backend.

    Entries live in a plain dict guarded by an asyncio.Lock.  A background
    task periodically evicts expired entries; it is started lazily from the
    first cache operation so that it is always created inside a running
    event loop.
    """

    def __init__(self, cleanup_interval: int = 60) -> None:
        """Set up an empty in-memory cache.

        Args:
            cleanup_interval: Seconds between background cleanup runs
                (default: 60)
        """
        self.cache: dict[str, CacheItem] = {}
        self.lock = asyncio.Lock()
        self.cleanup_interval = cleanup_interval
        self._cleanup_task: asyncio.Task[None] | None = None

    @staticmethod
    def _split_key(key: str) -> tuple[str, bool] | None:
        """Parse a cache key into (path, has_query_params).

        Keys follow the ``method:host:path[:query_params]`` layout; None is
        returned for keys without at least the first three components.
        NOTE(review): if the host component itself contains a colon
        (e.g. "host:port"), parts[2] would be the port — assumed not to
        happen with the key format produced by the cache layer; confirm
        against cache.py.
        """
        parts = key.split(":", _MAX_KEY_PARTS)
        if len(parts) < _MIN_KEY_PARTS:
            return None
        return parts[2], len(parts) > _MIN_KEY_PARTS

    def _ensure_cleanup_started(self) -> None:
        """Spawn the background cleanup task if it is not already alive."""
        if self._cleanup_task is not None and not self._cleanup_task.done():
            return
        with contextlib.suppress(RuntimeError):
            # RuntimeError means there is no running event loop yet; the task
            # will be created by the first operation performed inside one.
            self._cleanup_task = asyncio.create_task(self._cleanup_task_impl())

    def start_cleanup(self) -> None:
        """Start the background cleanup task if it is not running.

        Cleanup is lazily started so it is created in a proper async context.
        """
        self._ensure_cleanup_started()

    def stop_cleanup(self) -> None:
        """Cancel the background cleanup task, if any."""
        if self._cleanup_task is not None:
            self._cleanup_task.cancel()
            self._cleanup_task = None

    async def get(self, key: str) -> ETagContent | None:
        """Return the cached value for *key*, or None if missing or expired.

        Expired entries are evicted eagerly on access; the cleanup task is
        (re)started as a side effect.
        """
        self._ensure_cleanup_started()

        async with self.lock:
            entry = self.cache.get(key)
            if not entry:
                return None
            if entry.expiry is not None and entry.expiry <= time.time():
                # Already expired: evict now instead of waiting for cleanup.
                del self.cache[key]
                return None
            return entry.value

    async def set(self, key: str, value: ETagContent, ttl: int | None = None) -> None:
        """Store *value* under *key*.

        Args:
            key: Cache key
            value: Content to cache
            ttl: Time to live in seconds (None = never expires)
        """
        async with self.lock:
            deadline = None if ttl is None else time.time() + ttl
            self.cache[key] = CacheItem(value=value, expiry=deadline)

    async def delete(self, key: str) -> None:
        """Remove a single cache entry; missing keys are ignored."""
        async with self.lock:
            self.cache.pop(key, None)

    async def clear(self) -> None:
        """Remove every entry from the cache."""
        async with self.lock:
            self.cache.clear()

    async def clear_path(self, path: str, include_params: bool = False) -> int:
        """Remove cached responses whose path component equals *path*.

        Args:
            path: The path to clear cache for
            include_params: If True, also remove query-parameter variations;
                if False, only the bare path (no query params) is removed.

        Returns:
            Number of cache entries cleared
        """
        async with self.lock:
            doomed = []
            for key in self.cache:
                parsed = self._split_key(key)
                if parsed is None:
                    continue
                cache_path, has_params = parsed
                if cache_path == path and (include_params or not has_params):
                    doomed.append(key)

            for key in doomed:
                del self.cache[key]

        return len(doomed)

    async def clear_pattern(self, pattern: str) -> int:
        """Remove cached responses whose path matches a glob *pattern*.

        Args:
            pattern: Glob pattern matched against the path component of
                cache keys (e.g. "/users/*")

        Returns:
            Number of cache entries cleared
        """
        async with self.lock:
            doomed = []
            for key in self.cache:
                parsed = self._split_key(key)
                if parsed is not None and fnmatch.fnmatch(parsed[0], pattern):
                    doomed.append(key)

            for key in doomed:
                del self.cache[key]

        return len(doomed)

    async def get_all_keys(self) -> list[str]:
        """Return every cache key currently stored in the backend."""
        async with self.lock:
            return list(self.cache)

    async def get_cache_data(self) -> dict[str, tuple[ETagContent, float | None]]:
        """Return all entries as key -> (value, expiry) pairs.

        Expiry is None for entries that never expire.
        """
        async with self.lock:
            return {k: (entry.value, entry.expiry) for k, entry in self.cache.items()}

    async def _cleanup_task_impl(self) -> None:
        """Loop forever, purging expired entries once per cleanup interval."""
        with contextlib.suppress(asyncio.CancelledError):
            while True:
                await asyncio.sleep(self.cleanup_interval)
                await self.cleanup()

    async def cleanup(self) -> None:
        """Remove expired cache entries from memory."""
        async with self.lock:
            cutoff = time.time()
            stale = [
                key
                for key, entry in self.cache.items()
                if entry.expiry is not None and entry.expiry <= cutoff
            ]
            for key in stale:
                self.cache.pop(key, None)
|