fastapi-cachekit 0.1.2__tar.gz → 0.1.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (20) hide show
  1. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/PKG-INFO +22 -9
  2. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/README.md +18 -8
  3. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/fast_cache/__init__.py +12 -1
  4. fastapi_cachekit-0.1.3/fast_cache/backends/google_firestore.py +351 -0
  5. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/fast_cache/backends/memcached.py +18 -8
  6. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/fast_cache/backends/memory.py +56 -29
  7. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/fast_cache/backends/mongodb.py +30 -30
  8. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/fast_cache/backends/postgres.py +73 -39
  9. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/fast_cache/integration.py +16 -3
  10. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/fastapi_cachekit.egg-info/PKG-INFO +22 -9
  11. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/fastapi_cachekit.egg-info/SOURCES.txt +1 -0
  12. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/fastapi_cachekit.egg-info/requires.txt +4 -0
  13. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/pyproject.toml +6 -2
  14. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/LICENSE.md +0 -0
  15. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/fast_cache/backends/__init__.py +0 -0
  16. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/fast_cache/backends/backend.py +0 -0
  17. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/fast_cache/backends/redis.py +0 -0
  18. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/fastapi_cachekit.egg-info/dependency_links.txt +0 -0
  19. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/fastapi_cachekit.egg-info/top_level.txt +0 -0
  20. {fastapi_cachekit-0.1.2 → fastapi_cachekit-0.1.3}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: fastapi-cachekit
3
- Version: 0.1.2
3
+ Version: 0.1.3
4
4
  Summary: High-performance caching solution for FastAPI applications
5
5
  Author-email: Bijay Nayak <bijay6779@gmail.com>
6
6
  License-Expression: MIT
@@ -37,12 +37,15 @@ Requires-Dist: aiomcache>=0.8.1; extra == "memcached"
37
37
  Requires-Dist: pymemcache>=4.0.0; extra == "memcached"
38
38
  Provides-Extra: mongodb
39
39
  Requires-Dist: pymongo[gssapi,snappy,srv]>=4.6.0; extra == "mongodb"
40
+ Provides-Extra: firestore
41
+ Requires-Dist: google-cloud-firestore>=2.3.0; extra == "firestore"
40
42
  Provides-Extra: all
41
43
  Requires-Dist: redis>=4.2.0; extra == "all"
42
44
  Requires-Dist: psycopg[pool]>=3.2.9; extra == "all"
43
45
  Requires-Dist: aiomcache>=0.8.1; extra == "all"
44
46
  Requires-Dist: pymemcache>=4.0.0; extra == "all"
45
47
  Requires-Dist: pymongo[gssapi,snappy,srv]>=4.6.0; extra == "all"
48
+ Requires-Dist: google-cloud-firestore>=2.3.0; extra == "all"
46
49
  Dynamic: license-file
47
50
 
48
51
  # fastapi-cachekit
@@ -58,7 +61,7 @@ A high-performance, flexible caching solution for FastAPI applications. fastapi-
58
61
  ## Features
59
62
 
60
63
  - ✅ Full async/sync support for all operations
61
- - ✅ Redis backend with connection pooling
64
+ - ✅ Multiple backend support, so you can use the same tech stack as your app
62
65
  - ✅ Function result caching with decorator syntax
63
66
  - ✅ FastAPI dependency injection support
64
67
  - ✅ Namespace support for isolating cache entries
@@ -67,13 +70,14 @@ A high-performance, flexible caching solution for FastAPI applications. fastapi-
67
70
  - ✅ Expiration time support (seconds or timedelta)
68
71
 
69
72
  ## 📦 Backends & Sync/Async Support
70
-
71
- | Backend | Sync API | Async API | Install Extra |
72
- |--------------------|:--------:|:---------:|----------------------|
73
- | `InMemoryBackend` | ✅ | ✅ | _built-in_ |
74
- | `RedisBackend` | ✅ | ✅ | `redis` |
75
- | `PostgresBackend` | ✅ | ✅ | `postgres` |
76
- | `MemcachedBackend` | ✅ | ✅ | `memcached` |
73
+ | Backend | Sync API | Async API | Install Extra |
74
+ |--------------------|:--------:|:---------:|---------------|
75
+ | `InMemoryBackend` | ✅ | ✅ | _built-in_ |
76
+ | `RedisBackend` | ✅ | ✅ | `redis` |
77
+ | `PostgresBackend` | ✅ | ✅ | `postgres` |
78
+ | `MemcachedBackend` | ✅ | ✅ | `memcached` |
79
+ | `MongoDB` | ✅ | ✅ | `mongodb` |
80
+ | `FireStore` | ✅ | ✅ | `firestore` |
77
81
 
78
82
  ---
79
83
 
@@ -98,6 +102,15 @@ pip install fastapi-cachekit[postgres]
98
102
  ```bash
99
103
  pip install fastapi-cachekit[memcached]
100
104
  ```
105
+ **With MongoDB:**
106
+ ```bash
107
+ pip install fastapi-cachekit[mongodb]
108
+ ```
109
+
110
+ **With FireStore:**
111
+ ```bash
112
+ pip install fastapi-cachekit[firestore]
113
+ ```
101
114
 
102
115
  **All backends:**
103
116
  ```bash
@@ -11,7 +11,7 @@ A high-performance, flexible caching solution for FastAPI applications. fastapi-
11
11
  ## Features
12
12
 
13
13
  - ✅ Full async/sync support for all operations
14
- - ✅ Redis backend with connection pooling
14
+ - ✅ Multiple backend support, so you can use the same tech stack as your app
15
15
  - ✅ Function result caching with decorator syntax
16
16
  - ✅ FastAPI dependency injection support
17
17
  - ✅ Namespace support for isolating cache entries
@@ -20,13 +20,14 @@ A high-performance, flexible caching solution for FastAPI applications. fastapi-
20
20
  - ✅ Expiration time support (seconds or timedelta)
21
21
 
22
22
  ## 📦 Backends & Sync/Async Support
23
-
24
- | Backend | Sync API | Async API | Install Extra |
25
- |--------------------|:--------:|:---------:|----------------------|
26
- | `InMemoryBackend` | ✅ | ✅ | _built-in_ |
27
- | `RedisBackend` | ✅ | ✅ | `redis` |
28
- | `PostgresBackend` | ✅ | ✅ | `postgres` |
29
- | `MemcachedBackend` | ✅ | ✅ | `memcached` |
23
+ | Backend | Sync API | Async API | Install Extra |
24
+ |--------------------|:--------:|:---------:|---------------|
25
+ | `InMemoryBackend` | ✅ | ✅ | _built-in_ |
26
+ | `RedisBackend` | ✅ | ✅ | `redis` |
27
+ | `PostgresBackend` | ✅ | ✅ | `postgres` |
28
+ | `MemcachedBackend` | ✅ | ✅ | `memcached` |
29
+ | `MongoDB` | ✅ | ✅ | `mongodb` |
30
+ | `FireStore` | ✅ | ✅ | `firestore` |
30
31
 
31
32
  ---
32
33
 
@@ -51,6 +52,15 @@ pip install fastapi-cachekit[postgres]
51
52
  ```bash
52
53
  pip install fastapi-cachekit[memcached]
53
54
  ```
55
+ **With MongoDB:**
56
+ ```bash
57
+ pip install fastapi-cachekit[mongodb]
58
+ ```
59
+
60
+ **With FireStore:**
61
+ ```bash
62
+ pip install fastapi-cachekit[firestore]
63
+ ```
54
64
 
55
65
  **All backends:**
56
66
  ```bash
@@ -6,8 +6,19 @@ from .backends.memory import InMemoryBackend
6
6
  from .backends.postgres import PostgresBackend
7
7
  from .backends.memcached import MemcachedBackend
8
8
  from .backends.mongodb import MongoDBBackend
9
+ from .backends.google_firestore import FirestoreBackend
9
10
 
10
- __all__ = ["FastAPICache", "RedisBackend", "CacheBackend", "InMemoryBackend","PostgresBackend", "cache","MemcachedBackend", "MongoDBBackend" ]
11
+ __all__ = [
12
+ "FastAPICache",
13
+ "RedisBackend",
14
+ "CacheBackend",
15
+ "InMemoryBackend",
16
+ "PostgresBackend",
17
+ "cache",
18
+ "MemcachedBackend",
19
+ "MongoDBBackend",
20
+ "FirestoreBackend"
21
+ ]
11
22
 
12
23
 
13
24
  # Create global cache instance
@@ -0,0 +1,351 @@
1
+ import asyncio
2
+ import inspect
3
+ import pickle
4
+ import time
5
+ from functools import wraps
6
+ from typing import Any, Optional, Union
7
+ from datetime import timedelta
8
+
9
+ from .backend import CacheBackend
10
+
11
+
12
+ def ensure_cleanup_task(method):
13
+ @wraps(method)
14
+ def sync_wrapper(self, *args, **kwargs):
15
+ self._ensure_cleanup_task()
16
+ return method(self, *args, **kwargs)
17
+
18
+ @wraps(method)
19
+ async def async_wrapper(self, *args, **kwargs):
20
+ self._ensure_cleanup_task()
21
+ return await method(self, *args, **kwargs)
22
+
23
+ if inspect.iscoroutinefunction(method):
24
+ return async_wrapper
25
+ else:
26
+ return sync_wrapper
27
+
28
+ class FirestoreBackend(CacheBackend):
29
+ """
30
+ Firebase Firestore cache backend with both sync and async support.
31
+ Uses an 'expires_at' field for manual expiration checks.
32
+ """
33
+
34
+ def __init__(
35
+ self,
36
+ credential_path: Optional[str] = None,
37
+ namespace: Optional[str] = "fastapi_cache",
38
+ collection_name: Optional[str] = "cache_entries",
39
+ cleanup_interval: int = 30,
40
+ auto_cleanup: bool = True,
41
+ ) -> None:
42
+ """
43
+ Initialize the Firestore backend.
44
+
45
+ Args:
46
+ credential_path (Optional[str]): Path to the Firebase Admin SDK credentials file.
47
+ If None, uses GOOGLE_APPLICATION_CREDENTIALS from env variable.
48
+ namespace (Optional[str]): Optional prefix for all cache keys. Defaults to "fastapi_cache".
49
+ collection_name (Optional[str]): Name of the Firestore collection to use. Defaults to "cache_entries".
50
+ """
51
+
52
+ try:
53
+ from google.oauth2 import service_account
54
+ from google.cloud import firestore
55
+ from google.cloud.firestore_v1.async_client import AsyncClient
56
+ from google.cloud.firestore_v1.client import Client
57
+ except ImportError:
58
+ raise ImportError(
59
+ "FirestoreBackend requires 'google-cloud-firestore'. "
60
+ "Install with: pip install fastapi-cachekit[firestore]"
61
+ )
62
+
63
+ self._namespace = namespace or "cache"
64
+ self._collection_name = collection_name or "cache_entries"
65
+
66
+ self._cleanup_task = None
67
+ self._cleanup_interval = cleanup_interval
68
+ self._auto_cleanup = auto_cleanup
69
+
70
+ if credential_path:
71
+ # Explicitly load credentials from the provided path
72
+ credentials = service_account.Credentials.from_service_account_file(
73
+ credential_path
74
+ )
75
+ self._sync_db: Client = firestore.Client(credentials=credentials)
76
+ self._async_db: AsyncClient = firestore.AsyncClient(credentials=credentials)
77
+ else:
78
+ # Rely on GOOGLE_APPLICATION_CREDENTIALS
79
+ self._sync_db: Client = firestore.Client()
80
+ self._async_db: AsyncClient = firestore.AsyncClient()
81
+
82
+ @staticmethod
83
+ def _compute_expire_at(expire: Optional[Union[int, timedelta]]) -> Optional[int]:
84
+ if expire is not None:
85
+ if isinstance(expire, timedelta):
86
+ return int(time.time() + expire.total_seconds())
87
+ else:
88
+ return int(time.time() + expire)
89
+ return None
90
+
91
+ def _make_key(self, key: str) -> str:
92
+ """
93
+ Create a namespaced cache key.
94
+
95
+ Args:
96
+ key (str): The original cache key.
97
+
98
+ Returns:
99
+ str: The namespaced cache key.
100
+ """
101
+ # Firestore document IDs have limitations, using safe encoding
102
+ import hashlib
103
+
104
+ hashed_key = hashlib.sha256(f"{self._namespace}:{key}".encode()).hexdigest()
105
+ return hashed_key
106
+
107
+ def _is_expired(self, expires_at: Optional[int]) -> bool:
108
+ """
109
+ Check if an entry has expired.
110
+
111
+ Args:
112
+ expires_at (Optional[int]): The expiration time in epoch seconds.
113
+
114
+ Returns:
115
+ bool: True if the entry is expired, False otherwise.
116
+ """
117
+ return expires_at is not None and expires_at < time.time()
118
+
119
+ @ensure_cleanup_task
120
+ def get(self, key: str) -> Optional[Any]:
121
+ """
122
+ Synchronously retrieve a value from the cache.
123
+
124
+ Args:
125
+ key (str): The cache key.
126
+
127
+ Returns:
128
+ Optional[Any]: The cached value, or None if not found or expired.
129
+ """
130
+ doc_ref = self._sync_db.collection(self._collection_name).document(
131
+ self._make_key(key)
132
+ )
133
+ doc = doc_ref.get()
134
+ if doc.exists:
135
+ data = doc.to_dict()
136
+ if not self._is_expired(data.get("expires_at")):
137
+ try:
138
+ return pickle.loads(data["value"])
139
+ except (pickle.UnpicklingError, KeyError):
140
+ return None
141
+ return None
142
+
143
+ @ensure_cleanup_task
144
+ def set(
145
+ self, key: str, value: Any, expire: Optional[Union[int, timedelta]] = None
146
+ ) -> None:
147
+ """
148
+ Synchronously set a value in the cache.
149
+
150
+ Args:
151
+ key (str): The cache key.
152
+ value (Any): The value to cache.
153
+ expire (Optional[Union[int, timedelta]]): Expiration time in seconds or as timedelta.
154
+ If None, the entry never expires (or relies on Firestore's max TTL).
155
+ """
156
+ doc_ref = self._sync_db.collection(self._collection_name).document(
157
+ self._make_key(key)
158
+ )
159
+ data = {"value": pickle.dumps(value)}
160
+ exptime = self._compute_expire_at(expire)
161
+ if exptime is not None:
162
+ data["expires_at"] = exptime
163
+
164
+ doc_ref.set(data)
165
+
166
+ def delete(self, key: str) -> None:
167
+ """
168
+ Synchronously delete a value from the cache.
169
+
170
+ Args:
171
+ key (str): The cache key.
172
+ """
173
+ doc_ref = self._sync_db.collection(self._collection_name).document(
174
+ self._make_key(key)
175
+ )
176
+ doc_ref.delete()
177
+
178
+ def clear(self) -> None:
179
+ """
180
+ Synchronously clear all values from the namespace.
181
+ Note: Firestore doesn't have direct namespace-based clearing.
182
+ This implementation will delete all documents in the collection.
183
+ Consider adding a query based on a namespaced field if needed.
184
+ """
185
+ docs = self._sync_db.collection(self._collection_name).stream()
186
+ for doc in docs:
187
+ doc.reference.delete()
188
+
189
+ def has(self, key: str) -> bool:
190
+ """
191
+ Synchronously check if a key exists in the cache and is not expired.
192
+
193
+ Args:
194
+ key (str): The cache key.
195
+
196
+ Returns:
197
+ bool: True if the key exists and is not expired, False otherwise.
198
+ """
199
+ doc_ref = self._sync_db.collection(self._collection_name).document(
200
+ self._make_key(key)
201
+ )
202
+ doc = doc_ref.get()
203
+ if doc.exists:
204
+ data = doc.to_dict()
205
+ return not self._is_expired(data.get("expires_at"))
206
+ return False
207
+
208
+ @ensure_cleanup_task
209
+ async def aget(self, key: str) -> Optional[Any]:
210
+ """
211
+ Asynchronously retrieve a value from the cache.
212
+
213
+ Args:
214
+ key (str): The cache key.
215
+
216
+ Returns:
217
+ Optional[Any]: The cached value, or None if not found or expired.
218
+ """
219
+ doc_ref = self._async_db.collection(self._collection_name).document(
220
+ self._make_key(key)
221
+ )
222
+ doc = await doc_ref.get()
223
+ if doc.exists:
224
+ data = doc.to_dict()
225
+ if not self._is_expired(data.get("expires_at")):
226
+ try:
227
+ return pickle.loads(data["value"])
228
+ except (pickle.UnpicklingError, KeyError):
229
+ # Handle potential deserialization errors or missing value field
230
+ return None
231
+ return None
232
+
233
+ @ensure_cleanup_task
234
+ async def aset(
235
+ self, key: str, value: Any, expire: Optional[Union[int, timedelta]] = None
236
+ ) -> None:
237
+ """
238
+ Asynchronously set a value in the cache.
239
+
240
+ Args:
241
+ key (str): The cache key.
242
+ value (Any): The value to cache.
243
+ expire (Optional[Union[int, timedelta]]): Expiration time in seconds or as timedelta.
244
+ If None, the entry never expires (or relies on Firestore's max TTL).
245
+ """
246
+ doc_ref = self._async_db.collection(self._collection_name).document(
247
+ self._make_key(key)
248
+ )
249
+ data = {"value": pickle.dumps(value)}
250
+ exptime = self._compute_expire_at(expire)
251
+
252
+ if expire is not None:
253
+ data["expires_at"] = exptime
254
+
255
+ await doc_ref.set(data)
256
+
257
+ async def adelete(self, key: str) -> None:
258
+ """
259
+ Asynchronously delete a value from the cache.
260
+
261
+ Args:
262
+ key (str): The cache key.
263
+ """
264
+ doc_ref = self._async_db.collection(self._collection_name).document(
265
+ self._make_key(key)
266
+ )
267
+ await doc_ref.delete()
268
+
269
+ async def aclear(self) -> None:
270
+ """
271
+ Asynchronously clear all values from the namespace.
272
+ Note: Firestore doesn't have direct namespace-based clearing.
273
+ This implementation will delete all documents in the collection.
274
+ Consider adding a query based on a namespaced field if needed.
275
+ """
276
+ docs = self._async_db.collection(self._collection_name).stream()
277
+ async for doc in docs:
278
+ await doc.reference.delete()
279
+
280
+ async def ahas(self, key: str) -> bool:
281
+ """
282
+ Asynchronously check if a key exists in the cache and is not expired.
283
+
284
+ Args:
285
+ key (str): The cache key.
286
+
287
+ Returns:
288
+ bool: True if the key exists and is not expired, False otherwise.
289
+ """
290
+ doc_ref = self._async_db.collection(self._collection_name).document(
291
+ self._make_key(key)
292
+ )
293
+ doc = await doc_ref.get()
294
+ if doc.exists:
295
+ data = doc.to_dict()
296
+ return not self._is_expired(data.get("expires_at"))
297
+ return False
298
+
299
+ def close(self) -> None:
300
+ """
301
+ Close the synchronous Firestore client.
302
+ """
303
+ try:
304
+ self._sync_db.close()
305
+ except TypeError as e:
306
+ return
307
+
308
+ async def aclose(self) -> None:
309
+ """
310
+ Close the asynchronous Firestore client.
311
+ """
312
+ try:
313
+ await self._async_db.close()
314
+ except TypeError as e:
315
+ return
316
+
317
+ def _ensure_cleanup_task(self):
318
+ if (
319
+ getattr(self, "_auto_cleanup", True)
320
+ and getattr(self, "_cleanup_task", None) is None
321
+ ):
322
+ try:
323
+ loop = asyncio.get_running_loop()
324
+ self._cleanup_task = loop.create_task(self.cleanup_expired(self._cleanup_interval))
325
+ except RuntimeError:
326
+ pass
327
+
328
+ async def cleanup_expired(self, interval_seconds: int = 30):
329
+ """
330
+ Periodically delete expired cache entries.
331
+ Should be run as a background task.
332
+ Args:
333
+ interval_seconds (int): How often to run cleanup, in seconds (default: 30)
334
+ """
335
+ while True:
336
+ now = int(time.time())
337
+ expired_query = self._async_db.collection(self._collection_name).where(
338
+ "expires_at", "<", now
339
+ )
340
+ batch = self._async_db.batch()
341
+ count = 0
342
+ async for doc in expired_query.stream():
343
+ batch.delete(doc.reference)
344
+ count += 1
345
+ if count == 500:
346
+ await batch.commit()
347
+ batch = self._async_db.batch()
348
+ count = 0
349
+ if count > 0:
350
+ await batch.commit()
351
+ await asyncio.sleep(interval_seconds)
@@ -3,6 +3,7 @@ from typing import Any, Optional, Union, Mapping
3
3
  from datetime import timedelta
4
4
  from .backend import CacheBackend
5
5
 
6
+
6
7
  class MemcachedBackend(CacheBackend):
7
8
  """
8
9
  Memcached cache backend with both sync and async support.
@@ -56,8 +57,14 @@ class MemcachedBackend(CacheBackend):
56
57
  self, key: str, value: Any, expire: Optional[Union[int, timedelta]] = None
57
58
  ) -> None:
58
59
  try:
59
- exptime = int(expire.total_seconds()) if isinstance(expire, timedelta) else (expire or 0)
60
- self._sync_client.set(self._make_key(key), pickle.dumps(value), expire=exptime)
60
+ exptime = (
61
+ int(expire.total_seconds())
62
+ if isinstance(expire, timedelta)
63
+ else (expire or 0)
64
+ )
65
+ self._sync_client.set(
66
+ self._make_key(key), pickle.dumps(value), expire=exptime
67
+ )
61
68
  except Exception:
62
69
  pass
63
70
 
@@ -80,7 +87,6 @@ class MemcachedBackend(CacheBackend):
80
87
  except Exception:
81
88
  return False
82
89
 
83
-
84
90
  async def aget(self, key: str) -> Optional[Any]:
85
91
  try:
86
92
  value = await self._async_client.get(self._make_key(key))
@@ -92,8 +98,14 @@ class MemcachedBackend(CacheBackend):
92
98
  self, key: str, value: Any, expire: Optional[Union[int, timedelta]] = None
93
99
  ) -> None:
94
100
  try:
95
- exptime = int(expire.total_seconds()) if isinstance(expire, timedelta) else (expire or 0)
96
- await self._async_client.set(self._make_key(key), pickle.dumps(value), exptime=exptime)
101
+ exptime = (
102
+ int(expire.total_seconds())
103
+ if isinstance(expire, timedelta)
104
+ else (expire or 0)
105
+ )
106
+ await self._async_client.set(
107
+ self._make_key(key), pickle.dumps(value), exptime=exptime
108
+ )
97
109
  except Exception:
98
110
  pass
99
111
 
@@ -103,14 +115,12 @@ class MemcachedBackend(CacheBackend):
103
115
  except Exception:
104
116
  pass
105
117
 
106
-
107
118
  async def aclear(self) -> None:
108
119
  try:
109
120
  await self._async_client.flush_all()
110
121
  except Exception:
111
122
  pass
112
123
 
113
-
114
124
  async def ahas(self, key: str) -> bool:
115
125
  try:
116
126
  value = await self._async_client.get(self._make_key(key))
@@ -123,4 +133,4 @@ class MemcachedBackend(CacheBackend):
123
133
  await self._async_client.close()
124
134
  self._sync_client.close()
125
135
  except Exception:
126
- pass
136
+ pass
@@ -1,16 +1,37 @@
1
1
  import asyncio
2
+ import inspect
2
3
  import threading
3
4
  import time
4
5
  from collections import OrderedDict
5
6
  from datetime import timedelta
6
- from typing import Any, Dict, Optional, Union, Tuple
7
+ from functools import wraps
8
+ from typing import Any, Optional, Union, Tuple
7
9
  from .backend import CacheBackend
8
10
 
11
+ def ensure_cleanup_task(method):
12
+ """
13
+ Decorator to ensure the background cleanup task is started
14
+ on first use of any public method (sync or async).
15
+ """
16
+ @wraps(method)
17
+ def sync_wrapper(self, *args, **kwargs):
18
+ self._ensure_cleanup_task()
19
+ return method(self, *args, **kwargs)
20
+
21
+ @wraps(method)
22
+ async def async_wrapper(self, *args, **kwargs):
23
+ self._ensure_cleanup_task()
24
+ return await method(self, *args, **kwargs)
25
+
26
+ if inspect.iscoroutinefunction(method):
27
+ return async_wrapper
28
+ else:
29
+ return sync_wrapper
9
30
 
10
31
  class InMemoryBackend(CacheBackend):
11
32
  """
12
- In-memory cache backend implementation with namespace support,
13
- thread/async safety, and efficient expiration cleanup.
33
+ In-memory cache backend with namespace support, LRU eviction,
34
+ thread/async safety, and efficient background expiration cleanup.
14
35
 
15
36
  Attributes:
16
37
  _namespace (str): Namespace prefix for all keys.
@@ -89,19 +110,6 @@ class InMemoryBackend(CacheBackend):
89
110
  while len(self._cache) > self._max_size:
90
111
  self._cache.popitem(last=False) # Remove oldest (LRU)
91
112
 
92
- def _cleanup(self) -> None:
93
- """
94
- Remove expired items from the cache.
95
- """
96
- now = time.monotonic()
97
- keys_to_delete = [
98
- k
99
- for k, (_, exp) in list(self._cache.items())
100
- if exp is not None and now > exp
101
- ]
102
- for k in keys_to_delete:
103
- self._cache.pop(k, None)
104
-
105
113
  async def _cleanup_expired(self) -> None:
106
114
  """
107
115
  Periodically clean up expired items in the background.
@@ -109,8 +117,28 @@ class InMemoryBackend(CacheBackend):
109
117
  while True:
110
118
  await asyncio.sleep(60)
111
119
  async with self._async_lock:
112
- self._cleanup()
120
+ now = time.monotonic()
121
+ keys_to_delete = [
122
+ k
123
+ for k, (_, exp) in list(self._cache.items())
124
+ if exp is not None and now > exp
125
+ ]
126
+ for k in keys_to_delete:
127
+ self._cache.pop(k, None)
128
+
129
+ def _ensure_cleanup_task(self):
130
+ """
131
+ Ensure the background cleanup task is started (if in an event loop).
132
+ """
133
+ try:
134
+ loop = asyncio.get_running_loop()
135
+ if self._cleanup_task is None or self._cleanup_task.done():
136
+ self._cleanup_task = loop.create_task(self._cleanup_expired())
137
+ except RuntimeError:
138
+ # Not in an event loop (sync context), do nothing
139
+ pass
113
140
 
141
+ @ensure_cleanup_task
114
142
  def get(self, key: str) -> Optional[Any]:
115
143
  """
116
144
  Synchronously retrieve a value from the cache.
@@ -127,12 +155,12 @@ class InMemoryBackend(CacheBackend):
127
155
  if item:
128
156
  value, expire_time = item
129
157
  if not self._is_expired(expire_time):
130
- # Move to end for LRU
131
158
  self._cache.move_to_end(k)
132
159
  return value
133
160
  self._cache.pop(k, None)
134
161
  return None
135
162
 
163
+ @ensure_cleanup_task
136
164
  def set(
137
165
  self, key: str, value: Any, expire: Optional[Union[int, timedelta]] = None
138
166
  ) -> None:
@@ -150,8 +178,8 @@ class InMemoryBackend(CacheBackend):
150
178
  self._cache[k] = (value, expire_time)
151
179
  self._cache.move_to_end(k)
152
180
  self._evict_if_needed()
153
- self._cleanup()
154
181
 
182
+ @ensure_cleanup_task
155
183
  def delete(self, key: str) -> None:
156
184
  """
157
185
  Synchronously delete a value from the cache.
@@ -163,6 +191,7 @@ class InMemoryBackend(CacheBackend):
163
191
  with self._lock:
164
192
  self._cache.pop(k, None)
165
193
 
194
+ @ensure_cleanup_task
166
195
  def clear(self) -> None:
167
196
  """
168
197
  Synchronously clear all values from the cache.
@@ -173,6 +202,7 @@ class InMemoryBackend(CacheBackend):
173
202
  for k in keys_to_delete:
174
203
  self._cache.pop(k, None)
175
204
 
205
+ @ensure_cleanup_task
176
206
  def has(self, key: str) -> bool:
177
207
  """
178
208
  Synchronously check if a key exists in the cache.
@@ -194,6 +224,7 @@ class InMemoryBackend(CacheBackend):
194
224
  self._cache.pop(k, None)
195
225
  return False
196
226
 
227
+ @ensure_cleanup_task
197
228
  async def aget(self, key: str) -> Optional[Any]:
198
229
  """
199
230
  Asynchronously retrieve a value from the cache.
@@ -215,6 +246,7 @@ class InMemoryBackend(CacheBackend):
215
246
  self._cache.pop(k, None)
216
247
  return None
217
248
 
249
+ @ensure_cleanup_task
218
250
  async def aset(
219
251
  self, key: str, value: Any, expire: Optional[Union[int, timedelta]] = None
220
252
  ) -> None:
@@ -232,11 +264,8 @@ class InMemoryBackend(CacheBackend):
232
264
  self._cache[k] = (value, expire_time)
233
265
  self._cache.move_to_end(k)
234
266
  self._evict_if_needed()
235
- self._cleanup()
236
- # Start cleanup task if not already running
237
- if self._cleanup_task is None or self._cleanup_task.done():
238
- self._cleanup_task = asyncio.create_task(self._cleanup_expired())
239
267
 
268
+ @ensure_cleanup_task
240
269
  async def adelete(self, key: str) -> None:
241
270
  """
242
271
  Asynchronously delete a value from the cache.
@@ -248,6 +277,7 @@ class InMemoryBackend(CacheBackend):
248
277
  async with self._async_lock:
249
278
  self._cache.pop(k, None)
250
279
 
280
+ @ensure_cleanup_task
251
281
  async def aclear(self) -> None:
252
282
  """
253
283
  Asynchronously clear all values from the cache.
@@ -258,6 +288,7 @@ class InMemoryBackend(CacheBackend):
258
288
  for k in keys_to_delete:
259
289
  self._cache.pop(k, None)
260
290
 
291
+ @ensure_cleanup_task
261
292
  async def ahas(self, key: str) -> bool:
262
293
  """
263
294
  Asynchronously check if a key exists in the cache.
@@ -279,13 +310,9 @@ class InMemoryBackend(CacheBackend):
279
310
  self._cache.pop(k, None)
280
311
  return False
281
312
 
282
- async def close(self) -> None:
313
+ def close(self) -> None:
283
314
  """
284
- Asynchronously close the backend and cancel the cleanup task if running.
315
+ Synchronously close the backend and cancel the cleanup task if running.
285
316
  """
286
317
  if self._cleanup_task and not self._cleanup_task.done():
287
318
  self._cleanup_task.cancel()
288
- try:
289
- await self._cleanup_task
290
- except asyncio.CancelledError:
291
- pass
@@ -4,6 +4,7 @@ from typing import Any, Optional, Union
4
4
  from datetime import timedelta
5
5
  from .backend import CacheBackend
6
6
 
7
+
7
8
  class MongoDBBackend(CacheBackend):
8
9
  """
9
10
  MongoDB cache backend with both sync and async support.
@@ -18,11 +19,7 @@ class MongoDBBackend(CacheBackend):
18
19
  but expiration is also checked in code to avoid returning stale data.
19
20
  """
20
21
 
21
- def __init__(
22
- self,
23
- uri: str,
24
- namespace: Optional[str] = "fastapi_cache"
25
- ) -> None:
22
+ def __init__(self, uri: str, namespace: Optional[str] = "fastapi_cache") -> None:
26
23
  """
27
24
  Initialize the MongoDB backend.
28
25
 
@@ -76,14 +73,14 @@ class MongoDBBackend(CacheBackend):
76
73
  """
77
74
  doc = self._sync_collection.find_one({"_id": self._make_key(key)})
78
75
  if doc and (doc.get("expires_at", float("inf")) > time.time()):
79
- return pickle.loads(doc["value"])
76
+ try:
77
+ return pickle.loads(doc["value"])
78
+ except Exception:
79
+ return None
80
80
  return None
81
81
 
82
82
  def set(
83
- self,
84
- key: str,
85
- value: Any,
86
- expire: Optional[Union[int, timedelta]] = None
83
+ self, key: str, value: Any, expire: Optional[Union[int, timedelta]] = None
87
84
  ) -> None:
88
85
  """
89
86
  Synchronously set a value in the cache.
@@ -95,11 +92,8 @@ class MongoDBBackend(CacheBackend):
95
92
  If None, the entry never expires.
96
93
  """
97
94
  update = {"value": pickle.dumps(value)}
98
- if expire is not None:
99
- if isinstance(expire, timedelta):
100
- exptime = int(time.time() + expire.total_seconds())
101
- else:
102
- exptime = int(time.time() + expire)
95
+ exptime = self._compute_expire_at(expire)
96
+ if exptime is not None:
103
97
  update["expires_at"] = exptime
104
98
 
105
99
  self._sync_collection.update_one(
@@ -146,14 +140,14 @@ class MongoDBBackend(CacheBackend):
146
140
  """
147
141
  doc = await self._async_collection.find_one({"_id": self._make_key(key)})
148
142
  if doc and (doc.get("expires_at", float("inf")) > time.time()):
149
- return pickle.loads(doc["value"])
143
+ try:
144
+ return pickle.loads(doc["value"])
145
+ except Exception:
146
+ return None
150
147
  return None
151
148
 
152
149
  async def aset(
153
- self,
154
- key: str,
155
- value: Any,
156
- expire: Optional[Union[int, timedelta]] = None
150
+ self, key: str, value: Any, expire: Optional[Union[int, timedelta]] = None
157
151
  ) -> None:
158
152
  """
159
153
  Asynchronously set a value in the cache.
@@ -165,17 +159,12 @@ class MongoDBBackend(CacheBackend):
165
159
  If None, the entry never expires.
166
160
  """
167
161
  update = {"value": pickle.dumps(value)}
168
- if expire is not None:
169
- if isinstance(expire, timedelta):
170
- exptime = int(time.time() + expire.total_seconds())
171
- else:
172
- exptime = int(time.time() + expire)
162
+ exptime = self._compute_expire_at(expire)
163
+ if exptime is not None:
173
164
  update["expires_at"] = exptime
174
165
 
175
166
  await self._async_collection.update_one(
176
- {"_id": self._make_key(key)},
177
- {"$set": update},
178
- upsert=True
167
+ {"_id": self._make_key(key)}, {"$set": update}, upsert=True
179
168
  )
180
169
 
181
170
  async def adelete(self, key: str) -> None:
@@ -191,7 +180,9 @@ class MongoDBBackend(CacheBackend):
191
180
  """
192
181
  Asynchronously clear all values from the namespace.
193
182
  """
194
- await self._async_collection.delete_many({"_id": {"$regex": f"^{self._namespace}:"}})
183
+ await self._async_collection.delete_many(
184
+ {"_id": {"$regex": f"^{self._namespace}:"}}
185
+ )
195
186
 
196
187
  async def ahas(self, key: str) -> bool:
197
188
  """
@@ -217,4 +208,13 @@ class MongoDBBackend(CacheBackend):
217
208
  Close the asynchronous MongoDB client.
218
209
  """
219
210
  self._sync_client.close()
220
- await self._async_client.close()
211
+ await self._async_client.close()
212
+
213
+ @staticmethod
214
+ def _compute_expire_at(expire: Optional[Union[int, timedelta]]) -> Optional[int]:
215
+ if expire is not None:
216
+ if isinstance(expire, timedelta):
217
+ return int(time.time() + expire.total_seconds())
218
+ else:
219
+ return int(time.time() + expire)
220
+ return None
@@ -1,6 +1,8 @@
1
+ import asyncio
1
2
  import pickle
2
3
  import re
3
4
  from datetime import datetime, timezone, timedelta
5
+ from functools import wraps
4
6
  from typing import Any, Optional, Union
5
7
  from .backend import CacheBackend
6
8
 
@@ -10,6 +12,24 @@ def _validate_namespace(namespace: str) -> str:
10
12
  raise ValueError("Invalid namespace: only alphanumeric and underscore allowed")
11
13
  return namespace
12
14
 
15
+
16
+ def ensure_cleanup_task(method):
17
+ @wraps(method)
18
+ def sync_wrapper(self, *args, **kwargs):
19
+ self._ensure_cleanup_task()
20
+ return method(self, *args, **kwargs)
21
+
22
+ @wraps(method)
23
+ async def async_wrapper(self, *args, **kwargs):
24
+ self._ensure_cleanup_task()
25
+ return await method(self, *args, **kwargs)
26
+
27
+ import inspect
28
+ if inspect.iscoroutinefunction(method):
29
+ return async_wrapper
30
+ else:
31
+ return sync_wrapper
32
+
13
33
  class PostgresBackend(CacheBackend):
14
34
  """
15
35
  PostgreSQL cache backend implementation.
@@ -23,6 +43,8 @@ class PostgresBackend(CacheBackend):
23
43
  namespace: str = "fastapi",
24
44
  min_size: int = 1,
25
45
  max_size: int = 10,
46
+ cleanup_interval: int = 30,
47
+ auto_cleanup: bool = True,
26
48
  ) -> None:
27
49
  try:
28
50
  from psycopg_pool import AsyncConnectionPool, ConnectionPool
@@ -45,12 +67,11 @@ class PostgresBackend(CacheBackend):
45
67
  )
46
68
  self._create_unlogged_table_if_not_exists()
47
69
 
48
- def _validate_namespace(namespace: str) -> str:
49
- if not re.match(r"^[A-Za-z0-9_]+$", namespace):
50
- raise ValueError(
51
- "Invalid namespace: only alphanumeric and underscore allowed"
52
- )
53
- return namespace
70
+ # Lazy cleanup task setup
71
+ self._cleanup_task = None
72
+ self._cleanup_interval = cleanup_interval
73
+ self._auto_cleanup = auto_cleanup
74
+
54
75
 
55
76
  def _create_unlogged_table_if_not_exists(self):
56
77
  """Create the cache table if it doesn't exist."""
@@ -74,22 +95,13 @@ class PostgresBackend(CacheBackend):
74
95
  return f"{self._namespace}:{key}"
75
96
 
76
97
  def _is_expired(self, expire_at: Optional[datetime]) -> bool:
77
- return expire_at is not None and expire_at < datetime.now(
78
- timezone.utc
79
- )
98
+ return expire_at is not None and expire_at < datetime.now(timezone.utc)
80
99
 
100
+ @ensure_cleanup_task
81
101
  def set(
82
102
  self, key: str, value: Any, expire: Optional[Union[int, timedelta]] = None
83
103
  ) -> None:
84
- expire_at = None
85
- if expire:
86
- delta = (
87
- timedelta(seconds=expire)
88
- if isinstance(expire, int)
89
- else expire
90
- )
91
- expire_at = datetime.now(timezone.utc) + delta
92
-
104
+ expire_at = self._compute_expire_at(expire)
93
105
  with self._sync_pool.connection() as conn:
94
106
  with conn.cursor() as cur:
95
107
  cur.execute(
@@ -104,6 +116,7 @@ class PostgresBackend(CacheBackend):
104
116
  )
105
117
  conn.commit()
106
118
 
119
+ @ensure_cleanup_task
107
120
  def get(self, key: str) -> Optional[Any]:
108
121
  with self._sync_pool.connection() as conn:
109
122
  with conn.cursor() as cur:
@@ -152,22 +165,12 @@ class PostgresBackend(CacheBackend):
152
165
  )
153
166
  conn.commit()
154
167
 
168
+ @ensure_cleanup_task
155
169
  async def aset(
156
170
  self, key: str, value: Any, expire: Optional[Union[int, timedelta]] = None
157
171
  ) -> None:
158
-
159
- if not self._async_pool._opened:
160
- await self._async_pool.open()
161
-
162
- expire_at = None
163
- if expire:
164
- delta = (
165
- timedelta(seconds=expire)
166
- if isinstance(expire, int)
167
- else expire
168
- )
169
- expire_at = datetime.now(timezone.utc) + delta
170
-
172
+ await self._ensure_async_pool_open()
173
+ expire_at = self._compute_expire_at(expire)
171
174
  async with self._async_pool.connection() as conn:
172
175
  async with conn.cursor() as cur:
173
176
  await cur.execute(
@@ -182,9 +185,9 @@ class PostgresBackend(CacheBackend):
182
185
  )
183
186
  await conn.commit()
184
187
 
188
+ @ensure_cleanup_task
185
189
  async def aget(self, key: str) -> Optional[Any]:
186
- if not self._async_pool._opened:
187
- await self._async_pool.open()
190
+ await self._ensure_async_pool_open()
188
191
  async with self._async_pool.connection() as conn:
189
192
  async with conn.cursor() as cur:
190
193
  await cur.execute(
@@ -201,8 +204,7 @@ class PostgresBackend(CacheBackend):
201
204
  return pickle.loads(value)
202
205
 
203
206
  async def adelete(self, key: str) -> None:
204
- if not self._async_pool._opened:
205
- await self._async_pool.open()
207
+ await self._ensure_async_pool_open()
206
208
  async with self._async_pool.connection() as conn:
207
209
  async with conn.cursor() as cur:
208
210
  await cur.execute(
@@ -212,8 +214,7 @@ class PostgresBackend(CacheBackend):
212
214
  await conn.commit()
213
215
 
214
216
  async def ahas(self, key: str) -> bool:
215
- if not self._async_pool._opened:
216
- await self._async_pool.open()
217
+ await self._ensure_async_pool_open()
217
218
  async with self._async_pool.connection() as conn:
218
219
  async with conn.cursor() as cur:
219
220
  await cur.execute(
@@ -227,8 +228,7 @@ class PostgresBackend(CacheBackend):
227
228
 
228
229
  async def aclear(self) -> None:
229
230
  """Asynchronously clear all keys in the current namespace."""
230
- if not self._async_pool._opened:
231
- await self._async_pool.open()
231
+ await self._ensure_async_pool_open()
232
232
  async with self._async_pool.connection() as conn:
233
233
  async with conn.cursor() as cur:
234
234
  # FIX: Use the dynamic table name
@@ -241,3 +241,37 @@ class PostgresBackend(CacheBackend):
241
241
  async def close(self) -> None:
242
242
  self._sync_pool.close()
243
243
  await self._async_pool.close()
244
+
245
+ def _ensure_cleanup_task(self):
246
+ if self._auto_cleanup and self._cleanup_task is None:
247
+ try:
248
+ loop = asyncio.get_running_loop()
249
+ self._cleanup_task = loop.create_task(
250
+ self.cleanup_expired(self._cleanup_interval)
251
+ )
252
+ except RuntimeError:
253
+ pass
254
+
255
+ async def cleanup_expired(self, interval_seconds: int = 30):
256
+ while True:
257
+ await self._ensure_async_pool_open()
258
+ async with self._async_pool.connection() as conn:
259
+ async with conn.cursor() as cur:
260
+ await cur.execute(
261
+ f"DELETE FROM {self._table_name} WHERE expire_at IS NOT NULL AND expire_at < NOW();"
262
+ )
263
+ await conn.commit()
264
+ await asyncio.sleep(interval_seconds)
265
+
266
+ @staticmethod
267
+ def _compute_expire_at(
268
+ expire: Optional[Union[int, timedelta]],
269
+ ) -> Optional[datetime]:
270
+ if expire:
271
+ delta = timedelta(seconds=expire) if isinstance(expire, int) else expire
272
+ return datetime.now(timezone.utc) + delta
273
+ return None
274
+
275
+ async def _ensure_async_pool_open(self):
276
+ if not self._async_pool._opened:
277
+ await self._async_pool.open()
@@ -1,3 +1,4 @@
1
+ import asyncio
1
2
  from contextlib import asynccontextmanager
2
3
  from fastapi import FastAPI
3
4
  from typing import Optional, Callable, Union, AsyncIterator, Any
@@ -176,9 +177,21 @@ class FastAPICache:
176
177
  if not hasattr(app, "state"):
177
178
  app.state = {}
178
179
  app.state["cache"] = self
179
- yield
180
- self._backend = None
181
- self._app = None
180
+
181
+ try:
182
+ yield
183
+ finally:
184
+ if self._backend:
185
+ close = getattr(self._backend, "aclose", None)
186
+ if close:
187
+ await close()
188
+ else:
189
+ close = getattr(self._backend, "close", None)
190
+ if close:
191
+ close()
192
+
193
+ self._backend = None
194
+ self._app = None
182
195
 
183
196
  def init_app(
184
197
  self,
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: fastapi-cachekit
3
- Version: 0.1.2
3
+ Version: 0.1.3
4
4
  Summary: High-performance caching solution for FastAPI applications
5
5
  Author-email: Bijay Nayak <bijay6779@gmail.com>
6
6
  License-Expression: MIT
@@ -37,12 +37,15 @@ Requires-Dist: aiomcache>=0.8.1; extra == "memcached"
37
37
  Requires-Dist: pymemcache>=4.0.0; extra == "memcached"
38
38
  Provides-Extra: mongodb
39
39
  Requires-Dist: pymongo[gssapi,snappy,srv]>=4.6.0; extra == "mongodb"
40
+ Provides-Extra: firestore
41
+ Requires-Dist: google-cloud-firestore>=2.3.0; extra == "firestore"
40
42
  Provides-Extra: all
41
43
  Requires-Dist: redis>=4.2.0; extra == "all"
42
44
  Requires-Dist: psycopg[pool]>=3.2.9; extra == "all"
43
45
  Requires-Dist: aiomcache>=0.8.1; extra == "all"
44
46
  Requires-Dist: pymemcache>=4.0.0; extra == "all"
45
47
  Requires-Dist: pymongo[gssapi,snappy,srv]>=4.6.0; extra == "all"
48
+ Requires-Dist: google-cloud-firestore>=2.3.0; extra == "all"
46
49
  Dynamic: license-file
47
50
 
48
51
  # fastapi-cachekit
@@ -58,7 +61,7 @@ A high-performance, flexible caching solution for FastAPI applications. fastapi-
58
61
  ## Features
59
62
 
60
63
  - ✅ Full async/sync support for all operations
61
- - ✅ Redis backend with connection pooling
64
+ - ✅ Multiple backend Support So you can use the same tech stack as your app
62
65
  - ✅ Function result caching with decorator syntax
63
66
  - ✅ FastAPI dependency injection support
64
67
  - ✅ Namespace support for isolating cache entries
@@ -67,13 +70,14 @@ A high-performance, flexible caching solution for FastAPI applications. fastapi-
67
70
  - ✅ Expiration time support (seconds or timedelta)
68
71
 
69
72
  ## 📦 Backends & Sync/Async Support
70
-
71
- | Backend | Sync API | Async API | Install Extra |
72
- |--------------------|:--------:|:---------:|----------------------|
73
- | `InMemoryBackend` | ✅ | ✅ | _built-in_ |
74
- | `RedisBackend` | ✅ | ✅ | `redis` |
75
- | `PostgresBackend` | ✅ | ✅ | `postgres` |
76
- | `MemcachedBackend` | ✅ | ✅ | `memcached` |
73
+ | Backend | Sync API | Async API | Install Extra |
74
+ |--------------------|:--------:|:---------:|---------------|
75
+ | `InMemoryBackend` | ✅ | ✅ | _built-in_ |
76
+ | `RedisBackend` | ✅ | ✅ | `redis` |
77
+ | `PostgresBackend` | ✅ | ✅ | `postgres` |
78
+ | `MemcachedBackend` | ✅ | ✅ | `memcached` |
79
+ | `MongoDB` | ✅ | ✅ | `mongodb` |
80
+ | `FireStore` | ✅ | ✅ | `firestore` |
77
81
 
78
82
  ---
79
83
 
@@ -98,6 +102,15 @@ pip install fastapi-cachekit[postgres]
98
102
  ```bash
99
103
  pip install fastapi-cachekit[memcached]
100
104
  ```
105
+ **With MongoDB:**
106
+ ```bash
107
+ pip install fastapi-cachekit[mongodb]
108
+ ```
109
+
110
+ **With FireStore:**
111
+ ```bash
112
+ pip install fastapi-cachekit[firestore]
113
+ ```
101
114
 
102
115
  **All backends:**
103
116
  ```bash
@@ -5,6 +5,7 @@ fast_cache/__init__.py
5
5
  fast_cache/integration.py
6
6
  fast_cache/backends/__init__.py
7
7
  fast_cache/backends/backend.py
8
+ fast_cache/backends/google_firestore.py
8
9
  fast_cache/backends/memcached.py
9
10
  fast_cache/backends/memory.py
10
11
  fast_cache/backends/mongodb.py
@@ -6,6 +6,10 @@ psycopg[pool]>=3.2.9
6
6
  aiomcache>=0.8.1
7
7
  pymemcache>=4.0.0
8
8
  pymongo[gssapi,snappy,srv]>=4.6.0
9
+ google-cloud-firestore>=2.3.0
10
+
11
+ [firestore]
12
+ google-cloud-firestore>=2.3.0
9
13
 
10
14
  [memcached]
11
15
  aiomcache>=0.8.1
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "fastapi-cachekit"
3
- version = "0.1.2"
3
+ version = "0.1.3"
4
4
  description = "High-performance caching solution for FastAPI applications"
5
5
  readme = "README.md"
6
6
  license = "MIT"
@@ -66,12 +66,16 @@ memcached = [
66
66
  mongodb = [
67
67
  "pymongo[snappy,gssapi,srv]>=4.6.0"
68
68
  ]
69
+ firestore = [
70
+ 'google-cloud-firestore>=2.3.0'
71
+ ]
69
72
  all = [
70
73
  "redis>=4.2.0",
71
74
  "psycopg[pool]>=3.2.9",
72
75
  "aiomcache>=0.8.1",
73
76
  "pymemcache>=4.0.0",
74
- "pymongo[snappy,gssapi,srv]>=4.6.0"
77
+ "pymongo[snappy,gssapi,srv]>=4.6.0",
78
+ 'google-cloud-firestore>=2.3.0'
75
79
  ]
76
80
 
77
81
  [dependency-groups]