tiercache-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- smartcache/__init__.py +41 -0
- smartcache/backends/__init__.py +0 -0
- smartcache/backends/base.py +29 -0
- smartcache/backends/dry/__init__.py +0 -0
- smartcache/backends/dry/local.py +109 -0
- smartcache/backends/dry/mongodb.py +103 -0
- smartcache/backends/dry/s3.py +119 -0
- smartcache/backends/memcached.py +115 -0
- smartcache/backends/ram.py +101 -0
- smartcache/config.py +150 -0
- smartcache/manager.py +189 -0
- smartcache/serializer.py +18 -0
- smartcache/tracking/__init__.py +0 -0
- smartcache/tracking/base.py +29 -0
- smartcache/tracking/mongodb.py +88 -0
- smartcache/tracking/postgres.py +98 -0
- smartcache/tracking/redis.py +80 -0
- smartcache/tracking/sqlite.py +91 -0
- tiercache-0.1.0.dist-info/METADATA +40 -0
- tiercache-0.1.0.dist-info/RECORD +21 -0
- tiercache-0.1.0.dist-info/WHEEL +4 -0
smartcache/__init__.py
ADDED
@@ -0,0 +1,41 @@
```python
"""
SmartCache — RAM-first three-tier cache with swappable backends.

Quick start:

    from smartcache import CacheManager

    # From config file
    cache = CacheManager.from_config("smartcache.yaml")

    # Or in code
    from smartcache import CacheManager
    from smartcache.backends.ram import RamBackend
    from smartcache.backends.dry.local import LocalBackend
    from smartcache.tracking.sqlite import SQLiteTracking

    cache = CacheManager(
        hot=RamBackend(ttl_seconds=14400, max_size_bytes=2 * 1024**3),
        cold=RamBackend(ttl_seconds=86400, max_size_bytes=10 * 1024**3),
        dry=LocalBackend(base_path="/var/cache/smartcache", max_size_bytes=100 * 1024**3),
        tracking=SQLiteTracking(path="/var/cache/smartcache/index.db"),
    )

    value = await cache.get("my-key")
    await cache.set("my-key", data)
    await cache.set("my-key", data, ttl_hours=2)
    await cache.set("my-key", data, tags={"type": "thumbnail"})
"""

from .manager import CacheManager, TTLResolver
from .backends.base import AbstractBackend
from .tracking.base import AbstractTracking

__all__ = [
    "CacheManager",
    "TTLResolver",
    "AbstractBackend",
    "AbstractTracking",
]

__version__ = "0.1.0"
```
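config.py appears in the file list but is not shown in this diff, so the exact smartcache.yaml schema is not visible here. A hypothetical config, with field names guessed from the constructor arguments in the quick start above, might look like:

```yaml
# Hypothetical sketch only: smartcache/config.py defines the real schema,
# and this diff does not show it. Field names mirror the constructor args.
hot:
  backend: ram
  ttl_seconds: 14400
  max_size_bytes: 2147483648    # 2 GiB
cold:
  backend: ram
  ttl_seconds: 86400
  max_size_bytes: 10737418240   # 10 GiB
dry:
  backend: local
  base_path: /var/cache/smartcache
  max_size_bytes: 107374182400  # 100 GiB
tracking:
  backend: sqlite
  path: /var/cache/smartcache/index.db
```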
smartcache/backends/__init__.py
ADDED (empty file)

smartcache/backends/base.py
ADDED
@@ -0,0 +1,29 @@
```python
from abc import ABC, abstractmethod
from typing import Any, Optional


class AbstractBackend(ABC):

    @abstractmethod
    async def get(self, key: str) -> Optional[Any]:
        """Return value for key, or None if missing / expired."""

    @abstractmethod
    async def set(self, key: str, value: Any, ttl_seconds: Optional[int] = None) -> None:
        """Store value. ttl_seconds=None means no expiry."""

    @abstractmethod
    async def delete(self, key: str) -> None:
        """Remove a key. No-op if not found."""

    @abstractmethod
    async def flush(self) -> None:
        """Clear all entries."""

    @abstractmethod
    async def size_bytes(self) -> int:
        """Current memory/storage usage in bytes."""

    @abstractmethod
    async def close(self) -> None:
        """Release any connections or resources."""
```
smartcache/backends/dry/__init__.py
ADDED (empty file)

smartcache/backends/dry/local.py
ADDED
@@ -0,0 +1,109 @@
```python
import hashlib
import json
import shutil
import time
from pathlib import Path
from typing import Any, Optional

import aiofiles
import aiofiles.os

from ...serializer import dumps, loads
from ..base import AbstractBackend


class LocalBackend(AbstractBackend):
    """
    Local filesystem dry cache. Values are stored as pickled binary files
    with a JSON sidecar for metadata (TTL, key, created_at).

    Files are sharded into subdirectories by the first 4 hex chars of the
    key hash to avoid large flat directories.

    Requires: aiofiles (included in base install)
    """

    def __init__(self, base_path: str, max_size_bytes: int) -> None:
        self._base = Path(base_path)
        self._max_size = max_size_bytes
        self._base.mkdir(parents=True, exist_ok=True)

    # ------------------------------------------------------------------
    # Public interface
    # ------------------------------------------------------------------

    async def get(self, key: str) -> Optional[Any]:
        data_path, meta_path = self._paths(key)
        if not data_path.exists():
            return None
        meta = self._read_meta(meta_path)
        if meta is None:
            return None
        if self._expired(meta):
            await self._remove_files(data_path, meta_path)
            return None
        async with aiofiles.open(data_path, "rb") as f:
            return loads(await f.read())

    async def set(self, key: str, value: Any, ttl_seconds: Optional[int] = None) -> None:
        data_path, meta_path = self._paths(key)
        data_path.parent.mkdir(parents=True, exist_ok=True)
        raw = dumps(value)
        async with aiofiles.open(data_path, "wb") as f:
            await f.write(raw)
        meta = {
            "key": key,
            "created_at": time.time(),
            "ttl_seconds": ttl_seconds,
            "size": len(raw),
        }
        async with aiofiles.open(meta_path, "w") as f:
            await f.write(json.dumps(meta))

    async def delete(self, key: str) -> None:
        data_path, meta_path = self._paths(key)
        await self._remove_files(data_path, meta_path)

    async def flush(self) -> None:
        shutil.rmtree(self._base, ignore_errors=True)
        self._base.mkdir(parents=True, exist_ok=True)

    async def size_bytes(self) -> int:
        # Counts data files only; the JSON sidecars are negligibly small.
        total = 0
        for p in self._base.rglob("*.bin"):
            total += p.stat().st_size
        return total

    async def close(self) -> None:
        pass

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    def _paths(self, key: str) -> tuple[Path, Path]:
        h = hashlib.sha256(key.encode()).hexdigest()
        shard = self._base / h[:2] / h[2:4]
        return shard / f"{h}.bin", shard / f"{h}.meta.json"

    @staticmethod
    def _read_meta(path: Path) -> Optional[dict]:
        try:
            return json.loads(path.read_text())
        except (FileNotFoundError, json.JSONDecodeError):
            return None

    @staticmethod
    def _expired(meta: dict) -> bool:
        ttl = meta.get("ttl_seconds")
        if not ttl:
            return False
        return time.time() > meta["created_at"] + ttl

    @staticmethod
    async def _remove_files(*paths: Path) -> None:
        for p in paths:
            try:
                await aiofiles.os.remove(p)
            except FileNotFoundError:
                pass
```
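Used standalone, the sharded layout is easy to see on disk. A usage sketch (paths are placeholders; inside the package, LocalBackend is normally wired in as the dry tier by CacheManager):

```python
# Sketch: standalone LocalBackend usage. If sha256("report-42") began with
# "ab12", the entry would land at <base>/ab/12/<hash>.bin with a
# <hash>.meta.json sidecar next to it.
import asyncio

from smartcache.backends.dry.local import LocalBackend


async def main() -> None:
    backend = LocalBackend(base_path="/tmp/smartcache-demo", max_size_bytes=10 * 1024**2)
    await backend.set("report-42", {"rows": 1000}, ttl_seconds=3600)
    print(await backend.get("report-42"))  # {'rows': 1000}
    print(await backend.size_bytes())      # total size of the .bin files on disk
    await backend.close()


asyncio.run(main())
```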
smartcache/backends/dry/mongodb.py
ADDED
@@ -0,0 +1,103 @@
```python
from datetime import datetime, timedelta
from typing import Any, Optional

from ...serializer import dumps, loads
from ..base import AbstractBackend


class MongoDBBackend(AbstractBackend):
    """
    MongoDB GridFS dry cache using motor (async driver).
    Large values are stored via GridFS; metadata (TTL, key) lives in the
    files collection. A TTL index on `metadata.expires_at` lets MongoDB
    handle expiry automatically — no cron job needed.

    Requires: pip install smartcache[mongodb]
    """

    def __init__(self, uri: str, database: str) -> None:
        try:
            import motor.motor_asyncio as motor
        except ImportError:
            raise ImportError(
                "MongoDB backend requires motor. "
                "Install it with: pip install smartcache[mongodb]"
            )
        self._motor = motor
        self._uri = uri
        self._database = database
        self._client: Optional[Any] = None
        self._fs: Optional[Any] = None

    async def _get_fs(self) -> Any:
        if self._fs is None:
            self._client = self._motor.AsyncIOMotorClient(self._uri)
            db = self._client[self._database]
            self._fs = self._motor.AsyncIOMotorGridFSBucket(db, bucket_name="smartcache")
            # Ensure TTL index on the expires_at metadata field. TTL indexes
            # only act on BSON dates, so expires_at is stored as a datetime.
            await db["smartcache.files"].create_index(
                "metadata.expires_at",
                expireAfterSeconds=0,
                sparse=True,
            )
        return self._fs

    async def get(self, key: str) -> Optional[Any]:
        fs = await self._get_fs()
        cursor = fs.find({"metadata.cache_key": key})
        docs = await cursor.to_list(length=1)
        if not docs:
            return None
        doc = docs[0]
        expires_at = doc.get("metadata", {}).get("expires_at")
        # PyMongo returns naive UTC datetimes by default.
        if expires_at and datetime.utcnow() > expires_at:
            await fs.delete(doc["_id"])
            return None
        stream = await fs.open_download_stream(doc["_id"])
        raw = await stream.read()
        return loads(raw)

    async def set(self, key: str, value: Any, ttl_seconds: Optional[int] = None) -> None:
        fs = await self._get_fs()
        # Delete existing entry for this key first
        await self.delete(key)
        raw = dumps(value)
        metadata: dict[str, Any] = {"cache_key": key, "created_at": datetime.utcnow()}
        if ttl_seconds:
            # Stored as a BSON date so the TTL index can expire it server-side.
            metadata["expires_at"] = datetime.utcnow() + timedelta(seconds=ttl_seconds)
        await fs.upload_from_stream(
            key,
            raw,
            metadata=metadata,
        )

    async def delete(self, key: str) -> None:
        fs = await self._get_fs()
        cursor = fs.find({"metadata.cache_key": key})
        docs = await cursor.to_list(length=None)
        for doc in docs:
            await fs.delete(doc["_id"])

    async def flush(self) -> None:
        fs = await self._get_fs()
        cursor = fs.find({})
        docs = await cursor.to_list(length=None)
        for doc in docs:
            await fs.delete(doc["_id"])

    async def size_bytes(self) -> int:
        if self._client is None:
            return 0
        db = self._client[self._database]
        result = await db["smartcache.files"].aggregate([
            {"$group": {"_id": None, "total": {"$sum": "$length"}}}
        ]).to_list(length=1)
        return result[0]["total"] if result else 0

    async def close(self) -> None:
        if self._client is not None:
            self._client.close()
            self._client = None
            self._fs = None
```
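A standalone usage sketch with placeholder connection details. Note that MongoDB's background TTL monitor runs roughly once a minute, so server-side expiry is not instantaneous; the read-time check in get covers the gap:

```python
# Sketch: standalone MongoDBBackend usage against a local MongoDB.
# The URI and database name are placeholders.
import asyncio

from smartcache.backends.dry.mongodb import MongoDBBackend


async def main() -> None:
    backend = MongoDBBackend(uri="mongodb://localhost:27017", database="cache_demo")
    # 32 MB payload goes through GridFS transparently.
    await backend.set("big-blob", b"x" * (32 * 1024**2), ttl_seconds=86400)
    blob = await backend.get("big-blob")
    print(len(blob) if blob is not None else "miss")
    await backend.close()


asyncio.run(main())
```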
smartcache/backends/dry/s3.py
ADDED
@@ -0,0 +1,119 @@
```python
import time
from typing import Any, Optional

from ...serializer import dumps, loads
from ..base import AbstractBackend

_META_TTL_KEY = "x-smartcache-ttl"
_META_CREATED_KEY = "x-smartcache-created"


class S3Backend(AbstractBackend):
    """
    S3-compatible dry cache using aioboto3.
    TTL is stored as object metadata and checked on get.

    Compatible with: AWS S3, MinIO, Cloudflare R2, and any S3-compatible service.

    Requires: pip install smartcache[s3]
    """

    def __init__(
        self,
        bucket: str,
        endpoint_url: Optional[str] = None,
        access_key: Optional[str] = None,
        secret_key: Optional[str] = None,
        prefix: str = "smartcache/",
    ) -> None:
        try:
            import aioboto3
        except ImportError:
            raise ImportError(
                "S3 backend requires aioboto3. "
                "Install it with: pip install smartcache[s3]"
            )
        self._aioboto3 = aioboto3
        self._bucket = bucket
        self._endpoint_url = endpoint_url
        self._access_key = access_key
        self._secret_key = secret_key
        self._prefix = prefix
        self._session: Optional[Any] = None

    def _object_key(self, key: str) -> str:
        return f"{self._prefix}{key}"

    def _get_session(self) -> Any:
        # Sessions are safe to reuse; build one lazily and keep it.
        if self._session is None:
            kwargs: dict = {}
            if self._access_key:
                kwargs["aws_access_key_id"] = self._access_key
            if self._secret_key:
                kwargs["aws_secret_access_key"] = self._secret_key
            self._session = self._aioboto3.Session(**kwargs)
        return self._session

    async def get(self, key: str) -> Optional[Any]:
        session = self._get_session()
        async with session.client("s3", endpoint_url=self._endpoint_url) as s3:
            try:
                response = await s3.get_object(
                    Bucket=self._bucket, Key=self._object_key(key)
                )
            except s3.exceptions.NoSuchKey:
                return None
            except Exception:
                # Some S3-compatible services raise a generic ClientError
                # (plain 404) instead of NoSuchKey; treat any failure as a miss.
                return None

            meta = response.get("Metadata", {})
            ttl = meta.get(_META_TTL_KEY)
            created = meta.get(_META_CREATED_KEY)
            if ttl and created:
                if time.time() > float(created) + int(ttl):
                    await self.delete(key)
                    return None

            body = await response["Body"].read()
            return loads(body)

    async def set(self, key: str, value: Any, ttl_seconds: Optional[int] = None) -> None:
        raw = dumps(value)
        meta: dict[str, str] = {_META_CREATED_KEY: str(time.time())}
        if ttl_seconds:
            meta[_META_TTL_KEY] = str(ttl_seconds)
        session = self._get_session()
        async with session.client("s3", endpoint_url=self._endpoint_url) as s3:
            await s3.put_object(
                Bucket=self._bucket,
                Key=self._object_key(key),
                Body=raw,
                Metadata=meta,
            )

    async def delete(self, key: str) -> None:
        session = self._get_session()
        async with session.client("s3", endpoint_url=self._endpoint_url) as s3:
            await s3.delete_object(Bucket=self._bucket, Key=self._object_key(key))

    async def flush(self) -> None:
        session = self._get_session()
        async with session.client("s3", endpoint_url=self._endpoint_url) as s3:
            paginator = s3.get_paginator("list_objects_v2")
            async for page in paginator.paginate(Bucket=self._bucket, Prefix=self._prefix):
                objects = [{"Key": o["Key"]} for o in page.get("Contents", [])]
                if objects:
                    await s3.delete_objects(
                        Bucket=self._bucket, Delete={"Objects": objects}
                    )

    async def size_bytes(self) -> int:
        total = 0
        session = self._get_session()
        async with session.client("s3", endpoint_url=self._endpoint_url) as s3:
            paginator = s3.get_paginator("list_objects_v2")
            async for page in paginator.paginate(Bucket=self._bucket, Prefix=self._prefix):
                for obj in page.get("Contents", []):
                    total += obj.get("Size", 0)
        return total

    async def close(self) -> None:
        pass
```
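Unlike the MongoDB backend, nothing expires objects server-side here: an expired entry lingers in the bucket until the next get deletes it (or an S3 lifecycle rule does). A standalone sketch against a local MinIO instance, with placeholder endpoint and credentials; the bucket must already exist:

```python
# Sketch: S3Backend pointed at a local MinIO. Endpoint, bucket name,
# and credentials are placeholders.
import asyncio

from smartcache.backends.dry.s3 import S3Backend


async def main() -> None:
    backend = S3Backend(
        bucket="smartcache-demo",
        endpoint_url="http://localhost:9000",
        access_key="minioadmin",
        secret_key="minioadmin",
    )
    await backend.set("thumb:123", b"<jpeg bytes>", ttl_seconds=7 * 86400)
    print(await backend.get("thumb:123"))  # stored under smartcache/thumb:123
    await backend.close()


asyncio.run(main())
```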
smartcache/backends/memcached.py
ADDED
@@ -0,0 +1,115 @@
```python
from typing import Any, Optional

from ..serializer import dumps, loads
from .base import AbstractBackend

# Memcached default max item size is 1MB.
# We chunk at 900KB to stay safely under the limit regardless of server config.
_CHUNK_SIZE = 900 * 1024  # 900 KB
_META_SUFFIX = b"__chunks__"


class MemcachedBackend(AbstractBackend):
    """
    Memcached-backed RAM cache using aiomcache.
    Values are pickled before storage.
    Large values are automatically split into 900KB chunks and reassembled
    on get — no server-side config needed regardless of item size.
    TTL is enforced natively by Memcached.
    Suitable for multi-process or multi-server deployments.

    Requires: pip install smartcache[memcached]
    """

    def __init__(self, host: str, port: int, ttl_seconds: int, max_size_bytes: int) -> None:
        try:
            import aiomcache
        except ImportError:
            raise ImportError(
                "Memcached backend requires aiomcache. "
                "Install it with: pip install smartcache[memcached]"
            )
        self._aiomcache = aiomcache
        self._host = host
        self._port = port
        self._ttl = ttl_seconds
        self._max_size = max_size_bytes
        self._client: Optional[Any] = None

    async def _get_client(self) -> Any:
        if self._client is None:
            self._client = self._aiomcache.Client(self._host, self._port)
        return self._client

    # ------------------------------------------------------------------
    # Public interface
    # ------------------------------------------------------------------

    async def get(self, key: str) -> Optional[Any]:
        client = await self._get_client()
        bkey = key.encode()

        # Check if this key was stored in chunks
        meta_raw = await client.get(bkey + _META_SUFFIX)
        if meta_raw is not None:
            n_chunks = int(meta_raw)
            chunks = []
            for i in range(n_chunks):
                chunk = await client.get(f"{key}__chunk_{i}".encode())
                if chunk is None:
                    return None  # partial expiry — treat as miss
                chunks.append(chunk)
            return loads(b"".join(chunks))

        # Single item
        raw = await client.get(bkey)
        if raw is None:
            return None
        return loads(raw)

    async def set(self, key: str, value: Any, ttl_seconds: Optional[int] = None) -> None:
        ttl = ttl_seconds if ttl_seconds is not None else self._ttl
        exptime = ttl or 0
        client = await self._get_client()
        raw = dumps(value)
        # Clear any previous entry so a key can switch between chunked and
        # single-item storage without leaving stale chunk metadata behind.
        await self.delete(key)

        if len(raw) <= _CHUNK_SIZE:
            await client.set(key.encode(), raw, exptime=exptime)
            return

        # Split into chunks
        chunks = [raw[i: i + _CHUNK_SIZE] for i in range(0, len(raw), _CHUNK_SIZE)]
        for i, chunk in enumerate(chunks):
            await client.set(f"{key}__chunk_{i}".encode(), chunk, exptime=exptime)
        # Store chunk count as metadata key
        await client.set(
            key.encode() + _META_SUFFIX,
            str(len(chunks)).encode(),
            exptime=exptime,
        )

    async def delete(self, key: str) -> None:
        client = await self._get_client()
        bkey = key.encode()

        meta_raw = await client.get(bkey + _META_SUFFIX)
        if meta_raw is not None:
            n_chunks = int(meta_raw)
            for i in range(n_chunks):
                await client.delete(f"{key}__chunk_{i}".encode())
            await client.delete(bkey + _META_SUFFIX)
        else:
            await client.delete(bkey)

    async def flush(self) -> None:
        client = await self._get_client()
        await client.flush_all()

    async def size_bytes(self) -> int:
        # Memcached does not expose per-key sizes
        return 0

    async def close(self) -> None:
        if self._client is not None:
            await self._client.close()
            self._client = None
```
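To make the chunking scheme concrete, here is the key layout a 2 MiB value produces at the 900 KiB chunk size (the key name is hypothetical):

```python
# Sketch: the keys the chunking scheme writes for one large value. All of
# them share the same exptime, so they expire together; if any chunk is
# missing on get, the whole entry is treated as a miss.
raw = b"x" * (2 * 1024**2)  # 2 MiB payload
chunk_size = 900 * 1024     # _CHUNK_SIZE
chunks = [raw[i:i + chunk_size] for i in range(0, len(raw), chunk_size)]
print(len(chunks))                                       # 3
print([f"mykey__chunk_{i}" for i in range(len(chunks))]) # the chunk keys
print(b"mykey__chunks__", str(len(chunks)).encode())     # counter key -> b'3'
```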
smartcache/backends/ram.py
ADDED
@@ -0,0 +1,101 @@
```python
import asyncio
import sys
import time
from collections import OrderedDict
from typing import Any, Awaitable, Callable, Optional

from .base import AbstractBackend

EvictCallback = Callable[[str, Any], Awaitable[None]]


class RamBackend(AbstractBackend):
    """
    In-process RAM cache using an OrderedDict for O(1) LRU eviction.
    TTL is checked lazily on get. Size is estimated from value length (bytes)
    or sys.getsizeof for other types.

    on_evict: optional async callback(key, value) fired when an entry is
    dropped due to LRU pressure. (Expired and explicitly deleted entries
    are not demoted.) Used by CacheManager to demote evicted entries to
    the dry cache (failsafe).
    """

    def __init__(
        self,
        ttl_seconds: int,
        max_size_bytes: int,
        on_evict: Optional[EvictCallback] = None,
    ) -> None:
        self._ttl = ttl_seconds
        self._max_size = max_size_bytes
        self._on_evict = on_evict
        # key -> (value, expiry_monotonic); expiry=0 means no expiry
        self._store: OrderedDict[str, tuple[Any, float]] = OrderedDict()
        self._current_size = 0
        self._lock = asyncio.Lock()

    # ------------------------------------------------------------------
    # Public interface
    # ------------------------------------------------------------------

    async def get(self, key: str) -> Optional[Any]:
        async with self._lock:
            entry = self._store.get(key)
            if entry is None:
                return None
            value, expiry = entry
            if self._expired(expiry):
                self._remove(key, evict=False)  # expired — don't demote to dry
                return None
            self._store.move_to_end(key)
            return value

    async def set(self, key: str, value: Any, ttl_seconds: Optional[int] = None) -> None:
        ttl = ttl_seconds if ttl_seconds is not None else self._ttl
        expiry = time.monotonic() + ttl if ttl else 0.0
        size = self._measure(value)
        async with self._lock:
            if key in self._store:
                # Pop the old entry so the eviction loop below cannot pick
                # it and subtract its size a second time.
                old_value, _ = self._store.pop(key)
                self._current_size -= self._measure(old_value)
            while self._current_size + size > self._max_size and self._store:
                self._remove(next(iter(self._store)), evict=True)  # LRU — demote to dry
            self._store[key] = (value, expiry)  # appends at the MRU end
            self._current_size += size

    async def delete(self, key: str) -> None:
        async with self._lock:
            if key in self._store:
                self._remove(key, evict=False)  # explicit delete — don't demote to dry

    async def flush(self) -> None:
        async with self._lock:
            self._store.clear()
            self._current_size = 0

    async def size_bytes(self) -> int:
        return self._current_size

    async def close(self) -> None:
        pass

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    def _remove(self, key: str, evict: bool = False) -> None:
        value, _ = self._store.pop(key)
        self._current_size -= self._measure(value)
        if evict and self._on_evict is not None:
            # Fire-and-forget: the callback runs in the background, outside
            # this lock; callers needing completion must await it themselves.
            asyncio.create_task(self._on_evict(key, value))

    @staticmethod
    def _expired(expiry: float) -> bool:
        return expiry != 0.0 and time.monotonic() > expiry

    @staticmethod
    def _measure(value: Any) -> int:
        if isinstance(value, (bytes, bytearray, memoryview)):
            return len(value)
        return sys.getsizeof(value)
```
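The on_evict hook is what lets LRU casualties survive as dry-cache entries. A standalone sketch wiring it by hand, mimicking what the docstring says CacheManager does; sizes are deliberately tiny to force an eviction, and the path is a placeholder:

```python
# Sketch: demote LRU evictions from a tiny RamBackend into a LocalBackend.
import asyncio

from smartcache.backends.dry.local import LocalBackend
from smartcache.backends.ram import RamBackend


async def main() -> None:
    dry = LocalBackend(base_path="/tmp/smartcache-demo", max_size_bytes=10 * 1024**2)

    async def demote(key: str, value) -> None:
        await dry.set(key, value)

    hot = RamBackend(ttl_seconds=0, max_size_bytes=1024, on_evict=demote)
    await hot.set("a", b"x" * 800)
    await hot.set("b", b"y" * 800)         # over budget: evicts "a", demote() fires
    await asyncio.sleep(0.1)               # let the fire-and-forget task finish
    print(await hot.get("a"))              # None, gone from RAM
    print(await dry.get("a") is not None)  # True, demoted to disk


asyncio.run(main())
```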