msaas-cache 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ node_modules/
2
+ dist/
3
+ .next/
4
+ .turbo/
5
+ *.pyc
6
+ __pycache__/
7
+ .venv/
8
+ *.egg-info/
9
+ .pytest_cache/
10
+ .ruff_cache/
11
+ .env
12
+ .env.local
13
+ .env.*.local
14
+ .DS_Store
15
+ coverage/
16
+
17
+ # Runtime artifacts
18
+ logs_llm/
19
+ vectors.db
20
+ vectors.db-shm
21
+ vectors.db-wal
@@ -0,0 +1,21 @@
1
+ Metadata-Version: 2.4
2
+ Name: msaas-cache
3
+ Version: 0.1.0
4
+ Summary: Caching library with Redis and in-memory backends
5
+ Requires-Python: >=3.12
6
+ Requires-Dist: pydantic>=2.0
7
+ Provides-Extra: all
8
+ Requires-Dist: fastapi>=0.115.0; extra == 'all'
9
+ Requires-Dist: redis[hiredis]>=5.0.0; extra == 'all'
10
+ Provides-Extra: dev
11
+ Requires-Dist: fakeredis[lua]>=2.25; extra == 'dev'
12
+ Requires-Dist: fastapi>=0.115.0; extra == 'dev'
13
+ Requires-Dist: httpx>=0.27.0; extra == 'dev'
14
+ Requires-Dist: mypy; extra == 'dev'
15
+ Requires-Dist: pytest-asyncio>=0.24; extra == 'dev'
16
+ Requires-Dist: pytest>=8.0; extra == 'dev'
17
+ Requires-Dist: ruff; extra == 'dev'
18
+ Provides-Extra: fastapi
19
+ Requires-Dist: fastapi>=0.115.0; extra == 'fastapi'
20
+ Provides-Extra: redis
21
+ Requires-Dist: redis[hiredis]>=5.0.0; extra == 'redis'
@@ -0,0 +1,41 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "msaas-cache"
7
+ version = "0.1.0"
8
+ description = "Caching library with Redis and in-memory backends"
9
+ requires-python = ">=3.12"
10
+ dependencies = [
11
+ "pydantic>=2.0",
12
+ ]
13
+
14
+ [project.optional-dependencies]
15
+ redis = ["redis[hiredis]>=5.0.0"]
16
+ fastapi = ["fastapi>=0.115.0"]
17
+ all = ["redis[hiredis]>=5.0.0", "fastapi>=0.115.0"]
18
+ dev = [
19
+ "pytest>=8.0",
20
+ "pytest-asyncio>=0.24",
21
+ "fakeredis[lua]>=2.25",
22
+ "fastapi>=0.115.0",
23
+ "httpx>=0.27.0",
24
+ "ruff",
25
+ "mypy",
26
+ ]
27
+
28
+ [tool.hatch.build.targets.wheel]
29
+ packages = ["src/cache"]
30
+
31
+ [tool.pytest.ini_options]
32
+ asyncio_mode = "auto"
33
+ testpaths = ["tests"]
34
+
35
+ [tool.ruff]
36
+ target-version = "py312"
37
+ line-length = 100
38
+
39
+ [tool.mypy]
40
+ python_version = "3.12"
41
+ strict = true
@@ -0,0 +1,34 @@
1
+ """willian-cache: Caching library with Redis and in-memory backends."""
2
+
3
+ from cache.backends.base import CacheBackend
4
+ from cache.backends.memory_backend import MemoryBackend
5
+ from cache.config import (
6
+ CacheConfig,
7
+ close_cache,
8
+ get_cache,
9
+ init_cache,
10
+ set_cache,
11
+ )
12
+ from cache.decorator import cached
13
+ from cache.middleware import CacheMiddleware, cache_response
14
+ from cache.patterns import cache_aside, cache_stampede_protection, write_through
15
+ from cache.serialization import JsonSerializer, PickleSerializer, Serializer
16
+
17
+ __all__ = [
18
+ "CacheBackend",
19
+ "CacheConfig",
20
+ "CacheMiddleware",
21
+ "JsonSerializer",
22
+ "MemoryBackend",
23
+ "PickleSerializer",
24
+ "Serializer",
25
+ "cache_aside",
26
+ "cache_response",
27
+ "cache_stampede_protection",
28
+ "cached",
29
+ "close_cache",
30
+ "get_cache",
31
+ "init_cache",
32
+ "set_cache",
33
+ "write_through",
34
+ ]
@@ -0,0 +1 @@
1
+ """Cache backend implementations."""
@@ -0,0 +1,63 @@
1
+ """Abstract cache backend interface."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from abc import ABC, abstractmethod
6
+ from typing import Any
7
+
8
+
9
+ class CacheBackend(ABC):
10
+ """Base class for all cache backends.
11
+
12
+ Every backend must implement the core operations: get, set, delete, exists,
13
+ clear. Bulk operations (get_many, set_many, delete_many) have default
14
+ implementations that delegate to the single-key variants but can be
15
+ overridden for efficiency (e.g. Redis pipelines).
16
+ """
17
+
18
+ @abstractmethod
19
+ async def get(self, key: str) -> Any | None:
20
+ """Return the cached value or ``None`` if missing / expired."""
21
+
22
+ @abstractmethod
23
+ async def set(self, key: str, value: Any, ttl: int | None = None) -> None:
24
+ """Store *value* under *key* with an optional TTL in seconds."""
25
+
26
+ @abstractmethod
27
+ async def delete(self, key: str) -> bool:
28
+ """Delete *key* and return ``True`` if it existed."""
29
+
30
+ @abstractmethod
31
+ async def exists(self, key: str) -> bool:
32
+ """Check whether *key* exists and is not expired."""
33
+
34
+ @abstractmethod
35
+ async def clear(self) -> None:
36
+ """Remove all keys managed by this backend."""
37
+
38
+ async def get_many(self, keys: list[str]) -> dict[str, Any]:
39
+ """Return a dict of key -> value for all keys that exist."""
40
+ result: dict[str, Any] = {}
41
+ for key in keys:
42
+ value = await self.get(key)
43
+ if value is not None:
44
+ result[key] = value
45
+ return result
46
+
47
+ async def set_many(
48
+ self, mapping: dict[str, Any], ttl: int | None = None
49
+ ) -> None:
50
+ """Set multiple key-value pairs with the same TTL."""
51
+ for key, value in mapping.items():
52
+ await self.set(key, value, ttl=ttl)
53
+
54
+ async def delete_many(self, keys: list[str]) -> int:
55
+ """Delete multiple keys and return count of keys that existed."""
56
+ count = 0
57
+ for key in keys:
58
+ if await self.delete(key):
59
+ count += 1
60
+ return count
61
+
62
+ async def close(self) -> None:
63
+ """Release any resources held by the backend."""
@@ -0,0 +1,124 @@
1
+ """In-memory cache backend with TTL tracking and LRU eviction."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import time
7
+ from collections import OrderedDict
8
+ from typing import Any
9
+
10
+ from cache.backends.base import CacheBackend
11
+
12
+
13
+ class _Entry:
14
+ """A cache entry with an optional expiration timestamp."""
15
+
16
+ __slots__ = ("value", "expires_at")
17
+
18
+ def __init__(self, value: Any, expires_at: float | None) -> None:
19
+ self.value = value
20
+ self.expires_at = expires_at
21
+
22
+ def is_expired(self) -> bool:
23
+ return self.expires_at is not None and time.monotonic() >= self.expires_at
24
+
25
+
26
class MemoryBackend(CacheBackend):
    """Dict-backed cache with per-key TTL and LRU eviction.

    Args:
        max_size: Maximum number of entries. 0 means unlimited.
        cleanup_interval: Seconds between background cleanup sweeps. Set to 0
            to disable automatic cleanup (expired keys are still evicted lazily
            on access).
    """

    def __init__(
        self,
        max_size: int = 0,
        cleanup_interval: float = 60.0,
    ) -> None:
        # OrderedDict gives O(1) LRU bookkeeping via move_to_end/popitem.
        self._store: OrderedDict[str, _Entry] = OrderedDict()
        self._max_size = max_size
        self._cleanup_interval = cleanup_interval
        self._cleanup_task: asyncio.Task[None] | None = None

    # -- lifecycle ------------------------------------------------------------

    def start_cleanup(self) -> None:
        """Start the background cleanup task if an interval is configured.

        Must be called from within a running event loop.
        """
        if self._cleanup_interval > 0 and self._cleanup_task is None:
            self._cleanup_task = asyncio.create_task(self._cleanup_loop())

    def _stop_cleanup(self) -> None:
        """Cancel the background cleanup task without awaiting its exit."""
        if self._cleanup_task is not None:
            self._cleanup_task.cancel()
            self._cleanup_task = None

    async def close(self) -> None:
        """Stop the cleanup task, await its exit, and drop all entries.

        Awaiting the cancelled task (instead of just cancelling it) ensures
        the sweeper has fully terminated before the store is cleared, and
        avoids leaving a pending cancelled task behind.
        """
        task = self._cleanup_task
        self._cleanup_task = None
        if task is not None:
            task.cancel()
            # _cleanup_loop swallows CancelledError, but guard anyway in case
            # cancellation lands before the loop body is entered.
            try:
                await task
            except asyncio.CancelledError:
                pass
        self._store.clear()

    # -- core operations ------------------------------------------------------

    async def get(self, key: str) -> Any | None:
        """Return the value for *key*, or ``None`` if absent or expired."""
        entry = self._store.get(key)
        if entry is None:
            return None
        if entry.is_expired():
            # Lazy eviction: expired entries are dropped on access.
            del self._store[key]
            return None
        # Mark as recently used (move to end).
        self._store.move_to_end(key)
        return entry.value

    async def set(self, key: str, value: Any, ttl: int | None = None) -> None:
        """Store *value* under *key*; ``ttl`` of None or 0 means no expiry."""
        expires_at = (time.monotonic() + ttl) if ttl else None

        # If key already exists, update in place.
        if key in self._store:
            self._store[key] = _Entry(value, expires_at)
            self._store.move_to_end(key)
            return

        if self._max_size > 0 and len(self._store) >= self._max_size:
            # Reclaim expired entries first so live LRU entries are only
            # sacrificed when the cache is genuinely full of live data.
            self._sweep_expired()
            while len(self._store) >= self._max_size:
                self._store.popitem(last=False)

        self._store[key] = _Entry(value, expires_at)

    async def delete(self, key: str) -> bool:
        """Delete *key*; return True if it was present."""
        try:
            del self._store[key]
            return True
        except KeyError:
            return False

    async def exists(self, key: str) -> bool:
        """Return True if *key* is present and not expired."""
        entry = self._store.get(key)
        if entry is None:
            return False
        if entry.is_expired():
            del self._store[key]
            return False
        return True

    async def clear(self) -> None:
        """Drop every entry."""
        self._store.clear()

    # -- background cleanup ---------------------------------------------------

    async def _cleanup_loop(self) -> None:
        """Periodically sweep expired entries until cancelled."""
        try:
            while True:
                await asyncio.sleep(self._cleanup_interval)
                self._sweep_expired()
        except asyncio.CancelledError:
            pass

    def _sweep_expired(self) -> None:
        """Remove all currently-expired entries in one pass."""
        # Collect first, then delete: avoids mutating the dict mid-iteration.
        expired = [k for k, e in self._store.items() if e.is_expired()]
        for k in expired:
            del self._store[k]
@@ -0,0 +1,118 @@
1
+ """Redis-backed cache backend using redis.asyncio."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Any
6
+
7
+ from cache.backends.base import CacheBackend
8
+ from cache.serialization import JsonSerializer, Serializer
9
+
10
+
11
class RedisBackend(CacheBackend):
    """Cache backend backed by Redis.

    Args:
        client: An ``redis.asyncio.Redis`` instance.
        prefix: Key prefix / namespace applied to every key.
        default_ttl: Default TTL in seconds when none is provided to ``set``.
        serializer: Serializer for values. Defaults to JSON.
    """

    def __init__(
        self,
        client: Any,  # redis.asyncio.Redis — typed as Any to keep redis optional
        prefix: str = "",
        default_ttl: int = 300,
        serializer: Serializer | None = None,
    ) -> None:
        self._client = client
        self._prefix = prefix
        self._default_ttl = default_ttl
        self._serializer: Serializer = serializer or JsonSerializer()

    def _prefixed(self, key: str) -> str:
        """Apply the configured namespace prefix to *key*."""
        return f"{self._prefix}:{key}" if self._prefix else key

    # -- core operations ------------------------------------------------------

    async def get(self, key: str) -> Any | None:
        """Fetch and deserialize the value for *key*; None on a miss."""
        payload: bytes | None = await self._client.get(self._prefixed(key))
        return None if payload is None else self._serializer.loads(payload)

    async def set(self, key: str, value: Any, ttl: int | None = None) -> None:
        """Serialize and store *value*; a non-positive TTL means no expiry."""
        effective = self._default_ttl if ttl is None else ttl
        payload = self._serializer.dumps(value)
        target = self._prefixed(key)
        if effective > 0:
            await self._client.setex(target, effective, payload)
        else:
            await self._client.set(target, payload)

    async def delete(self, key: str) -> bool:
        """Delete *key*; True if Redis reported a removal."""
        removed = await self._client.delete(self._prefixed(key))
        return bool(removed)

    async def exists(self, key: str) -> bool:
        """Check for *key* in Redis."""
        found = await self._client.exists(self._prefixed(key))
        return bool(found)

    async def clear(self) -> None:
        """Remove every key in this backend's namespace.

        With a prefix configured, SCAN + DELETE only the namespaced keys;
        without one, fall back to flushing the whole database.
        """
        if not self._prefix:
            await self._client.flushdb()
            return
        pattern = f"{self._prefix}:*"
        cursor: int | bytes = 0
        while True:
            cursor, batch = await self._client.scan(
                cursor=cursor, match=pattern, count=100
            )
            if batch:
                await self._client.delete(*batch)
            if cursor == 0:
                break

    # -- bulk operations via pipeline -----------------------------------------

    async def get_many(self, keys: list[str]) -> dict[str, Any]:
        """Pipeline GETs for all *keys*; missing keys are omitted."""
        if not keys:
            return {}
        pipe = self._client.pipeline()
        for k in keys:
            pipe.get(self._prefixed(k))
        raw_values = await pipe.execute()
        return {
            k: self._serializer.loads(raw)
            for k, raw in zip(keys, raw_values)
            if raw is not None
        }

    async def set_many(
        self, mapping: dict[str, Any], ttl: int | None = None
    ) -> None:
        """Pipeline SETs for every pair in *mapping* with a shared TTL."""
        if not mapping:
            return
        effective = self._default_ttl if ttl is None else ttl
        pipe = self._client.pipeline()
        for k, v in mapping.items():
            payload = self._serializer.dumps(v)
            target = self._prefixed(k)
            if effective > 0:
                pipe.setex(target, effective, payload)
            else:
                pipe.set(target, payload)
        await pipe.execute()

    async def delete_many(self, keys: list[str]) -> int:
        """Issue one DELETE covering every key; returns how many existed."""
        if not keys:
            return 0
        removed = await self._client.delete(
            *(self._prefixed(k) for k in keys)
        )
        return int(removed)

    async def close(self) -> None:
        """Close the underlying Redis client connection."""
        await self._client.aclose()
@@ -0,0 +1,105 @@
1
+ """Cache configuration and global instance management."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from enum import Enum
6
+ from typing import Any
7
+
8
+ from pydantic import BaseModel, Field
9
+
10
+ from cache.backends.base import CacheBackend
11
+ from cache.serialization import JsonSerializer, Serializer
12
+
13
+
14
class BackendType(str, Enum):
    """Supported backend identifiers (string-valued so config can use plain strings)."""

    MEMORY = "memory"
    REDIS = "redis"
17
+
18
+
19
class CacheConfig(BaseModel):
    """Configuration for the cache system.

    Attributes:
        backend: Which backend to use (``memory`` or ``redis``).
        redis_url: Redis connection URL (only used when ``backend="redis"``).
        default_ttl: Default TTL in seconds for cache entries.
        key_prefix: Namespace prefix applied to all keys.
        max_memory_items: Maximum items for the in-memory backend (0 = unlimited).
        serializer_type: ``"json"`` or ``"pickle"``.
    """

    backend: BackendType = BackendType.MEMORY
    redis_url: str = "redis://localhost:6379"
    # ge=0 lets pydantic reject negative TTLs/limits at construction time.
    default_ttl: int = Field(default=300, ge=0)
    key_prefix: str = ""
    max_memory_items: int = Field(default=0, ge=0)
    serializer_type: str = "json"
37
+
38
+
39
# -- global state -------------------------------------------------------------

# Module-level singleton backend, managed by init_cache / set_cache /
# close_cache below.
_backend: CacheBackend | None = None
42
+
43
+
44
def _build_serializer(name: str) -> Serializer:
    """Create the serializer for *name*; anything but "pickle" yields JSON."""
    if name != "pickle":
        return JsonSerializer()
    # Imported lazily so pickle support stays optional at import time.
    from cache.serialization import PickleSerializer

    return PickleSerializer()
50
+
51
+
52
def init_cache(config: CacheConfig | None = None) -> CacheBackend:
    """Initialize the global cache backend from config.

    Args:
        config: Cache settings; defaults to ``CacheConfig()``.

    Returns:
        The created backend instance.

    Raises:
        ImportError: If the Redis backend is requested but the optional
            ``redis`` dependency is not installed.
    """
    global _backend
    cfg = config or CacheConfig()
    serializer = _build_serializer(cfg.serializer_type)

    if cfg.backend == BackendType.REDIS:
        try:
            import redis.asyncio as redis
        except ImportError as exc:
            # Distribution name is msaas-cache (see pyproject), so the
            # install hint must reference the real extra.
            raise ImportError(
                "Install msaas-cache[redis] to use the Redis backend."
            ) from exc

        client = redis.from_url(cfg.redis_url, decode_responses=False)
        from cache.backends.redis_backend import RedisBackend

        _backend = RedisBackend(
            client=client,
            prefix=cfg.key_prefix,
            default_ttl=cfg.default_ttl,
            serializer=serializer,
        )
    else:
        from cache.backends.memory_backend import MemoryBackend

        _backend = MemoryBackend(max_size=cfg.max_memory_items)
        # start_cleanup() needs a running event loop (asyncio.create_task);
        # when init_cache is called synchronously at startup, skip the
        # background sweeper — expired keys are still evicted lazily.
        try:
            _backend.start_cleanup()
        except RuntimeError:
            pass

    return _backend
85
+
86
+
87
def get_cache() -> CacheBackend:
    """Return the active cache backend.

    Raises:
        RuntimeError: If no backend has been initialized yet.
    """
    backend = _backend
    if backend is not None:
        return backend
    raise RuntimeError("Cache not initialized. Call init_cache() first.")
92
+
93
+
94
def set_cache(backend: CacheBackend) -> None:
    """Override the global cache backend (useful for testing)."""
    # Rebinds the module-level singleton only; any previously active backend
    # is NOT closed — callers should await close_cache() first if needed.
    global _backend
    _backend = backend
98
+
99
+
100
async def close_cache() -> None:
    """Close the global cache backend (if any) and reset the singleton."""
    global _backend
    if _backend is None:
        return
    await _backend.close()
    _backend = None
@@ -0,0 +1,113 @@
1
+ """Caching decorator for async functions."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import functools
6
+ import hashlib
7
+ import inspect
8
+ import json
9
+ from typing import Any, Callable
10
+
11
+ from cache.config import get_cache
12
+
13
+
14
+ def _default_key_builder(
15
+ func: Callable[..., Any],
16
+ args: tuple[Any, ...],
17
+ kwargs: dict[str, Any],
18
+ prefix: str,
19
+ ) -> str:
20
+ """Build a cache key from function name + arguments.
21
+
22
+ Produces a deterministic, human-readable key like:
23
+ ``prefix:module.func_name:sha256_of_args``
24
+ """
25
+ func_id = f"{func.__module__}.{func.__qualname__}"
26
+
27
+ # Bind arguments to get a consistent representation.
28
+ sig = inspect.signature(func)
29
+ bound = sig.bind(*args, **kwargs)
30
+ bound.apply_defaults()
31
+ args_str = json.dumps(
32
+ {k: repr(v) for k, v in bound.arguments.items()},
33
+ sort_keys=True,
34
+ )
35
+ args_hash = hashlib.sha256(args_str.encode()).hexdigest()[:16]
36
+
37
+ parts = [p for p in (prefix, func_id, args_hash) if p]
38
+ return ":".join(parts)
39
+
40
+
41
+ def cached(
42
+ ttl: int = 300,
43
+ key_prefix: str = "",
44
+ key_builder: Callable[..., str] | None = None,
45
+ condition: Callable[[Any], bool] | None = None,
46
+ ) -> Callable[..., Any]:
47
+ """Decorator that caches the return value of an async function.
48
+
49
+ Args:
50
+ ttl: Time-to-live in seconds for the cached result.
51
+ key_prefix: Static prefix for cache keys.
52
+ key_builder: Custom ``(func, args, kwargs) -> str`` key builder.
53
+ Overrides the default key generation when provided.
54
+ condition: Optional predicate ``(result) -> bool``. The result is
55
+ only cached when condition returns ``True``.
56
+
57
+ Usage::
58
+
59
+ @cached(ttl=60, key_prefix="users")
60
+ async def get_user(user_id: str) -> dict:
61
+ ...
62
+
63
+ # Invalidate a specific call
64
+ await get_user.invalidate("user_123")
65
+
66
+ # Custom key builder
67
+ @cached(key_builder=lambda fn, args, kw: f"custom:{args[0]}")
68
+ async def lookup(query: str) -> list:
69
+ ...
70
+
71
+ # Conditional caching (skip None results)
72
+ @cached(condition=lambda r: r is not None)
73
+ async def find_user(email: str) -> dict | None:
74
+ ...
75
+ """
76
+
77
+ def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
78
+ @functools.wraps(func)
79
+ async def wrapper(*args: Any, **kwargs: Any) -> Any:
80
+ cache = get_cache()
81
+
82
+ if key_builder is not None:
83
+ cache_key = key_builder(func, args, kwargs)
84
+ else:
85
+ cache_key = _default_key_builder(func, args, kwargs, key_prefix)
86
+
87
+ # Check cache first.
88
+ hit = await cache.get(cache_key)
89
+ if hit is not None:
90
+ return hit
91
+
92
+ # Cache miss — call the original function.
93
+ result = await func(*args, **kwargs)
94
+
95
+ # Conditionally cache the result.
96
+ if condition is None or condition(result):
97
+ await cache.set(cache_key, result, ttl=ttl)
98
+
99
+ return result
100
+
101
+ async def invalidate(*args: Any, **kwargs: Any) -> bool:
102
+ """Invalidate the cached result for the given arguments."""
103
+ cache = get_cache()
104
+ if key_builder is not None:
105
+ cache_key = key_builder(func, args, kwargs)
106
+ else:
107
+ cache_key = _default_key_builder(func, args, kwargs, key_prefix)
108
+ return await cache.delete(cache_key)
109
+
110
+ wrapper.invalidate = invalidate # type: ignore[attr-defined]
111
+ return wrapper
112
+
113
+ return decorator