cachekit 0.3.0__cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cachekit/__init__.py +114 -0
- cachekit/_rust_serializer.cp39-win_amd64.pyd +0 -0
- cachekit/backends/__init__.py +94 -0
- cachekit/backends/base.py +276 -0
- cachekit/backends/errors.py +181 -0
- cachekit/backends/file/__init__.py +30 -0
- cachekit/backends/file/backend.py +723 -0
- cachekit/backends/file/config.py +192 -0
- cachekit/backends/provider.py +141 -0
- cachekit/backends/redis/__init__.py +12 -0
- cachekit/backends/redis/backend.py +264 -0
- cachekit/backends/redis/client.py +213 -0
- cachekit/backends/redis/config.py +105 -0
- cachekit/backends/redis/error_handler.py +176 -0
- cachekit/backends/redis/provider.py +483 -0
- cachekit/cache_handler.py +1411 -0
- cachekit/config/__init__.py +50 -0
- cachekit/config/decorator.py +532 -0
- cachekit/config/nested.py +354 -0
- cachekit/config/settings.py +342 -0
- cachekit/config/singleton.py +83 -0
- cachekit/config/validation.py +98 -0
- cachekit/decorators/__init__.py +16 -0
- cachekit/decorators/intent.py +179 -0
- cachekit/decorators/main.py +10 -0
- cachekit/decorators/orchestrator.py +507 -0
- cachekit/decorators/session.py +66 -0
- cachekit/decorators/stats_context.py +64 -0
- cachekit/decorators/tenant_context.py +255 -0
- cachekit/decorators/utils/__init__.py +3 -0
- cachekit/decorators/wrapper.py +1413 -0
- cachekit/di.py +55 -0
- cachekit/hash_utils.py +92 -0
- cachekit/health.py +647 -0
- cachekit/hiredis_compat.py +96 -0
- cachekit/imports.py +48 -0
- cachekit/invalidation/__init__.py +31 -0
- cachekit/invalidation/channel.py +124 -0
- cachekit/invalidation/event.py +232 -0
- cachekit/invalidation/redis_channel.py +403 -0
- cachekit/key_generator.py +348 -0
- cachekit/l1_cache.py +623 -0
- cachekit/logging.py +580 -0
- cachekit/monitoring/__init__.py +8 -0
- cachekit/monitoring/correlation_tracking.py +308 -0
- cachekit/monitoring/pool_monitor.py +346 -0
- cachekit/monitoring/protocols.py +50 -0
- cachekit/py.typed +0 -0
- cachekit/reliability/__init__.py +41 -0
- cachekit/reliability/adaptive_timeout.py +422 -0
- cachekit/reliability/async_metrics.py +647 -0
- cachekit/reliability/circuit_breaker.py +487 -0
- cachekit/reliability/error_classification.py +91 -0
- cachekit/reliability/load_control.py +214 -0
- cachekit/reliability/metrics_collection.py +451 -0
- cachekit/reliability/profiles.py +348 -0
- cachekit/serializers/__init__.py +224 -0
- cachekit/serializers/arrow_serializer.py +247 -0
- cachekit/serializers/auto_serializer.py +803 -0
- cachekit/serializers/base.py +268 -0
- cachekit/serializers/encryption_wrapper.py +592 -0
- cachekit/serializers/orjson_serializer.py +203 -0
- cachekit/serializers/standard_serializer.py +358 -0
- cachekit/serializers/wrapper.py +89 -0
- cachekit-0.3.0.dist-info/METADATA +439 -0
- cachekit-0.3.0.dist-info/RECORD +68 -0
- cachekit-0.3.0.dist-info/WHEEL +4 -0
- cachekit-0.3.0.dist-info/licenses/LICENSE +21 -0
cachekit/__init__.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
r"""cachekit - Caching decorator for Python applications.

A robust, production-ready Python library that provides intelligent Redis caching
capabilities with advanced features like chunked data handling, multi-serialization
support, distributed locking, and automatic corruption detection.

Key Features:
- **Intelligent @cache decorator** with auto-detection and intent-based optimization
- **Circuit breaker protection** against cascading failures
- **Adaptive timeout adjustment** based on historical Redis latency patterns
- **Backpressure control** to prevent Redis overload
- **Connection pooling** for optimized performance
- **Health check methods** for comprehensive monitoring
- **Structured logging** with correlation IDs and distributed tracing
- **Statistics collection** for Prometheus metrics integration

Architecture Overview:
cachekit provides a modular decorator architecture with intelligent
auto-detection and intent-based optimization. The architecture includes:

- FeatureOrchestrator: Manages enterprise-grade reliability and monitoring features
- Flexible configuration interface with intelligent auto-detection
- Enhanced error handling with comprehensive safety checks
- Streamlined connection management with optimized components

Born from production debugging of Redis caching failures:
- UTF-8 corruption prevention with intelligent binary data handling
- Binary data magic byte detection and validation
- Chunked storage for large objects with atomic operations
- Comprehensive checksum validation and recovery

Example Usage:
    ```python
    from cachekit import cache

    # Intelligent cache with zero configuration (90% of use cases)
    @cache
    def expensive_function():
        return compute_result()

    # Intent-based optimization (9% of use cases)
    @cache.minimal  # Speed-critical functions
    def get_price(symbol: str):
        return fetch_price(symbol)

    @cache.production  # Reliability-critical functions
    def process_payment(amount: Decimal):
        return payment_gateway.charge(amount)

    @cache.secure  # Security-critical functions
    def get_user_data(user_id: int) -> UserProfile:
        return db.fetch_user(user_id)

    # Manual configuration when needed (1% of use cases)
    @cache(ttl=3600, namespace="custom", circuit_breaker=True)
    def custom_function():
        return special_computation()

    # Health monitoring
    health = custom_function.get_health_status()
    full_health = custom_function.check_health()
    ```
"""

__version__ = "0.3.0"

from typing import Any, Callable, TypeVar

# Configure hiredis compatibility BEFORE any Redis imports
# This prevents GIL warnings in Python 3.13+ free-threading mode
try:
    # Imported purely for its side effects; absence is tolerated.
    from . import hiredis_compat
except ImportError:
    pass

# Import the configuration classes
# Redis client is automatically fast - no special import needed
from .config import DecoratorConfig

# Import the intelligent cache decorator and CacheInfo
from .decorators import cache
from .decorators.wrapper import CacheInfo

# Import health check functionality
from .health import (
    HealthCheckResult,
    HealthLevel,
    HealthStatus,
    async_health_check_handler,
    get_health_checker,
    health_check_handler,
)

# L1/L2 architecture integrated into standard cache interface
# No separate imports needed - cache.minimal/.production/.secure handle L1+L2 transparently
# Import reliability configuration
from .reliability import CircuitBreakerConfig

# Generic callable type var; kept public-module-level for decorator typing.
F = TypeVar("F", bound=Callable[..., Any])


__all__ = [
    "__version__",
    "async_health_check_handler",
    "cache",
    "CacheInfo",
    "CircuitBreakerConfig",
    "DecoratorConfig",
    "get_health_checker",
    "health_check_handler",
    "HealthCheckResult",
    "HealthLevel",
    "HealthStatus",
]
|
|
Binary file
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
"""Backend storage abstraction for cachekit.
|
|
2
|
+
|
|
3
|
+
This module provides protocol-based abstraction for L2 backend storage with
|
|
4
|
+
dependency injection pattern. Backends can be Redis, HTTP, DynamoDB, or any
|
|
5
|
+
key-value store.
|
|
6
|
+
|
|
7
|
+
Public API:
|
|
8
|
+
- BaseBackend: Core protocol (5 methods: get, set, delete, exists, health_check)
|
|
9
|
+
- TTLInspectableBackend: Optional protocol for TTL inspection/refresh
|
|
10
|
+
- LockableBackend: Optional protocol for distributed locking
|
|
11
|
+
- TimeoutConfigurableBackend: Optional protocol for per-operation timeouts
|
|
12
|
+
- BackendProvider: Dependency injection protocol
|
|
13
|
+
- BackendError: Exception raised by backend operations
|
|
14
|
+
- BackendErrorType: Error classification enum
|
|
15
|
+
- CapabilityNotAvailableError: Exception for missing optional capabilities
|
|
16
|
+
- RedisBackend: Redis implementation (default)
|
|
17
|
+
|
|
18
|
+
Usage:
|
|
19
|
+
>>> from cachekit.backends import BaseBackend, RedisBackend, BackendError
|
|
20
|
+
>>> # RedisBackend usage requires Redis connection
|
|
21
|
+
>>> # See RedisBackend documentation for connection details
|
|
22
|
+
|
|
23
|
+
Dependency injection pattern:
|
|
24
|
+
>>> from cachekit.backends import BackendProvider
|
|
25
|
+
>>> # Implement BackendProvider protocol in your application
|
|
26
|
+
>>> # See BackendProvider documentation for implementation examples
|
|
27
|
+
"""
|
|
28
|
+
|
|
29
|
+
from __future__ import annotations
|
|
30
|
+
|
|
31
|
+
from typing import Protocol
|
|
32
|
+
|
|
33
|
+
from cachekit.backends.base import (
|
|
34
|
+
BaseBackend,
|
|
35
|
+
LockableBackend,
|
|
36
|
+
TimeoutConfigurableBackend,
|
|
37
|
+
TTLInspectableBackend,
|
|
38
|
+
)
|
|
39
|
+
from cachekit.backends.errors import (
|
|
40
|
+
BackendError,
|
|
41
|
+
BackendErrorType,
|
|
42
|
+
CapabilityNotAvailableError,
|
|
43
|
+
)
|
|
44
|
+
from cachekit.backends.redis import RedisBackend
|
|
45
|
+
|
|
46
|
+
__all__ = [
|
|
47
|
+
"BaseBackend",
|
|
48
|
+
"TTLInspectableBackend",
|
|
49
|
+
"LockableBackend",
|
|
50
|
+
"TimeoutConfigurableBackend",
|
|
51
|
+
"BackendProvider",
|
|
52
|
+
"BackendError",
|
|
53
|
+
"BackendErrorType",
|
|
54
|
+
"CapabilityNotAvailableError",
|
|
55
|
+
"RedisBackend",
|
|
56
|
+
]
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
class BackendProvider(Protocol):
    """Dependency-injection contract for supplying backend instances.

    A provider decouples *where* a backend comes from (shared singleton,
    connection pool, per-request construction, ...) from the code that
    consumes it, keeping backends pluggable and trivial to fake in tests.

    Any object exposing a ``get_backend()`` method satisfies this protocol
    structurally (PEP 544); no explicit inheritance is required.

    Example:
        >>> from cachekit.backends import BackendProvider, BaseBackend
        >>> # class MyProvider:
        >>> #     def get_backend(self) -> BaseBackend:
        >>> #         return RedisBackend()
        >>> # backend = MyProvider().get_backend()
    """

    def get_backend(self) -> BaseBackend:
        """Produce a backend ready for cache operations.

        The lifecycle strategy (singleton, pooled, or fresh per call) is
        entirely up to the implementation.

        Returns:
            A BaseBackend instance usable for get/set/delete/exists calls.

        Example:
            >>> backend = provider.get_backend()  # doctest: +SKIP
            >>> backend.set("key", b"value", ttl=60)  # doctest: +SKIP
        """
        ...
|
|
@@ -0,0 +1,276 @@
|
|
|
1
|
+
"""Base backend protocol definitions.
|
|
2
|
+
|
|
3
|
+
This module defines the storage backend contract using PEP 544 protocol-based abstraction.
|
|
4
|
+
All L2 backends (Redis, HTTP, etc.) must implement BaseBackend protocol.
|
|
5
|
+
|
|
6
|
+
Optional capability protocols (TTLInspectableBackend, LockableBackend,
|
|
7
|
+
TimeoutConfigurableBackend) enable advanced features with graceful degradation.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
from collections.abc import AsyncIterator
|
|
13
|
+
from typing import Any, Optional, Protocol, runtime_checkable
|
|
14
|
+
|
|
15
|
+
# Re-export BackendError for convenience (public API)
|
|
16
|
+
from cachekit.backends.errors import BackendError # noqa: F401
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@runtime_checkable
class BaseBackend(Protocol):
    """Structural contract that every L2 storage backend must satisfy.

    The interface is deliberately tiny and bytes-oriented so that any
    key-value store (Redis, an HTTP service, DynamoDB, local files, ...)
    can implement it without leaking store-specific concepts into callers.
    Because this is a runtime-checkable PEP 544 protocol,
    ``isinstance(obj, BaseBackend)`` verifies method presence at runtime.

    Design principles:
    - Stateless operations (connection management lives in implementations)
    - Bytes-only values (language-agnostic, no Python-specific types)
    - No backend-specific concepts in the contract
    - Small and focused (KISS)

    Example:
        >>> from cachekit.backends import BaseBackend, RedisBackend
        >>> backend = RedisBackend()
        >>> isinstance(backend, BaseBackend)  # Runtime checkable protocol
        True
        >>> backend.set("key", b"value", ttl=60)  # doctest: +SKIP
        >>> data = backend.get("key")  # doctest: +SKIP
    """

    def get(self, key: str) -> Optional[bytes]:
        """Fetch the stored bytes for *key*.

        Args:
            key: Cache key to look up.

        Returns:
            The stored bytes, or None when the key is absent.

        Raises:
            BackendError: On any backend failure (network, timeout, ...).
        """
        ...

    def set(self, key: str, value: bytes, ttl: Optional[int] = None) -> None:
        """Store *value* under *key*, optionally expiring after *ttl* seconds.

        Args:
            key: Cache key to write.
            value: Raw bytes payload (encrypted or plaintext msgpack).
            ttl: Expiry in seconds; None means no expiry.

        Raises:
            BackendError: On any backend failure (network, timeout, ...).
        """
        ...

    def delete(self, key: str) -> bool:
        """Remove *key* from storage.

        Args:
            key: Cache key to remove.

        Returns:
            True when a value was actually removed, False when absent.

        Raises:
            BackendError: On any backend failure (network, timeout, ...).
        """
        ...

    def exists(self, key: str) -> bool:
        """Report whether *key* currently has a stored value.

        Args:
            key: Cache key to probe.

        Returns:
            True when present, False otherwise.

        Raises:
            BackendError: On any backend failure (network, timeout, ...).
        """
        ...

    def health_check(self) -> tuple[bool, dict[str, Any]]:
        """Probe the backend and report its health.

        Returns:
            A ``(is_healthy, details)`` pair; the details mapping must
            include at least 'latency_ms' and 'backend_type'.

        Example:
            >>> is_healthy, details = backend.health_check()  # doctest: +SKIP
            >>> assert 'latency_ms' in details  # doctest: +SKIP
            >>> assert 'backend_type' in details  # doctest: +SKIP
        """
        ...
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
@runtime_checkable
class TTLInspectableBackend(Protocol):
    """Opt-in capability protocol: inspect and refresh key TTLs.

    Backends able to report remaining expiry and extend it (Redis,
    PostgreSQL, DynamoDB, SQLite, FileSystem) implement this protocol;
    stateless or limited stores (HTTP, Memcached, S3) typically do not.
    Callers should feature-test with ``hasattr(backend, 'get_ttl')`` and
    degrade gracefully when absent. Enables features such as automatic
    TTL refresh for hot keys.

    Example:
        >>> # TTL inspection pattern (async context):
        >>> # if hasattr(backend, 'get_ttl'):
        >>> #     ttl = await backend.get_ttl("user:123")
        >>> #     if ttl and ttl < 60:
        >>> #         await backend.refresh_ttl("user:123", 3600)
    """

    async def get_ttl(self, key: str) -> Optional[int]:
        """Return the seconds remaining until *key* expires.

        Args:
            key: Cache key to inspect.

        Returns:
            Remaining TTL in seconds, or None when the key is missing or
            has no expiration (permanent).

        Raises:
            BackendError: On backend failure.

        Example:
            >>> ttl = await backend.get_ttl("user:123")  # doctest: +SKIP
        """
        ...

    async def refresh_ttl(self, key: str, ttl: int) -> bool:
        """Reset *key*'s expiry to *ttl* seconds from now.

        Args:
            key: Cache key to refresh.
            ttl: New TTL in seconds.

        Returns:
            True when the key existed and its TTL was updated; False when
            the key was absent (no-op).

        Raises:
            BackendError: On backend failure.

        Example:
            >>> refreshed = await backend.refresh_ttl("user:123", 3600)  # doctest: +SKIP
        """
        ...
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
@runtime_checkable
class LockableBackend(Protocol):
    """Optional protocol for backends supporting distributed locking.

    Backends implementing this protocol can provide distributed lock semantics
    for coordinating access across multiple processes/servers. This enables
    features like cache stampede prevention and critical sections.

    Not all backends support this capability:
    - Supported: Redis, PostgreSQL, DynamoDB
    - Local-only: SQLite, FileSystem (single-process locking)
    - Not supported: HTTP (stateless), Memcached, S3

    Example:
        >>> # Distributed locking pattern (async context):
        >>> # if hasattr(backend, 'acquire_lock'):
        >>> #     async with backend.acquire_lock("lock:compute", timeout=30) as acquired:
        >>> #         if acquired:
        >>> #             result = expensive_computation()
    """

    # NOTE(review): `async def ... -> AsyncIterator[bool]` is the signature shape
    # of a generator meant to be wrapped by @contextlib.asynccontextmanager; a
    # plain coroutine returning an AsyncIterator cannot be used directly with
    # `async with` as the examples show. Confirm implementations apply that
    # decorator (or otherwise expose an async context manager).
    async def acquire_lock(
        self,
        key: str,
        timeout: float,
        blocking_timeout: Optional[float] = None,
    ) -> AsyncIterator[bool]:
        """Acquire a distributed lock on key.

        Args:
            key: Lock key (e.g., "lock:user:123")
            timeout: How long to hold the lock (seconds) before auto-release
            blocking_timeout: Max time to wait for lock acquisition (None = non-blocking)

        Yields:
            True if lock was acquired
            False if timeout occurred waiting for lock

        Raises:
            BackendError: If backend operation fails

        Example:
            >>> async with backend.acquire_lock("lock:key", timeout=30, blocking_timeout=5) as acquired:  # doctest: +SKIP
            ...     if acquired:  # doctest: +SKIP
            ...         pass  # Lock held, safe to proceed  # doctest: +SKIP

        Note:
            Lock is automatically released on context exit, even if exception occurs.
        """
        ...
|
|
232
|
+
|
|
233
|
+
|
|
234
|
+
@runtime_checkable
class TimeoutConfigurableBackend(Protocol):
    """Optional protocol for per-operation timeout configuration.

    Backends implementing this protocol allow fine-grained timeout control
    per operation. This enables features like adaptive timeouts that adjust
    based on observed operation latency.

    All backends support some timeout mechanism, but granularity varies:
    - Per-operation: HTTP, DynamoDB, PostgreSQL
    - Per-socket/transaction: Redis, Memcached, SQLite
    - Global: KV, S3

    Example:
        >>> # Per-operation timeout pattern (async context):
        >>> # if hasattr(backend, 'with_timeout'):
        >>> #     async with backend.with_timeout("get", 100):
        >>> #         value = await backend.get("key")
    """

    # NOTE(review): same concern as LockableBackend.acquire_lock — this is
    # declared `async def ... -> AsyncIterator[None]` but the examples use it
    # with `async with`, which only works if implementations are decorated with
    # @contextlib.asynccontextmanager. Confirm against concrete backends.
    async def with_timeout(
        self,
        operation: str,
        timeout_ms: int,
    ) -> AsyncIterator[None]:
        """Set timeout for operations within context.

        Args:
            operation: Operation name (e.g., "get", "set", "delete")
            timeout_ms: Timeout in milliseconds

        Raises:
            BackendError: With error_type=TIMEOUT if timeout exceeded

        Example:
            >>> async with backend.with_timeout("get", 100):  # doctest: +SKIP
            ...     value = await backend.get("key")  # doctest: +SKIP

        Note:
            Backends without per-operation timeout may apply timeout at
            socket or global level (coarser-grained fallback).
        """
        ...
|
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
"""Backend error types and classification.
|
|
2
|
+
|
|
3
|
+
This module defines error hierarchies for backend operations, enabling
|
|
4
|
+
circuit breaker and retry logic to make correct decisions without
|
|
5
|
+
Redis-specific exception handling.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from enum import Enum
|
|
11
|
+
from typing import Optional
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class BackendErrorType(str, Enum):
    """Error classification driving circuit breaker and retry decisions.

    Inherits from str so members serialize to JSON and compare directly
    against plain strings. Each member implies a recovery strategy:

    - TRANSIENT: Temporary failure, retry with exponential backoff + jitter
    - PERMANENT: Unfixable error, fail fast without retry
    - TIMEOUT: Operation exceeded time limit, configurable retry strategy
    - AUTHENTICATION: Credential/auth issue, alert operations team
    - UNKNOWN: Unclassified error, assume transient and log for investigation

    Example:
        >>> error = BackendError("Connection lost", error_type=BackendErrorType.TRANSIENT)
        >>> if error.is_transient:
        ...     # Retry with exponential backoff
        ...     pass
    """

    TRANSIENT = "transient"
    PERMANENT = "permanent"
    TIMEOUT = "timeout"
    AUTHENTICATION = "authentication"
    UNKNOWN = "unknown"


class BackendError(Exception):
    """Base exception for all backend operations.

    The error_type field enables circuit breaker and retry logic to make
    correct decisions without inspecting exception types. This approach
    works with any backend (Redis, HTTP, DynamoDB, etc.).

    Designed for serializability across network boundaries. Contains only
    simple types (str, enum) and operation context for debugging.

    Attributes:
        message: Human-readable error message
        error_type: Error classification (see BackendErrorType)
        original_exception: The original exception that caused this error (if any)
        operation: The operation that failed (get, set, delete, exists)
        key: The cache key involved in the operation (optional, for debugging)

    Example:
        >>> from redis import ConnectionError as RedisConnectionError
        >>> try:
        ...     pass  # Some Redis operation
        ... except RedisConnectionError as exc:
        ...     raise BackendError(
        ...         "Redis connection failed",
        ...         error_type=BackendErrorType.TRANSIENT,
        ...         original_exception=exc,
        ...         operation="get",
        ...         key="user:123",
        ...     )
    """

    def __init__(
        self,
        message: str,
        error_type: BackendErrorType = BackendErrorType.UNKNOWN,
        original_exception: Exception | None = None,
        operation: str | None = None,
        key: str | None = None,
    ):
        """Initialize BackendError with error classification and context.

        Args:
            message: Human-readable error message
            error_type: Error classification for retry/recovery logic
            original_exception: The original exception that caused this error
            operation: The operation that failed (get, set, delete, exists)
            key: The cache key involved in the operation
        """
        self.message = message
        self.error_type = error_type
        self.original_exception = original_exception
        self.operation = operation
        self.key = key
        # The formatted message becomes str(self) via Exception.__init__.
        super().__init__(self._format_message())

    def _format_message(self) -> str:
        """Format error message with operation context, " | "-separated."""
        parts = [self.message]
        if self.operation:
            parts.append(f"operation={self.operation}")
        if self.key:
            # Truncate long keys for security/readability.
            # Parenthesized for clarity: the whole truncated form is the
            # ternary's "true" branch.
            key_display = (self.key[:50] + "...") if len(self.key) > 50 else self.key
            parts.append(f"key={key_display}")
        # Defensive: error_type is always a truthy enum member when callers
        # respect the annotation, but guard against None being passed anyway.
        if self.error_type:
            parts.append(f"type={self.error_type.value}")
        return " | ".join(parts)

    @property
    def is_transient(self) -> bool:
        """Should trigger exponential backoff retry.

        Example:
            >>> error = BackendError("Temp failure", error_type=BackendErrorType.TRANSIENT)
            >>> error.is_transient
            True
        """
        return self.error_type == BackendErrorType.TRANSIENT

    @property
    def is_permanent(self) -> bool:
        """Should fail fast, no retry.

        Example:
            >>> error = BackendError("Invalid key", error_type=BackendErrorType.PERMANENT)
            >>> error.is_permanent
            True
        """
        return self.error_type == BackendErrorType.PERMANENT

    @property
    def is_timeout(self) -> bool:
        """Configurable retry strategy (e.g. retry with a larger timeout).

        Example:
            >>> error = BackendError("Operation timeout", error_type=BackendErrorType.TIMEOUT)
            >>> error.is_timeout
            True
        """
        return self.error_type == BackendErrorType.TIMEOUT

    @property
    def is_authentication(self) -> bool:
        """Should alert operations team; retrying will not help.

        Example:
            >>> error = BackendError("Invalid creds", error_type=BackendErrorType.AUTHENTICATION)
            >>> error.is_authentication
            True
        """
        return self.error_type == BackendErrorType.AUTHENTICATION

    def __repr__(self) -> str:
        """Developer-friendly representation."""
        return f"BackendError({self.message!r}, error_type={self.error_type.value!r})"
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
class CapabilityNotAvailableError(BackendError):
    """Signals a hard requirement on an optional protocol the backend lacks.

    Raise this when a feature cannot gracefully degrade — for example, code
    that mandates distributed locking running against a backend without
    lock support. Always classified as PERMANENT, so retry logic fails fast.

    Example:
        >>> from cachekit.backends.errors import CapabilityNotAvailableError
        >>> # if not hasattr(backend, 'acquire_lock'):
        >>> #     raise CapabilityNotAvailableError("Backend doesn't support locking")
    """

    def __init__(self, message: str):
        """Create the error with PERMANENT classification baked in.

        Args:
            message: Human-readable description of the missing capability.
        """
        super().__init__(message, error_type=BackendErrorType.PERMANENT)
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
"""File-based backend for local disk caching.
|
|
2
|
+
|
|
3
|
+
This module provides a production-ready filesystem-based cache backend with:
|
|
4
|
+
- Thread-safe operations using reentrant locks and file-level locking
|
|
5
|
+
- Atomic writes via write-then-rename pattern
|
|
6
|
+
- LRU eviction based on disk usage thresholds
|
|
7
|
+
- TTL-based expiration with secure header format
|
|
8
|
+
- Security features (O_NOFOLLOW, symlink prevention)
|
|
9
|
+
|
|
10
|
+
Public API:
|
|
11
|
+
- FileBackend: Main backend implementation
|
|
12
|
+
- FileBackendConfig: Configuration class
|
|
13
|
+
|
|
14
|
+
Example:
|
|
15
|
+
>>> from cachekit.backends.file import FileBackend, FileBackendConfig
|
|
16
|
+
>>> config = FileBackendConfig(cache_dir="/tmp/cachekit")
|
|
17
|
+
>>> backend = FileBackend(config)
|
|
18
|
+
>>> backend.set("key", b"value", ttl=60)
|
|
19
|
+
>>> data = backend.get("key")
|
|
20
|
+
"""
|
|
21
|
+
|
|
22
|
+
from __future__ import annotations
|
|
23
|
+
|
|
24
|
+
from cachekit.backends.file.backend import FileBackend
|
|
25
|
+
from cachekit.backends.file.config import FileBackendConfig
|
|
26
|
+
|
|
27
|
+
__all__ = [
|
|
28
|
+
"FileBackend",
|
|
29
|
+
"FileBackendConfig",
|
|
30
|
+
]
|