sqlacache 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sqlacache/__init__.py +14 -0
- sqlacache/config.py +175 -0
- sqlacache/contrib/__init__.py +1 -0
- sqlacache/contrib/fastapi.py +1 -0
- sqlacache/contrib/prometheus.py +1 -0
- sqlacache/exceptions.py +13 -0
- sqlacache/interceptor.py +128 -0
- sqlacache/invalidation.py +45 -0
- sqlacache/manager.py +348 -0
- sqlacache/pubsub/__init__.py +3 -0
- sqlacache/pubsub/redis.py +89 -0
- sqlacache/py.typed +0 -0
- sqlacache/serializers/__init__.py +5 -0
- sqlacache/serializers/json.py +24 -0
- sqlacache/transport/__init__.py +45 -0
- sqlacache/transport/cashews.py +136 -0
- sqlacache/utils/__init__.py +1 -0
- sqlacache/utils/key_generation.py +40 -0
- sqlacache/utils/query_analysis.py +108 -0
- sqlacache/utils/sync_wrapper.py +16 -0
- sqlacache-0.1.0.dist-info/METADATA +298 -0
- sqlacache-0.1.0.dist-info/RECORD +24 -0
- sqlacache-0.1.0.dist-info/WHEEL +4 -0
- sqlacache-0.1.0.dist-info/licenses/LICENSE +21 -0
"""Public package exports for sqlacache."""

from sqlacache.config import configure
from sqlacache.exceptions import ConfigError
from sqlacache.manager import CacheManager

# Public API surface, kept alphabetically sorted.
__all__ = [
    "CacheManager",
    "ConfigError",
    "__version__",
    "configure",
]

# NOTE(review): keep in sync with the version in the package metadata.
__version__ = "0.1.0"
sqlacache/config.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
1
|
+
"""Configuration helpers for sqlacache."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from collections.abc import Iterable, Mapping
|
|
6
|
+
from importlib import import_module
|
|
7
|
+
from typing import Any
|
|
8
|
+
from urllib.parse import urlparse
|
|
9
|
+
|
|
10
|
+
from sqlacache.exceptions import ConfigError
|
|
11
|
+
from sqlacache.manager import CacheManager
|
|
12
|
+
|
|
13
|
+
# Complete set of cacheable query operations; VALID_OPS is an alias kept for
# call sites that want to express "validation set" rather than "all ops".
ALL_OPS = frozenset({"get", "fetch", "count", "exists"})
VALID_OPS = ALL_OPS


def normalize_ops(ops_config: str | Iterable[str] | None) -> frozenset[str]:
    """Normalize an ops config into a validated frozenset.

    Accepts ``None`` (no ops), the literal string ``"all"``, or an iterable
    of operation names. Raises ``ConfigError`` for any other string or for
    unknown operation names.
    """
    if ops_config is None:
        return frozenset()

    if isinstance(ops_config, str):
        if ops_config == "all":
            return ALL_OPS
        # Any other bare string is a configuration mistake.
        raise ConfigError(f"Unknown ops value: {ops_config!r}")

    ops = frozenset(ops_config)
    unknown = ops - VALID_OPS
    if unknown:
        raise ConfigError(f"Unknown ops: {sorted(unknown)!r}")
    return ops
+
|
|
33
|
+
|
|
34
|
+
def _resolve_model(model_path: str) -> type[Any]:
|
|
35
|
+
"""Resolve a dotted import path to a Python class."""
|
|
36
|
+
|
|
37
|
+
module_path, separator, class_name = model_path.rpartition(".")
|
|
38
|
+
if not separator or not module_path or not class_name:
|
|
39
|
+
raise ConfigError(f"Invalid model path: {model_path!r}")
|
|
40
|
+
|
|
41
|
+
try:
|
|
42
|
+
module = import_module(module_path)
|
|
43
|
+
except ImportError as exc: # pragma: no cover - exact import failures vary
|
|
44
|
+
raise ConfigError(f"Could not import model module {module_path!r}") from exc
|
|
45
|
+
|
|
46
|
+
try:
|
|
47
|
+
model = getattr(module, class_name)
|
|
48
|
+
except AttributeError as exc: # pragma: no cover - exact import failures vary
|
|
49
|
+
raise ConfigError(f"Module {module_path!r} does not define {class_name!r}") from exc
|
|
50
|
+
|
|
51
|
+
if not isinstance(model, type):
|
|
52
|
+
raise ConfigError(f"Resolved model path {model_path!r} is not a class")
|
|
53
|
+
return model
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def _normalize_backend(
|
|
57
|
+
backend: str | Mapping[str, Any],
|
|
58
|
+
*,
|
|
59
|
+
serializer: str,
|
|
60
|
+
prefix: str,
|
|
61
|
+
compress: str | None,
|
|
62
|
+
) -> dict[str, Any]:
|
|
63
|
+
if isinstance(backend, str):
|
|
64
|
+
parsed = urlparse(backend)
|
|
65
|
+
if not parsed.scheme:
|
|
66
|
+
raise ConfigError(f"Invalid backend URL: {backend!r}")
|
|
67
|
+
normalized_backend: dict[str, Any] = {"url": backend}
|
|
68
|
+
elif isinstance(backend, Mapping):
|
|
69
|
+
url = backend.get("url")
|
|
70
|
+
if not isinstance(url, str) or not url:
|
|
71
|
+
raise ConfigError("Backend mapping must define a non-empty 'url' string")
|
|
72
|
+
parsed = urlparse(url)
|
|
73
|
+
if not parsed.scheme:
|
|
74
|
+
raise ConfigError(f"Invalid backend URL: {url!r}")
|
|
75
|
+
normalized_backend = dict(backend)
|
|
76
|
+
else:
|
|
77
|
+
raise ConfigError("Backend must be a URL string or mapping")
|
|
78
|
+
|
|
79
|
+
normalized_backend.setdefault("pickle_type", serializer)
|
|
80
|
+
if compress is not None:
|
|
81
|
+
normalized_backend.setdefault("compress_type", compress)
|
|
82
|
+
return normalized_backend
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def _normalize_models(
|
|
86
|
+
models: Mapping[str, Mapping[str, Any] | None],
|
|
87
|
+
*,
|
|
88
|
+
default_timeout: int,
|
|
89
|
+
) -> tuple[dict[str, dict[str, Any] | None], dict[str, Any] | None]:
|
|
90
|
+
if not models:
|
|
91
|
+
raise ConfigError("models configuration must not be empty")
|
|
92
|
+
|
|
93
|
+
wildcard_raw = models.get("*")
|
|
94
|
+
wildcard_config: dict[str, Any] | None = None
|
|
95
|
+
if wildcard_raw is not None:
|
|
96
|
+
if not isinstance(wildcard_raw, Mapping):
|
|
97
|
+
raise ConfigError("Wildcard model config must be a mapping or None")
|
|
98
|
+
wildcard_timeout = wildcard_raw.get("timeout", default_timeout)
|
|
99
|
+
if not isinstance(wildcard_timeout, int) or wildcard_timeout <= 0:
|
|
100
|
+
raise ConfigError("Wildcard timeout must be a positive integer")
|
|
101
|
+
wildcard_config = {
|
|
102
|
+
"ops": normalize_ops(wildcard_raw.get("ops", "all")),
|
|
103
|
+
"timeout": wildcard_timeout,
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
normalized_models: dict[str, dict[str, Any] | None] = {}
|
|
107
|
+
for model_path, raw_config in models.items():
|
|
108
|
+
if not isinstance(model_path, str) or not model_path:
|
|
109
|
+
raise ConfigError("Model keys must be non-empty strings")
|
|
110
|
+
|
|
111
|
+
if model_path != "*" and "." not in model_path:
|
|
112
|
+
raise ConfigError(f"Invalid model path: {model_path!r}")
|
|
113
|
+
|
|
114
|
+
if raw_config is None:
|
|
115
|
+
normalized_models[model_path] = None
|
|
116
|
+
continue
|
|
117
|
+
|
|
118
|
+
if not isinstance(raw_config, Mapping):
|
|
119
|
+
raise ConfigError(f"Model config for {model_path!r} must be a mapping or None")
|
|
120
|
+
|
|
121
|
+
timeout = raw_config.get("timeout")
|
|
122
|
+
if timeout is None:
|
|
123
|
+
timeout = wildcard_config["timeout"] if wildcard_config is not None else default_timeout
|
|
124
|
+
if not isinstance(timeout, int) or timeout <= 0:
|
|
125
|
+
raise ConfigError(f"Timeout for {model_path!r} must be a positive integer")
|
|
126
|
+
|
|
127
|
+
ops_default = wildcard_config["ops"] if wildcard_config is not None else "all"
|
|
128
|
+
normalized_models[model_path] = {
|
|
129
|
+
"ops": normalize_ops(raw_config.get("ops", ops_default)),
|
|
130
|
+
"timeout": timeout,
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
return normalized_models, wildcard_config
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def configure(
    *,
    backend: str | Mapping[str, Any],
    models: Mapping[str, Mapping[str, Any] | None],
    serializer: str = "sqlalchemy",
    invalidation: str = "row",
    prefix: str = "sqlacache",
    default_timeout: int = 3600,
    compress: str | None = None,
) -> CacheManager:
    """Create a cache manager from validated configuration.

    Validates the top-level options, normalizes the backend and model
    sections, and hands the assembled config dict to ``CacheManager``.

    Raises:
        ConfigError: if any option fails validation.
    """
    # Top-level option checks come first so errors point at the simplest cause.
    if invalidation not in {"row", "table"}:
        raise ConfigError(f"Unsupported invalidation mode: {invalidation!r}")
    if not isinstance(default_timeout, int) or default_timeout <= 0:
        raise ConfigError("default_timeout must be a positive integer")

    backend_config = _normalize_backend(
        backend,
        serializer=serializer,
        prefix=prefix,
        compress=compress,
    )
    model_configs, wildcard = _normalize_models(
        models,
        default_timeout=default_timeout,
    )

    manager_config = {
        "backend": backend_config,
        "models": model_configs,
        "wildcard": wildcard,
        "serializer": serializer,
        "invalidation": invalidation,
        "prefix": prefix,
        "default_timeout": default_timeout,
        "compress": compress,
    }
    return CacheManager(config=manager_config)
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Framework integration stubs."""
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""FastAPI integration placeholder."""
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Prometheus integration placeholder."""
|
sqlacache/exceptions.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
"""Exception hierarchy for sqlacache."""
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class CacheError(Exception):
|
|
5
|
+
"""Base exception for sqlacache."""
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class ConfigError(CacheError):
|
|
9
|
+
"""Raised for invalid cache configuration."""
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class TransportError(CacheError):
|
|
13
|
+
"""Raised for cache transport failures."""
|
sqlacache/interceptor.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
"""SQLAlchemy ORM interception hooks."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
from typing import Any, cast
|
|
7
|
+
|
|
8
|
+
from sqlalchemy.orm import ORMExecuteState, loading
|
|
9
|
+
from sqlalchemy.util import await_only
|
|
10
|
+
|
|
11
|
+
from sqlacache.invalidation import _bump_table_version, generate_tags
|
|
12
|
+
from sqlacache.utils.query_analysis import (
|
|
13
|
+
detect_operation_type,
|
|
14
|
+
extract_pks_from_fetch_result,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def merge_cached_result(session: Any, statement: Any, frozen: Any) -> Any:
    """Merge a frozen result back into a sync or async SQLAlchemy session."""
    # AsyncSession proxies expose the real Session via .sync_session; plain
    # Session objects have no such attribute and are used directly.
    sync_session = getattr(session, "sync_session", session)
    frozen_loader = cast(
        "Any",
        loading.merge_frozen_result(sync_session, statement, frozen, load=False),  # type: ignore[no-untyped-call]
    )
    # merge_frozen_result returns a callable producing the merged Result.
    return frozen_loader()
|
+
|
|
27
|
+
|
|
28
|
+
async def cache_query_result(
    manager: Any,
    statement: Any,
    result: Any,
    models: list[type[Any]],
    timeout: int,
) -> Any:
    """Freeze and cache a SQLAlchemy result object with dependency tags.

    The entry is tagged with every (table, pk) pair found in the rows so that
    row-level invalidation can evict it later. Returns a fresh result merged
    back into the originating session.
    """
    frozen = result.freeze()

    dependency_tags: list[str] = []
    for model, pks in extract_pks_from_fetch_result(list(frozen.data), models).items():
        dependency_tags.extend(generate_tags(model, pks))

    cache_key = await manager._build_cache_key(statement, models)
    await manager._transport.set(cache_key, frozen, expire=timeout, tags=dependency_tags)
    # The frozen result was consumed by freeze(); hand back a merged copy.
    return merge_cached_result(result.session, statement, frozen)
46
|
+
|
|
47
|
+
|
|
48
|
+
def build_do_orm_execute_handler(manager: Any) -> Any:
    """Build the Session.do_orm_execute event handler for a manager instance.

    The returned handler only intercepts asyncio sessions that the manager
    claims; everything else executes the statement untouched.
    """

    def handler(execute_state: ORMExecuteState) -> Any:
        passthrough = execute_state.invoke_statement

        if not manager._matches_session(execute_state.session):
            return passthrough()
        if execute_state.execution_options.get("sqlacache_skip_interceptor"):
            return passthrough()

        # Caching and bulk invalidation are only wired up for asyncio
        # sessions; synchronous sessions fall through untouched.
        is_async = getattr(execute_state.session, "_is_asyncio", False)
        if execute_state.is_select and is_async:
            return await_only(resolve_cached_result(manager, execute_state))
        if (execute_state.is_update or execute_state.is_delete) and is_async:
            return await_only(handle_bulk_mutation(manager, execute_state))
        return passthrough()

    return handler
68
|
+
|
|
69
|
+
|
|
70
|
+
def build_invalidation_handler(manager: Any, action: str) -> Any:
    """Build a mapper event handler that invalidates on row mutation.

    ``action`` labels the mutation kind and is forwarded to the manager's
    invalidation scheduler.
    """

    def handler(mapper: Any, _connection: Any, target: Any) -> None:
        # The connection is part of the SQLAlchemy event signature but is not
        # needed for scheduling invalidation.
        manager._schedule_invalidation(mapper.class_, target, action=action)

    return handler
78
|
+
|
|
79
|
+
|
|
80
|
+
def build_bulk_update_handler(manager: Any) -> Any:
    """Build a handler that schedules a table-version bump after a bulk UPDATE."""

    def handler(update_context: Any) -> None:
        mapper = getattr(update_context, "mapper", None)
        if mapper is None:
            # Nothing to invalidate without a mapped class.
            return
        manager._schedule_table_bump(mapper.class_)

    return handler
|
+
|
|
88
|
+
|
|
89
|
+
def build_bulk_delete_handler(manager: Any) -> Any:
    """Build a handler that schedules a table-version bump after a bulk DELETE."""

    def handler(delete_context: Any) -> None:
        mapper = getattr(delete_context, "mapper", None)
        if mapper is None:
            # Nothing to invalidate without a mapped class.
            return
        manager._schedule_table_bump(mapper.class_)

    return handler
96
|
+
|
|
97
|
+
|
|
98
|
+
async def resolve_cached_result(manager: Any, execute_state: ORMExecuteState) -> Any:
    """Serve a SELECT from cache when possible, otherwise execute and cache it.

    Falls through to plain execution when no models are involved or caching
    is disabled for the detected operation on the primary model.
    """
    statement = execute_state.statement
    models = manager._extract_models(statement)
    if not models:
        return execute_state.invoke_statement()

    primary_model = models[0]
    operation = detect_operation_type(statement, models)
    if not manager.is_enabled(primary_model, operation):
        return execute_state.invoke_statement()

    cache_key = await manager._build_cache_key(statement, models)
    cached = await manager._transport.get(cache_key)
    if cached is not None:
        # Cache hit: merge the frozen result into the live session.
        return merge_cached_result(execute_state.session, statement, cached)

    # Cache miss: execute, then store with the model-specific timeout
    # (falling back to the global default).
    fresh = execute_state.invoke_statement()
    model_config = manager.get_model_config(primary_model)
    ttl = model_config["timeout"] if model_config else manager.config["default_timeout"]
    return await cache_query_result(manager, statement, fresh, models, ttl)
118
|
+
|
|
119
|
+
|
|
120
|
+
async def handle_bulk_mutation(manager: Any, execute_state: ORMExecuteState) -> Any:
    """Run a bulk UPDATE/DELETE and bump table versions for affected models.

    Bulk statements do not expose per-row primary keys, so invalidation is
    table-wide: each touched table's version counter is incremented.
    """
    statement = execute_state.statement
    outcome = execute_state.invoke_statement()
    affected_models = manager._extract_models(statement)

    transport = manager._transport
    if transport is not None:
        for model in affected_models:
            await _bump_table_version(transport, model)
    return outcome
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
"""Row-level invalidation helpers."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import TYPE_CHECKING, Any
|
|
6
|
+
|
|
7
|
+
if TYPE_CHECKING:
|
|
8
|
+
from sqlacache.transport import CacheTransport
|
|
9
|
+
|
|
10
|
+
# Key prefix separating table-version counters from ordinary cache entries.
_TABLE_VERSION_PREFIX = "__sqlacache_tv__"


def generate_tags(model: type[Any], pks: list[Any]) -> list[str]:
    """Generate dependency tags from model/table and PK values.

    ``None`` primary keys are skipped; each remaining value produces a
    ``"<table>:<pk>"`` tag.
    """
    table = model.__tablename__
    return [f"{table}:{value}" for value in pks if value is not None]
|
+
|
|
19
|
+
|
|
20
|
+
async def invalidate_tags(transport: CacheTransport, *tags: str) -> None:
    """Invalidate transport entries for the provided tags.

    A no-op when called without tags, so the transport is never touched
    unnecessarily.
    """
    if tags:
        await transport.delete_tags(*tags)
26
|
+
|
|
27
|
+
|
|
28
|
+
def _table_version_key(model: type[Any] | str) -> str:
    """Return the shared-cache key that stores a table's version counter."""
    # Accept either a table name directly or a mapped class.
    if isinstance(model, str):
        table = model
    else:
        table = model.__tablename__
    return _TABLE_VERSION_PREFIX + table
31
|
+
|
|
32
|
+
|
|
33
|
+
async def _bump_table_version(transport: CacheTransport, model: type[Any] | str) -> int:
    """Increment the table-version counter in the shared cache and return the new value."""
    # incr is atomic on the transport side, so concurrent bumps are safe.
    return await transport.incr(_table_version_key(model))
|
+
|
|
39
|
+
|
|
40
|
+
async def _get_table_version(transport: CacheTransport, model: type[Any] | str) -> int:
    """Return the current table-version counter from the shared cache.

    A missing counter means the table was never bumped, which reads as
    version 0.
    """
    raw = await transport.get(_table_version_key(model))
    if raw is None:
        return 0
    return int(raw)