tnfr-6.0.0-py3-none-any.whl → tnfr-7.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of tnfr might be problematic.
- tnfr/__init__.py +50 -5
- tnfr/__init__.pyi +0 -7
- tnfr/_compat.py +0 -1
- tnfr/_generated_version.py +34 -0
- tnfr/_version.py +44 -2
- tnfr/alias.py +14 -13
- tnfr/alias.pyi +5 -37
- tnfr/cache.py +9 -729
- tnfr/cache.pyi +8 -224
- tnfr/callback_utils.py +16 -31
- tnfr/callback_utils.pyi +3 -29
- tnfr/cli/__init__.py +17 -11
- tnfr/cli/__init__.pyi +0 -21
- tnfr/cli/arguments.py +175 -14
- tnfr/cli/arguments.pyi +5 -11
- tnfr/cli/execution.py +434 -48
- tnfr/cli/execution.pyi +14 -24
- tnfr/cli/utils.py +20 -3
- tnfr/cli/utils.pyi +5 -5
- tnfr/config/__init__.py +2 -1
- tnfr/config/__init__.pyi +2 -0
- tnfr/config/feature_flags.py +83 -0
- tnfr/config/init.py +1 -1
- tnfr/config/operator_names.py +1 -14
- tnfr/config/presets.py +6 -26
- tnfr/constants/__init__.py +10 -13
- tnfr/constants/__init__.pyi +10 -22
- tnfr/constants/aliases.py +31 -0
- tnfr/constants/core.py +4 -3
- tnfr/constants/init.py +1 -1
- tnfr/constants/metric.py +3 -3
- tnfr/dynamics/__init__.py +64 -10
- tnfr/dynamics/__init__.pyi +3 -4
- tnfr/dynamics/adaptation.py +79 -13
- tnfr/dynamics/aliases.py +10 -9
- tnfr/dynamics/coordination.py +77 -35
- tnfr/dynamics/dnfr.py +575 -274
- tnfr/dynamics/dnfr.pyi +1 -10
- tnfr/dynamics/integrators.py +47 -33
- tnfr/dynamics/integrators.pyi +0 -1
- tnfr/dynamics/runtime.py +489 -129
- tnfr/dynamics/sampling.py +2 -0
- tnfr/dynamics/selectors.py +101 -62
- tnfr/execution.py +15 -8
- tnfr/execution.pyi +5 -25
- tnfr/flatten.py +7 -3
- tnfr/flatten.pyi +1 -8
- tnfr/gamma.py +22 -26
- tnfr/gamma.pyi +0 -6
- tnfr/glyph_history.py +37 -26
- tnfr/glyph_history.pyi +1 -19
- tnfr/glyph_runtime.py +16 -0
- tnfr/glyph_runtime.pyi +9 -0
- tnfr/immutable.py +20 -15
- tnfr/immutable.pyi +4 -7
- tnfr/initialization.py +5 -7
- tnfr/initialization.pyi +1 -9
- tnfr/io.py +6 -305
- tnfr/io.pyi +13 -8
- tnfr/mathematics/__init__.py +81 -0
- tnfr/mathematics/backend.py +426 -0
- tnfr/mathematics/dynamics.py +398 -0
- tnfr/mathematics/epi.py +254 -0
- tnfr/mathematics/generators.py +222 -0
- tnfr/mathematics/metrics.py +119 -0
- tnfr/mathematics/operators.py +233 -0
- tnfr/mathematics/operators_factory.py +71 -0
- tnfr/mathematics/projection.py +78 -0
- tnfr/mathematics/runtime.py +173 -0
- tnfr/mathematics/spaces.py +247 -0
- tnfr/mathematics/transforms.py +292 -0
- tnfr/metrics/__init__.py +10 -10
- tnfr/metrics/coherence.py +123 -94
- tnfr/metrics/common.py +22 -13
- tnfr/metrics/common.pyi +42 -11
- tnfr/metrics/core.py +72 -14
- tnfr/metrics/diagnosis.py +48 -57
- tnfr/metrics/diagnosis.pyi +3 -7
- tnfr/metrics/export.py +3 -5
- tnfr/metrics/glyph_timing.py +41 -31
- tnfr/metrics/reporting.py +13 -6
- tnfr/metrics/sense_index.py +884 -114
- tnfr/metrics/trig.py +167 -11
- tnfr/metrics/trig.pyi +1 -0
- tnfr/metrics/trig_cache.py +112 -15
- tnfr/node.py +400 -17
- tnfr/node.pyi +55 -38
- tnfr/observers.py +111 -8
- tnfr/observers.pyi +0 -15
- tnfr/ontosim.py +9 -6
- tnfr/ontosim.pyi +0 -5
- tnfr/operators/__init__.py +529 -42
- tnfr/operators/__init__.pyi +14 -0
- tnfr/operators/definitions.py +350 -18
- tnfr/operators/definitions.pyi +0 -14
- tnfr/operators/grammar.py +760 -0
- tnfr/operators/jitter.py +28 -22
- tnfr/operators/registry.py +7 -12
- tnfr/operators/registry.pyi +0 -2
- tnfr/operators/remesh.py +38 -61
- tnfr/rng.py +17 -300
- tnfr/schemas/__init__.py +8 -0
- tnfr/schemas/grammar.json +94 -0
- tnfr/selector.py +3 -4
- tnfr/selector.pyi +1 -1
- tnfr/sense.py +22 -24
- tnfr/sense.pyi +0 -7
- tnfr/structural.py +504 -21
- tnfr/structural.pyi +41 -18
- tnfr/telemetry/__init__.py +23 -1
- tnfr/telemetry/cache_metrics.py +226 -0
- tnfr/telemetry/nu_f.py +423 -0
- tnfr/telemetry/nu_f.pyi +123 -0
- tnfr/tokens.py +1 -4
- tnfr/tokens.pyi +1 -6
- tnfr/trace.py +20 -53
- tnfr/trace.pyi +9 -37
- tnfr/types.py +244 -15
- tnfr/types.pyi +200 -14
- tnfr/units.py +69 -0
- tnfr/units.pyi +16 -0
- tnfr/utils/__init__.py +107 -48
- tnfr/utils/__init__.pyi +80 -11
- tnfr/utils/cache.py +1705 -65
- tnfr/utils/cache.pyi +370 -58
- tnfr/utils/chunks.py +104 -0
- tnfr/utils/chunks.pyi +21 -0
- tnfr/utils/data.py +95 -5
- tnfr/utils/data.pyi +8 -17
- tnfr/utils/graph.py +2 -4
- tnfr/utils/init.py +31 -7
- tnfr/utils/init.pyi +4 -11
- tnfr/utils/io.py +313 -14
- tnfr/{helpers → utils}/numeric.py +50 -24
- tnfr/utils/numeric.pyi +21 -0
- tnfr/validation/__init__.py +92 -4
- tnfr/validation/__init__.pyi +77 -17
- tnfr/validation/compatibility.py +79 -43
- tnfr/validation/compatibility.pyi +4 -6
- tnfr/validation/grammar.py +55 -133
- tnfr/validation/grammar.pyi +37 -8
- tnfr/validation/graph.py +138 -0
- tnfr/validation/graph.pyi +17 -0
- tnfr/validation/rules.py +161 -74
- tnfr/validation/rules.pyi +55 -18
- tnfr/validation/runtime.py +263 -0
- tnfr/validation/runtime.pyi +31 -0
- tnfr/validation/soft_filters.py +170 -0
- tnfr/validation/soft_filters.pyi +37 -0
- tnfr/validation/spectral.py +159 -0
- tnfr/validation/spectral.pyi +46 -0
- tnfr/validation/syntax.py +28 -139
- tnfr/validation/syntax.pyi +7 -4
- tnfr/validation/window.py +39 -0
- tnfr/validation/window.pyi +1 -0
- tnfr/viz/__init__.py +9 -0
- tnfr/viz/matplotlib.py +246 -0
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/METADATA +63 -19
- tnfr-7.0.0.dist-info/RECORD +185 -0
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/licenses/LICENSE.md +1 -1
- tnfr/constants_glyphs.py +0 -16
- tnfr/constants_glyphs.pyi +0 -12
- tnfr/grammar.py +0 -25
- tnfr/grammar.pyi +0 -13
- tnfr/helpers/__init__.py +0 -151
- tnfr/helpers/__init__.pyi +0 -66
- tnfr/helpers/numeric.pyi +0 -12
- tnfr/presets.py +0 -15
- tnfr/presets.pyi +0 -7
- tnfr/utils/io.pyi +0 -10
- tnfr/utils/validators.py +0 -130
- tnfr/utils/validators.pyi +0 -19
- tnfr-6.0.0.dist-info/RECORD +0 -157
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/WHEEL +0 -0
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/entry_points.txt +0 -0
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/top_level.txt +0 -0
tnfr/utils/cache.py
CHANGED
|
@@ -1,15 +1,19 @@
|
|
|
1
|
-
"""
|
|
1
|
+
"""Cache infrastructure primitives and graph-level helpers for TNFR.
|
|
2
2
|
|
|
3
3
|
This module consolidates structural cache helpers that previously lived in
|
|
4
|
-
|
|
5
|
-
exposed here are responsible for maintaining deterministic node
|
|
6
|
-
scoped graph caches guarded by locks, and version counters that keep
|
|
7
|
-
artifacts in sync with ΔNFR driven updates.
|
|
4
|
+
legacy helper modules and are now exposed under :mod:`tnfr.utils`. The
|
|
5
|
+
functions exposed here are responsible for maintaining deterministic node
|
|
6
|
+
digests, scoped graph caches guarded by locks, and version counters that keep
|
|
7
|
+
edge artifacts in sync with ΔNFR driven updates.
|
|
8
8
|
"""
|
|
9
9
|
|
|
10
10
|
from __future__ import annotations
|
|
11
11
|
|
|
12
|
+
from abc import ABC, abstractmethod
|
|
12
13
|
import hashlib
|
|
14
|
+
import logging
|
|
15
|
+
import pickle
|
|
16
|
+
import shelve
|
|
13
17
|
import threading
|
|
14
18
|
from collections import defaultdict
|
|
15
19
|
from collections.abc import (
|
|
@@ -21,22 +25,33 @@ from collections.abc import (
|
|
|
21
25
|
MutableMapping,
|
|
22
26
|
)
|
|
23
27
|
from contextlib import contextmanager
|
|
28
|
+
from dataclasses import dataclass, field
|
|
24
29
|
from functools import lru_cache
|
|
25
|
-
from
|
|
26
|
-
from typing import TYPE_CHECKING, Any, TypeVar, cast
|
|
30
|
+
from time import perf_counter
|
|
31
|
+
from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast
|
|
27
32
|
|
|
28
|
-
from cachetools import LRUCache
|
|
29
33
|
import networkx as nx
|
|
34
|
+
from cachetools import LRUCache
|
|
30
35
|
|
|
31
|
-
from ..
|
|
32
|
-
from ..types import GraphLike, NodeId,
|
|
36
|
+
from ..locking import get_lock
|
|
37
|
+
from ..types import GraphLike, NodeId, TimingContext, TNFRGraph
|
|
33
38
|
from .graph import get_graph, mark_dnfr_prep_dirty
|
|
34
|
-
from .init import get_logger, get_numpy
|
|
35
|
-
from .io import json_dumps
|
|
36
39
|
|
|
40
|
+
K = TypeVar("K", bound=Hashable)
|
|
41
|
+
V = TypeVar("V")
|
|
37
42
|
T = TypeVar("T")
|
|
38
43
|
|
|
39
44
|
__all__ = (
|
|
45
|
+
"CacheLayer",
|
|
46
|
+
"CacheManager",
|
|
47
|
+
"CacheCapacityConfig",
|
|
48
|
+
"CacheStatistics",
|
|
49
|
+
"InstrumentedLRUCache",
|
|
50
|
+
"ManagedLRUCache",
|
|
51
|
+
"MappingCacheLayer",
|
|
52
|
+
"RedisCacheLayer",
|
|
53
|
+
"ShelveCacheLayer",
|
|
54
|
+
"prune_lock_mapping",
|
|
40
55
|
"EdgeCacheManager",
|
|
41
56
|
"NODE_SET_CHECKSUM_KEY",
|
|
42
57
|
"cached_node_list",
|
|
@@ -54,15 +69,1163 @@ __all__ = (
|
|
|
54
69
|
"configure_graph_cache_limits",
|
|
55
70
|
"DNFR_PREP_STATE_KEY",
|
|
56
71
|
"DnfrPrepState",
|
|
72
|
+
"build_cache_manager",
|
|
73
|
+
"configure_global_cache_layers",
|
|
74
|
+
"reset_global_cache_manager",
|
|
75
|
+
"_GRAPH_CACHE_LAYERS_KEY",
|
|
76
|
+
"_SeedHashCache",
|
|
77
|
+
"ScopedCounterCache",
|
|
78
|
+
"DnfrCache",
|
|
79
|
+
"new_dnfr_cache",
|
|
57
80
|
)
|
|
58
81
|
|
|
59
|
-
|
|
60
|
-
|
|
82
|
+
@dataclass(frozen=True)
|
|
83
|
+
class CacheCapacityConfig:
|
|
84
|
+
"""Configuration snapshot for cache capacity policies."""
|
|
85
|
+
|
|
86
|
+
default_capacity: int | None
|
|
87
|
+
overrides: dict[str, int | None]
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
@dataclass(frozen=True)
|
|
91
|
+
class CacheStatistics:
|
|
92
|
+
"""Immutable snapshot of cache telemetry counters."""
|
|
93
|
+
|
|
94
|
+
hits: int = 0
|
|
95
|
+
misses: int = 0
|
|
96
|
+
evictions: int = 0
|
|
97
|
+
total_time: float = 0.0
|
|
98
|
+
timings: int = 0
|
|
99
|
+
|
|
100
|
+
def merge(self, other: CacheStatistics) -> CacheStatistics:
|
|
101
|
+
"""Return aggregated metrics combining ``self`` and ``other``."""
|
|
102
|
+
|
|
103
|
+
return CacheStatistics(
|
|
104
|
+
hits=self.hits + other.hits,
|
|
105
|
+
misses=self.misses + other.misses,
|
|
106
|
+
evictions=self.evictions + other.evictions,
|
|
107
|
+
total_time=self.total_time + other.total_time,
|
|
108
|
+
timings=self.timings + other.timings,
|
|
109
|
+
)
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
@dataclass
|
|
113
|
+
class DnfrCache:
|
|
114
|
+
idx: dict[Any, int]
|
|
115
|
+
theta: list[float]
|
|
116
|
+
epi: list[float]
|
|
117
|
+
vf: list[float]
|
|
118
|
+
cos_theta: list[float]
|
|
119
|
+
sin_theta: list[float]
|
|
120
|
+
neighbor_x: list[float]
|
|
121
|
+
neighbor_y: list[float]
|
|
122
|
+
neighbor_epi_sum: list[float]
|
|
123
|
+
neighbor_vf_sum: list[float]
|
|
124
|
+
neighbor_count: list[float]
|
|
125
|
+
neighbor_deg_sum: list[float] | None
|
|
126
|
+
th_bar: list[float] | None = None
|
|
127
|
+
epi_bar: list[float] | None = None
|
|
128
|
+
vf_bar: list[float] | None = None
|
|
129
|
+
deg_bar: list[float] | None = None
|
|
130
|
+
degs: dict[Any, float] | None = None
|
|
131
|
+
deg_list: list[float] | None = None
|
|
132
|
+
theta_np: Any | None = None
|
|
133
|
+
epi_np: Any | None = None
|
|
134
|
+
vf_np: Any | None = None
|
|
135
|
+
cos_theta_np: Any | None = None
|
|
136
|
+
sin_theta_np: Any | None = None
|
|
137
|
+
deg_array: Any | None = None
|
|
138
|
+
edge_src: Any | None = None
|
|
139
|
+
edge_dst: Any | None = None
|
|
140
|
+
checksum: Any | None = None
|
|
141
|
+
neighbor_x_np: Any | None = None
|
|
142
|
+
neighbor_y_np: Any | None = None
|
|
143
|
+
neighbor_epi_sum_np: Any | None = None
|
|
144
|
+
neighbor_vf_sum_np: Any | None = None
|
|
145
|
+
neighbor_count_np: Any | None = None
|
|
146
|
+
neighbor_deg_sum_np: Any | None = None
|
|
147
|
+
th_bar_np: Any | None = None
|
|
148
|
+
epi_bar_np: Any | None = None
|
|
149
|
+
vf_bar_np: Any | None = None
|
|
150
|
+
deg_bar_np: Any | None = None
|
|
151
|
+
grad_phase_np: Any | None = None
|
|
152
|
+
grad_epi_np: Any | None = None
|
|
153
|
+
grad_vf_np: Any | None = None
|
|
154
|
+
grad_topo_np: Any | None = None
|
|
155
|
+
grad_total_np: Any | None = None
|
|
156
|
+
dense_components_np: Any | None = None
|
|
157
|
+
dense_accum_np: Any | None = None
|
|
158
|
+
dense_degree_np: Any | None = None
|
|
159
|
+
neighbor_accum_np: Any | None = None
|
|
160
|
+
neighbor_inv_count_np: Any | None = None
|
|
161
|
+
neighbor_cos_avg_np: Any | None = None
|
|
162
|
+
neighbor_sin_avg_np: Any | None = None
|
|
163
|
+
neighbor_mean_tmp_np: Any | None = None
|
|
164
|
+
neighbor_mean_length_np: Any | None = None
|
|
165
|
+
edge_signature: Any | None = None
|
|
166
|
+
neighbor_accum_signature: Any | None = None
|
|
167
|
+
neighbor_edge_values_np: Any | None = None
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
def new_dnfr_cache() -> DnfrCache:
|
|
171
|
+
"""Return an empty :class:`DnfrCache` prepared for ΔNFR orchestration."""
|
|
172
|
+
|
|
173
|
+
return DnfrCache(
|
|
174
|
+
idx={},
|
|
175
|
+
theta=[],
|
|
176
|
+
epi=[],
|
|
177
|
+
vf=[],
|
|
178
|
+
cos_theta=[],
|
|
179
|
+
sin_theta=[],
|
|
180
|
+
neighbor_x=[],
|
|
181
|
+
neighbor_y=[],
|
|
182
|
+
neighbor_epi_sum=[],
|
|
183
|
+
neighbor_vf_sum=[],
|
|
184
|
+
neighbor_count=[],
|
|
185
|
+
neighbor_deg_sum=[],
|
|
186
|
+
)
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
@dataclass
|
|
190
|
+
class _CacheMetrics:
|
|
191
|
+
hits: int = 0
|
|
192
|
+
misses: int = 0
|
|
193
|
+
evictions: int = 0
|
|
194
|
+
total_time: float = 0.0
|
|
195
|
+
timings: int = 0
|
|
196
|
+
lock: threading.Lock = field(default_factory=threading.Lock, repr=False)
|
|
197
|
+
|
|
198
|
+
def snapshot(self) -> CacheStatistics:
|
|
199
|
+
return CacheStatistics(
|
|
200
|
+
hits=self.hits,
|
|
201
|
+
misses=self.misses,
|
|
202
|
+
evictions=self.evictions,
|
|
203
|
+
total_time=self.total_time,
|
|
204
|
+
timings=self.timings,
|
|
205
|
+
)
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
@dataclass
|
|
209
|
+
class _CacheEntry:
|
|
210
|
+
factory: Callable[[], Any]
|
|
211
|
+
lock: threading.Lock
|
|
212
|
+
reset: Callable[[Any], Any] | None = None
|
|
213
|
+
encoder: Callable[[Any], Any] | None = None
|
|
214
|
+
decoder: Callable[[Any], Any] | None = None
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
class CacheLayer(ABC):
|
|
218
|
+
"""Abstract interface implemented by storage backends orchestrated by :class:`CacheManager`."""
|
|
219
|
+
|
|
220
|
+
@abstractmethod
|
|
221
|
+
def load(self, name: str) -> Any:
|
|
222
|
+
"""Return the stored payload for ``name`` or raise :class:`KeyError`."""
|
|
223
|
+
|
|
224
|
+
@abstractmethod
|
|
225
|
+
def store(self, name: str, value: Any) -> None:
|
|
226
|
+
"""Persist ``value`` under ``name``."""
|
|
227
|
+
|
|
228
|
+
@abstractmethod
|
|
229
|
+
def delete(self, name: str) -> None:
|
|
230
|
+
"""Remove ``name`` from the backend if present."""
|
|
231
|
+
|
|
232
|
+
@abstractmethod
|
|
233
|
+
def clear(self) -> None:
|
|
234
|
+
"""Remove every entry maintained by the layer."""
|
|
235
|
+
|
|
236
|
+
def close(self) -> None: # pragma: no cover - optional hook
|
|
237
|
+
"""Release resources held by the backend."""
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
class MappingCacheLayer(CacheLayer):
|
|
241
|
+
"""In-memory cache layer backed by a mutable mapping."""
|
|
242
|
+
|
|
243
|
+
def __init__(self, storage: MutableMapping[str, Any] | None = None) -> None:
|
|
244
|
+
self._storage: MutableMapping[str, Any] = {} if storage is None else storage
|
|
245
|
+
self._lock = threading.RLock()
|
|
246
|
+
|
|
247
|
+
@property
|
|
248
|
+
def storage(self) -> MutableMapping[str, Any]:
|
|
249
|
+
"""Return the mapping used to store cache entries."""
|
|
250
|
+
|
|
251
|
+
return self._storage
|
|
252
|
+
|
|
253
|
+
def load(self, name: str) -> Any:
|
|
254
|
+
with self._lock:
|
|
255
|
+
if name not in self._storage:
|
|
256
|
+
raise KeyError(name)
|
|
257
|
+
return self._storage[name]
|
|
258
|
+
|
|
259
|
+
def store(self, name: str, value: Any) -> None:
|
|
260
|
+
with self._lock:
|
|
261
|
+
self._storage[name] = value
|
|
262
|
+
|
|
263
|
+
def delete(self, name: str) -> None:
|
|
264
|
+
with self._lock:
|
|
265
|
+
self._storage.pop(name, None)
|
|
266
|
+
|
|
267
|
+
def clear(self) -> None:
|
|
268
|
+
with self._lock:
|
|
269
|
+
self._storage.clear()
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
class ShelveCacheLayer(CacheLayer):
|
|
273
|
+
"""Persistent cache layer backed by :mod:`shelve`."""
|
|
274
|
+
|
|
275
|
+
def __init__(
|
|
276
|
+
self,
|
|
277
|
+
path: str,
|
|
278
|
+
*,
|
|
279
|
+
flag: str = "c",
|
|
280
|
+
protocol: int | None = None,
|
|
281
|
+
writeback: bool = False,
|
|
282
|
+
) -> None:
|
|
283
|
+
self._path = path
|
|
284
|
+
self._flag = flag
|
|
285
|
+
self._protocol = pickle.HIGHEST_PROTOCOL if protocol is None else protocol
|
|
286
|
+
self._shelf = shelve.open(path, flag=flag, protocol=self._protocol, writeback=writeback)
|
|
287
|
+
self._lock = threading.RLock()
|
|
288
|
+
|
|
289
|
+
def load(self, name: str) -> Any:
|
|
290
|
+
with self._lock:
|
|
291
|
+
if name not in self._shelf:
|
|
292
|
+
raise KeyError(name)
|
|
293
|
+
return self._shelf[name]
|
|
294
|
+
|
|
295
|
+
def store(self, name: str, value: Any) -> None:
|
|
296
|
+
with self._lock:
|
|
297
|
+
self._shelf[name] = value
|
|
298
|
+
self._shelf.sync()
|
|
299
|
+
|
|
300
|
+
def delete(self, name: str) -> None:
|
|
301
|
+
with self._lock:
|
|
302
|
+
try:
|
|
303
|
+
del self._shelf[name]
|
|
304
|
+
except KeyError:
|
|
305
|
+
return
|
|
306
|
+
self._shelf.sync()
|
|
307
|
+
|
|
308
|
+
def clear(self) -> None:
|
|
309
|
+
with self._lock:
|
|
310
|
+
self._shelf.clear()
|
|
311
|
+
self._shelf.sync()
|
|
312
|
+
|
|
313
|
+
def close(self) -> None: # pragma: no cover - exercised indirectly
|
|
314
|
+
with self._lock:
|
|
315
|
+
self._shelf.close()
|
|
316
|
+
|
|
317
|
+
|
|
318
|
+
class RedisCacheLayer(CacheLayer):
|
|
319
|
+
"""Distributed cache layer backed by a Redis client."""
|
|
320
|
+
|
|
321
|
+
def __init__(self, client: Any | None = None, *, namespace: str = "tnfr:cache") -> None:
|
|
322
|
+
if client is None:
|
|
323
|
+
try: # pragma: no cover - import guarded for optional dependency
|
|
324
|
+
import redis # type: ignore
|
|
325
|
+
except Exception as exc: # pragma: no cover - defensive import
|
|
326
|
+
raise RuntimeError("redis-py is required to initialise RedisCacheLayer") from exc
|
|
327
|
+
client = redis.Redis()
|
|
328
|
+
self._client = client
|
|
329
|
+
self._namespace = namespace.rstrip(":") or "tnfr:cache"
|
|
330
|
+
self._lock = threading.RLock()
|
|
331
|
+
|
|
332
|
+
def _format_key(self, name: str) -> str:
|
|
333
|
+
return f"{self._namespace}:{name}"
|
|
334
|
+
|
|
335
|
+
def load(self, name: str) -> Any:
|
|
336
|
+
key = self._format_key(name)
|
|
337
|
+
with self._lock:
|
|
338
|
+
value = self._client.get(key)
|
|
339
|
+
if value is None:
|
|
340
|
+
raise KeyError(name)
|
|
341
|
+
if isinstance(value, (bytes, bytearray, memoryview)):
|
|
342
|
+
return pickle.loads(bytes(value))
|
|
343
|
+
return value
|
|
344
|
+
|
|
345
|
+
def store(self, name: str, value: Any) -> None:
|
|
346
|
+
key = self._format_key(name)
|
|
347
|
+
payload = value
|
|
348
|
+
if not isinstance(value, (bytes, bytearray, memoryview)):
|
|
349
|
+
payload = pickle.dumps(value, protocol=pickle.HIGHEST_PROTOCOL)
|
|
350
|
+
with self._lock:
|
|
351
|
+
self._client.set(key, payload)
|
|
352
|
+
|
|
353
|
+
def delete(self, name: str) -> None:
|
|
354
|
+
key = self._format_key(name)
|
|
355
|
+
with self._lock:
|
|
356
|
+
self._client.delete(key)
|
|
357
|
+
|
|
358
|
+
def clear(self) -> None:
|
|
359
|
+
pattern = f"{self._namespace}:*"
|
|
360
|
+
with self._lock:
|
|
361
|
+
if hasattr(self._client, "scan_iter"):
|
|
362
|
+
keys = list(self._client.scan_iter(match=pattern))
|
|
363
|
+
elif hasattr(self._client, "keys"):
|
|
364
|
+
keys = list(self._client.keys(pattern))
|
|
365
|
+
else: # pragma: no cover - extremely defensive
|
|
366
|
+
keys = []
|
|
367
|
+
if keys:
|
|
368
|
+
self._client.delete(*keys)
|
|
369
|
+
|
|
370
|
+
|
|
371
|
+
class CacheManager:
|
|
372
|
+
"""Coordinate named caches guarded by per-entry locks."""
|
|
373
|
+
|
|
374
|
+
_MISSING = object()
|
|
375
|
+
|
|
376
|
+
def __init__(
|
|
377
|
+
self,
|
|
378
|
+
storage: MutableMapping[str, Any] | None = None,
|
|
379
|
+
*,
|
|
380
|
+
default_capacity: int | None = None,
|
|
381
|
+
overrides: Mapping[str, int | None] | None = None,
|
|
382
|
+
layers: Iterable[CacheLayer] | None = None,
|
|
383
|
+
) -> None:
|
|
384
|
+
mapping_layer = MappingCacheLayer(storage)
|
|
385
|
+
extra_layers: tuple[CacheLayer, ...]
|
|
386
|
+
if layers is None:
|
|
387
|
+
extra_layers = ()
|
|
388
|
+
else:
|
|
389
|
+
extra_layers = tuple(layers)
|
|
390
|
+
for layer in extra_layers:
|
|
391
|
+
if not isinstance(layer, CacheLayer): # pragma: no cover - defensive typing
|
|
392
|
+
raise TypeError(f"unsupported cache layer type: {type(layer)!r}")
|
|
393
|
+
self._layers: tuple[CacheLayer, ...] = (mapping_layer, *extra_layers)
|
|
394
|
+
self._storage_layer = mapping_layer
|
|
395
|
+
self._storage: MutableMapping[str, Any] = mapping_layer.storage
|
|
396
|
+
self._entries: dict[str, _CacheEntry] = {}
|
|
397
|
+
self._registry_lock = threading.RLock()
|
|
398
|
+
self._default_capacity = self._normalise_capacity(default_capacity)
|
|
399
|
+
self._capacity_overrides: dict[str, int | None] = {}
|
|
400
|
+
self._metrics: dict[str, _CacheMetrics] = {}
|
|
401
|
+
self._metrics_publishers: list[Callable[[str, CacheStatistics], None]] = []
|
|
402
|
+
if overrides:
|
|
403
|
+
self.configure(overrides=overrides)
|
|
404
|
+
|
|
405
|
+
@staticmethod
|
|
406
|
+
def _normalise_capacity(value: int | None) -> int | None:
|
|
407
|
+
if value is None:
|
|
408
|
+
return None
|
|
409
|
+
size = int(value)
|
|
410
|
+
if size < 0:
|
|
411
|
+
raise ValueError("capacity must be non-negative or None")
|
|
412
|
+
return size
|
|
413
|
+
|
|
414
|
+
def register(
|
|
415
|
+
self,
|
|
416
|
+
name: str,
|
|
417
|
+
factory: Callable[[], Any],
|
|
418
|
+
*,
|
|
419
|
+
lock_factory: Callable[[], threading.Lock | threading.RLock] | None = None,
|
|
420
|
+
reset: Callable[[Any], Any] | None = None,
|
|
421
|
+
create: bool = True,
|
|
422
|
+
encoder: Callable[[Any], Any] | None = None,
|
|
423
|
+
decoder: Callable[[Any], Any] | None = None,
|
|
424
|
+
) -> None:
|
|
425
|
+
"""Register ``name`` with ``factory`` and optional lifecycle hooks."""
|
|
426
|
+
|
|
427
|
+
if lock_factory is None:
|
|
428
|
+
lock_factory = threading.RLock
|
|
429
|
+
with self._registry_lock:
|
|
430
|
+
entry = self._entries.get(name)
|
|
431
|
+
if entry is None:
|
|
432
|
+
entry = _CacheEntry(
|
|
433
|
+
factory=factory,
|
|
434
|
+
lock=lock_factory(),
|
|
435
|
+
reset=reset,
|
|
436
|
+
encoder=encoder,
|
|
437
|
+
decoder=decoder,
|
|
438
|
+
)
|
|
439
|
+
self._entries[name] = entry
|
|
440
|
+
else:
|
|
441
|
+
# Update hooks when re-registering the same cache name.
|
|
442
|
+
entry.factory = factory
|
|
443
|
+
entry.reset = reset
|
|
444
|
+
entry.encoder = encoder
|
|
445
|
+
entry.decoder = decoder
|
|
446
|
+
self._ensure_metrics(name)
|
|
447
|
+
if create:
|
|
448
|
+
self.get(name)
|
|
449
|
+
|
|
450
|
+
def configure(
|
|
451
|
+
self,
|
|
452
|
+
*,
|
|
453
|
+
default_capacity: int | None | object = _MISSING,
|
|
454
|
+
overrides: Mapping[str, int | None] | None = None,
|
|
455
|
+
replace_overrides: bool = False,
|
|
456
|
+
) -> None:
|
|
457
|
+
"""Update the cache capacity policy shared by registered entries."""
|
|
458
|
+
|
|
459
|
+
with self._registry_lock:
|
|
460
|
+
if default_capacity is not self._MISSING:
|
|
461
|
+
self._default_capacity = self._normalise_capacity(
|
|
462
|
+
default_capacity if default_capacity is not None else None
|
|
463
|
+
)
|
|
464
|
+
if overrides is not None:
|
|
465
|
+
if replace_overrides:
|
|
466
|
+
self._capacity_overrides.clear()
|
|
467
|
+
for key, value in overrides.items():
|
|
468
|
+
self._capacity_overrides[key] = self._normalise_capacity(value)
|
|
469
|
+
|
|
470
|
+
def configure_from_mapping(self, config: Mapping[str, Any]) -> None:
|
|
471
|
+
"""Load configuration produced by :meth:`export_config`."""
|
|
472
|
+
|
|
473
|
+
default = config.get("default_capacity", self._MISSING)
|
|
474
|
+
overrides = config.get("overrides")
|
|
475
|
+
overrides_mapping: Mapping[str, int | None] | None
|
|
476
|
+
overrides_mapping = overrides if isinstance(overrides, Mapping) else None
|
|
477
|
+
self.configure(default_capacity=default, overrides=overrides_mapping)
|
|
478
|
+
|
|
479
|
+
def export_config(self) -> CacheCapacityConfig:
|
|
480
|
+
"""Return a copy of the current capacity configuration."""
|
|
481
|
+
|
|
482
|
+
with self._registry_lock:
|
|
483
|
+
return CacheCapacityConfig(
|
|
484
|
+
default_capacity=self._default_capacity,
|
|
485
|
+
overrides=dict(self._capacity_overrides),
|
|
486
|
+
)
|
|
487
|
+
|
|
488
|
+
def get_capacity(
|
|
489
|
+
self,
|
|
490
|
+
name: str,
|
|
491
|
+
*,
|
|
492
|
+
requested: int | None = None,
|
|
493
|
+
fallback: int | None = None,
|
|
494
|
+
use_default: bool = True,
|
|
495
|
+
) -> int | None:
|
|
496
|
+
"""Return capacity for ``name`` considering overrides and defaults."""
|
|
497
|
+
|
|
498
|
+
with self._registry_lock:
|
|
499
|
+
override = self._capacity_overrides.get(name, self._MISSING)
|
|
500
|
+
default = self._default_capacity
|
|
501
|
+
if override is not self._MISSING:
|
|
502
|
+
return override
|
|
503
|
+
values: tuple[int | None, ...]
|
|
504
|
+
if use_default:
|
|
505
|
+
values = (requested, default, fallback)
|
|
506
|
+
else:
|
|
507
|
+
values = (requested, fallback)
|
|
508
|
+
for value in values:
|
|
509
|
+
if value is self._MISSING:
|
|
510
|
+
continue
|
|
511
|
+
normalised = self._normalise_capacity(value)
|
|
512
|
+
if normalised is not None:
|
|
513
|
+
return normalised
|
|
514
|
+
return None
|
|
515
|
+
|
|
516
|
+
def has_override(self, name: str) -> bool:
|
|
517
|
+
"""Return ``True`` if ``name`` has an explicit capacity override."""
|
|
518
|
+
|
|
519
|
+
with self._registry_lock:
|
|
520
|
+
return name in self._capacity_overrides
|
|
521
|
+
|
|
522
|
+
def get_lock(self, name: str) -> threading.Lock | threading.RLock:
|
|
523
|
+
"""Return the lock guarding cache ``name`` for external coordination."""
|
|
524
|
+
|
|
525
|
+
entry = self._entries.get(name)
|
|
526
|
+
if entry is None:
|
|
527
|
+
raise KeyError(name)
|
|
528
|
+
return entry.lock
|
|
529
|
+
|
|
530
|
+
def names(self) -> Iterator[str]:
|
|
531
|
+
"""Iterate over registered cache names."""
|
|
532
|
+
|
|
533
|
+
with self._registry_lock:
|
|
534
|
+
return iter(tuple(self._entries))
|
|
535
|
+
|
|
536
|
+
def get(self, name: str, *, create: bool = True) -> Any:
|
|
537
|
+
"""Return cache ``name`` creating it on demand when ``create`` is true."""
|
|
538
|
+
|
|
539
|
+
entry = self._entries.get(name)
|
|
540
|
+
if entry is None:
|
|
541
|
+
raise KeyError(name)
|
|
542
|
+
with entry.lock:
|
|
543
|
+
value = self._load_from_layers(name, entry)
|
|
544
|
+
if create and value is None:
|
|
545
|
+
value = entry.factory()
|
|
546
|
+
self._persist_layers(name, entry, value)
|
|
547
|
+
return value
|
|
548
|
+
|
|
549
|
+
def peek(self, name: str) -> Any:
|
|
550
|
+
"""Return cache ``name`` without creating a missing entry."""
|
|
551
|
+
|
|
552
|
+
entry = self._entries.get(name)
|
|
553
|
+
if entry is None:
|
|
554
|
+
raise KeyError(name)
|
|
555
|
+
with entry.lock:
|
|
556
|
+
return self._load_from_layers(name, entry)
|
|
557
|
+
|
|
558
|
+
def store(self, name: str, value: Any) -> None:
|
|
559
|
+
"""Replace the stored value for cache ``name`` with ``value``."""
|
|
560
|
+
|
|
561
|
+
entry = self._entries.get(name)
|
|
562
|
+
if entry is None:
|
|
563
|
+
raise KeyError(name)
|
|
564
|
+
with entry.lock:
|
|
565
|
+
self._persist_layers(name, entry, value)
|
|
566
|
+
|
|
567
|
+
def update(
|
|
568
|
+
self,
|
|
569
|
+
name: str,
|
|
570
|
+
updater: Callable[[Any], Any],
|
|
571
|
+
*,
|
|
572
|
+
create: bool = True,
|
|
573
|
+
) -> Any:
|
|
574
|
+
"""Apply ``updater`` to cache ``name`` storing the resulting value."""
|
|
575
|
+
|
|
576
|
+
entry = self._entries.get(name)
|
|
577
|
+
if entry is None:
|
|
578
|
+
raise KeyError(name)
|
|
579
|
+
with entry.lock:
|
|
580
|
+
current = self._load_from_layers(name, entry)
|
|
581
|
+
if create and current is None:
|
|
582
|
+
current = entry.factory()
|
|
583
|
+
new_value = updater(current)
|
|
584
|
+
self._persist_layers(name, entry, new_value)
|
|
585
|
+
return new_value
|
|
586
|
+
|
|
587
|
+
def clear(self, name: str | None = None) -> None:
|
|
588
|
+
"""Reset caches either selectively or for every registered name."""
|
|
589
|
+
|
|
590
|
+
if name is not None:
|
|
591
|
+
names = (name,)
|
|
592
|
+
else:
|
|
593
|
+
with self._registry_lock:
|
|
594
|
+
names = tuple(self._entries)
|
|
595
|
+
for cache_name in names:
|
|
596
|
+
entry = self._entries.get(cache_name)
|
|
597
|
+
if entry is None:
|
|
598
|
+
continue
|
|
599
|
+
with entry.lock:
|
|
600
|
+
current = self._load_from_layers(cache_name, entry)
|
|
601
|
+
new_value = None
|
|
602
|
+
if entry.reset is not None:
|
|
603
|
+
try:
|
|
604
|
+
new_value = entry.reset(current)
|
|
605
|
+
except Exception: # pragma: no cover - defensive logging
|
|
606
|
+
_logger.exception("cache reset failed for %s", cache_name)
|
|
607
|
+
if new_value is None:
|
|
608
|
+
try:
|
|
609
|
+
new_value = entry.factory()
|
|
610
|
+
except Exception:
|
|
611
|
+
self._delete_from_layers(cache_name)
|
|
612
|
+
continue
|
|
613
|
+
self._persist_layers(cache_name, entry, new_value)
|
|
614
|
+
|
|
615
|
+
# ------------------------------------------------------------------
|
|
616
|
+
# Layer orchestration helpers
|
|
617
|
+
|
|
618
|
+
def _encode_value(self, entry: _CacheEntry, value: Any) -> Any:
|
|
619
|
+
encoder = entry.encoder
|
|
620
|
+
if encoder is None:
|
|
621
|
+
return value
|
|
622
|
+
return encoder(value)
|
|
623
|
+
|
|
624
|
+
def _decode_value(self, entry: _CacheEntry, payload: Any) -> Any:
|
|
625
|
+
decoder = entry.decoder
|
|
626
|
+
if decoder is None:
|
|
627
|
+
return payload
|
|
628
|
+
return decoder(payload)
|
|
629
|
+
|
|
630
|
+
def _store_layer(self, name: str, entry: _CacheEntry, value: Any, *, layer_index: int) -> None:
|
|
631
|
+
layer = self._layers[layer_index]
|
|
632
|
+
if layer_index == 0:
|
|
633
|
+
payload = value
|
|
634
|
+
else:
|
|
635
|
+
try:
|
|
636
|
+
payload = self._encode_value(entry, value)
|
|
637
|
+
except Exception: # pragma: no cover - defensive logging
|
|
638
|
+
_logger.exception("cache encoding failed for %s", name)
|
|
639
|
+
return
|
|
640
|
+
try:
|
|
641
|
+
layer.store(name, payload)
|
|
642
|
+
except Exception: # pragma: no cover - defensive logging
|
|
643
|
+
_logger.exception(
|
|
644
|
+
"cache layer store failed for %s on %s", name, layer.__class__.__name__
|
|
645
|
+
)
|
|
646
|
+
|
|
647
|
+
def _persist_layers(self, name: str, entry: _CacheEntry, value: Any) -> None:
|
|
648
|
+
for index in range(len(self._layers)):
|
|
649
|
+
self._store_layer(name, entry, value, layer_index=index)
|
|
650
|
+
|
|
651
|
+
def _delete_from_layers(self, name: str) -> None:
|
|
652
|
+
for layer in self._layers:
|
|
653
|
+
try:
|
|
654
|
+
layer.delete(name)
|
|
655
|
+
except KeyError:
|
|
656
|
+
continue
|
|
657
|
+
except Exception: # pragma: no cover - defensive logging
|
|
658
|
+
_logger.exception(
|
|
659
|
+
"cache layer delete failed for %s on %s", name, layer.__class__.__name__
|
|
660
|
+
)
|
|
661
|
+
|
|
662
|
+
def _load_from_layers(self, name: str, entry: _CacheEntry) -> Any:
|
|
663
|
+
# Primary in-memory layer first for fast-path lookups.
|
|
664
|
+
try:
|
|
665
|
+
value = self._layers[0].load(name)
|
|
666
|
+
except KeyError:
|
|
667
|
+
value = None
|
|
668
|
+
except Exception: # pragma: no cover - defensive logging
|
|
669
|
+
_logger.exception(
|
|
670
|
+
"cache layer load failed for %s on %s", name, self._layers[0].__class__.__name__
|
|
671
|
+
)
|
|
672
|
+
value = None
|
|
673
|
+
if value is not None:
|
|
674
|
+
return value
|
|
675
|
+
|
|
676
|
+
# Fall back to slower layers and hydrate preceding caches on success.
|
|
677
|
+
for index in range(1, len(self._layers)):
|
|
678
|
+
layer = self._layers[index]
|
|
679
|
+
try:
|
|
680
|
+
payload = layer.load(name)
|
|
681
|
+
except KeyError:
|
|
682
|
+
continue
|
|
683
|
+
except Exception: # pragma: no cover - defensive logging
|
|
684
|
+
_logger.exception(
|
|
685
|
+
"cache layer load failed for %s on %s", name, layer.__class__.__name__
|
|
686
|
+
)
|
|
687
|
+
continue
|
|
688
|
+
try:
|
|
689
|
+
value = self._decode_value(entry, payload)
|
|
690
|
+
except Exception: # pragma: no cover - defensive logging
|
|
691
|
+
_logger.exception("cache decoding failed for %s", name)
|
|
692
|
+
continue
|
|
693
|
+
if value is None:
|
|
694
|
+
continue
|
|
695
|
+
for prev_index in range(index):
|
|
696
|
+
self._store_layer(name, entry, value, layer_index=prev_index)
|
|
697
|
+
return value
|
|
698
|
+
return None
|
|
699
|
+
|
|
700
|
+
# ------------------------------------------------------------------
|
|
701
|
+
# Metrics helpers
|
|
702
|
+
|
|
703
|
+
def _ensure_metrics(self, name: str) -> _CacheMetrics:
|
|
704
|
+
metrics = self._metrics.get(name)
|
|
705
|
+
if metrics is None:
|
|
706
|
+
with self._registry_lock:
|
|
707
|
+
metrics = self._metrics.get(name)
|
|
708
|
+
if metrics is None:
|
|
709
|
+
metrics = _CacheMetrics()
|
|
710
|
+
self._metrics[name] = metrics
|
|
711
|
+
return metrics
|
|
712
|
+
|
|
713
|
+
def increment_hit(
|
|
714
|
+
self,
|
|
715
|
+
name: str,
|
|
716
|
+
*,
|
|
717
|
+
amount: int = 1,
|
|
718
|
+
duration: float | None = None,
|
|
719
|
+
) -> None:
|
|
720
|
+
"""Increase cache hit counters for ``name`` (optionally logging latency)."""
|
|
721
|
+
|
|
722
|
+
metrics = self._ensure_metrics(name)
|
|
723
|
+
with metrics.lock:
|
|
724
|
+
metrics.hits += int(amount)
|
|
725
|
+
if duration is not None:
|
|
726
|
+
metrics.total_time += float(duration)
|
|
727
|
+
metrics.timings += 1
|
|
728
|
+
|
|
729
|
+
def increment_miss(
|
|
730
|
+
self,
|
|
731
|
+
name: str,
|
|
732
|
+
*,
|
|
733
|
+
amount: int = 1,
|
|
734
|
+
duration: float | None = None,
|
|
735
|
+
) -> None:
|
|
736
|
+
"""Increase cache miss counters for ``name`` (optionally logging latency)."""
|
|
737
|
+
|
|
738
|
+
metrics = self._ensure_metrics(name)
|
|
739
|
+
with metrics.lock:
|
|
740
|
+
metrics.misses += int(amount)
|
|
741
|
+
if duration is not None:
|
|
742
|
+
metrics.total_time += float(duration)
|
|
743
|
+
metrics.timings += 1
|
|
744
|
+
|
|
745
|
+
def increment_eviction(self, name: str, *, amount: int = 1) -> None:
|
|
746
|
+
"""Increase eviction count for cache ``name``."""
|
|
747
|
+
|
|
748
|
+
metrics = self._ensure_metrics(name)
|
|
749
|
+
with metrics.lock:
|
|
750
|
+
metrics.evictions += int(amount)
|
|
751
|
+
|
|
752
|
+
def record_timing(self, name: str, duration: float) -> None:
|
|
753
|
+
"""Accumulate ``duration`` into latency telemetry for ``name``."""
|
|
754
|
+
|
|
755
|
+
metrics = self._ensure_metrics(name)
|
|
756
|
+
with metrics.lock:
|
|
757
|
+
metrics.total_time += float(duration)
|
|
758
|
+
metrics.timings += 1
|
|
759
|
+
|
|
760
|
+
@contextmanager
|
|
761
|
+
def timer(self, name: str) -> TimingContext:
|
|
762
|
+
"""Context manager recording execution time for ``name``."""
|
|
763
|
+
|
|
764
|
+
start = perf_counter()
|
|
765
|
+
try:
|
|
766
|
+
yield
|
|
767
|
+
finally:
|
|
768
|
+
self.record_timing(name, perf_counter() - start)
|
|
769
|
+
|
|
770
|
+
def get_metrics(self, name: str) -> CacheStatistics:
|
|
771
|
+
"""Return a snapshot of telemetry collected for cache ``name``."""
|
|
772
|
+
|
|
773
|
+
metrics = self._metrics.get(name)
|
|
774
|
+
if metrics is None:
|
|
775
|
+
return CacheStatistics()
|
|
776
|
+
with metrics.lock:
|
|
777
|
+
return metrics.snapshot()
|
|
778
|
+
|
|
779
|
+
def iter_metrics(self) -> Iterator[tuple[str, CacheStatistics]]:
|
|
780
|
+
"""Yield ``(name, stats)`` pairs for every cache with telemetry."""
|
|
781
|
+
|
|
782
|
+
with self._registry_lock:
|
|
783
|
+
items = tuple(self._metrics.items())
|
|
784
|
+
for name, metrics in items:
|
|
785
|
+
with metrics.lock:
|
|
786
|
+
yield name, metrics.snapshot()
|
|
787
|
+
|
|
788
|
+
def aggregate_metrics(self) -> CacheStatistics:
|
|
789
|
+
"""Return aggregated telemetry statistics across all caches."""
|
|
790
|
+
|
|
791
|
+
aggregate = CacheStatistics()
|
|
792
|
+
for _, stats in self.iter_metrics():
|
|
793
|
+
aggregate = aggregate.merge(stats)
|
|
794
|
+
return aggregate
|
|
795
|
+
|
|
796
|
+
def register_metrics_publisher(
|
|
797
|
+
self, publisher: Callable[[str, CacheStatistics], None]
|
|
798
|
+
) -> None:
|
|
799
|
+
"""Register ``publisher`` to receive metrics snapshots on demand."""
|
|
800
|
+
|
|
801
|
+
with self._registry_lock:
|
|
802
|
+
self._metrics_publishers.append(publisher)
|
|
803
|
+
|
|
804
|
+
def publish_metrics(
|
|
805
|
+
self,
|
|
806
|
+
*,
|
|
807
|
+
publisher: Callable[[str, CacheStatistics], None] | None = None,
|
|
808
|
+
) -> None:
|
|
809
|
+
"""Send cached telemetry to ``publisher`` or all registered publishers."""
|
|
810
|
+
|
|
811
|
+
if publisher is None:
|
|
812
|
+
with self._registry_lock:
|
|
813
|
+
publishers = tuple(self._metrics_publishers)
|
|
814
|
+
else:
|
|
815
|
+
publishers = (publisher,)
|
|
816
|
+
if not publishers:
|
|
817
|
+
return
|
|
818
|
+
snapshot = tuple(self.iter_metrics())
|
|
819
|
+
for emit in publishers:
|
|
820
|
+
for name, stats in snapshot:
|
|
821
|
+
try:
|
|
822
|
+
emit(name, stats)
|
|
823
|
+
except Exception: # pragma: no cover - defensive logging
|
|
824
|
+
_logger.exception("Cache metrics publisher failed for %s", name)
|
|
825
|
+
|
|
826
|
+
def log_metrics(self, logger: logging.Logger, *, level: int = logging.INFO) -> None:
|
|
827
|
+
"""Emit cache metrics using ``logger`` for telemetry hooks."""
|
|
828
|
+
|
|
829
|
+
for name, stats in self.iter_metrics():
|
|
830
|
+
logger.log(
|
|
831
|
+
level,
|
|
832
|
+
"cache=%s hits=%d misses=%d evictions=%d timings=%d total_time=%.6f",
|
|
833
|
+
name,
|
|
834
|
+
stats.hits,
|
|
835
|
+
stats.misses,
|
|
836
|
+
stats.evictions,
|
|
837
|
+
stats.timings,
|
|
838
|
+
stats.total_time,
|
|
839
|
+
)
|
|
840
|
+
|
|
841
|
+
|
|
842
|
+
try:
|
|
843
|
+
from .init import get_logger as _get_logger
|
|
844
|
+
except ImportError: # pragma: no cover - circular bootstrap fallback
|
|
845
|
+
|
|
846
|
+
def _get_logger(name: str) -> logging.Logger:
|
|
847
|
+
return logging.getLogger(name)
|
|
848
|
+
|
|
849
|
+
_logger = _get_logger(__name__)
|
|
850
|
+
get_logger = _get_logger
|
|
851
|
+
|
|
852
|
+
|
|
853
|
+
def _normalise_callbacks(
|
|
854
|
+
callbacks: Iterable[Callable[[K, V], None]] | Callable[[K, V], None] | None,
|
|
855
|
+
) -> tuple[Callable[[K, V], None], ...]:
|
|
856
|
+
if callbacks is None:
|
|
857
|
+
return ()
|
|
858
|
+
if callable(callbacks):
|
|
859
|
+
return (callbacks,)
|
|
860
|
+
return tuple(callbacks)
|
|
861
|
+
|
|
862
|
+
|
|
863
|
+
def prune_lock_mapping(
|
|
864
|
+
cache: Mapping[K, Any] | MutableMapping[K, Any] | None,
|
|
865
|
+
locks: MutableMapping[K, Any] | None,
|
|
866
|
+
) -> None:
|
|
867
|
+
"""Drop lock entries not present in ``cache``."""
|
|
868
|
+
|
|
869
|
+
if locks is None:
|
|
870
|
+
return
|
|
871
|
+
if cache is None:
|
|
872
|
+
cache_keys: set[K] = set()
|
|
873
|
+
else:
|
|
874
|
+
cache_keys = set(cache.keys())
|
|
875
|
+
for key in list(locks.keys()):
|
|
876
|
+
if key not in cache_keys:
|
|
877
|
+
locks.pop(key, None)
|
|
878
|
+
|
|
879
|
+
|
|
880
|
+
class InstrumentedLRUCache(MutableMapping[K, V], Generic[K, V]):
|
|
881
|
+
"""LRU cache wrapper that synchronises telemetry, callbacks and locks.
|
|
882
|
+
|
|
883
|
+
The wrapper owns an internal :class:`cachetools.LRUCache` instance and
|
|
884
|
+
forwards all read operations to it. Mutating operations are instrumented to
|
|
885
|
+
update :class:`CacheManager` metrics, execute registered callbacks and keep
|
|
886
|
+
an optional lock mapping aligned with the stored keys. Telemetry callbacks
|
|
887
|
+
always execute before eviction callbacks, preserving the registration order
|
|
888
|
+
for deterministic side effects.
|
|
889
|
+
|
|
890
|
+
Callbacks can be extended or replaced after construction via
|
|
891
|
+
:meth:`set_telemetry_callbacks` and :meth:`set_eviction_callbacks`. When
|
|
892
|
+
``append`` is ``False`` (default) the provided callbacks replace the
|
|
893
|
+
existing sequence; otherwise they are appended at the end while keeping the
|
|
894
|
+
previous ordering intact.
|
|
895
|
+
"""
|
|
896
|
+
|
|
897
|
+
_MISSING = object()
|
|
898
|
+
|
|
899
|
+
def __init__(
|
|
900
|
+
self,
|
|
901
|
+
maxsize: int,
|
|
902
|
+
*,
|
|
903
|
+
manager: CacheManager | None = None,
|
|
904
|
+
metrics_key: str | None = None,
|
|
905
|
+
telemetry_callbacks: (
|
|
906
|
+
Iterable[Callable[[K, V], None]] | Callable[[K, V], None] | None
|
|
907
|
+
) = None,
|
|
908
|
+
eviction_callbacks: (
|
|
909
|
+
Iterable[Callable[[K, V], None]] | Callable[[K, V], None] | None
|
|
910
|
+
) = None,
|
|
911
|
+
locks: MutableMapping[K, Any] | None = None,
|
|
912
|
+
getsizeof: Callable[[V], int] | None = None,
|
|
913
|
+
count_overwrite_hit: bool = True,
|
|
914
|
+
) -> None:
|
|
915
|
+
self._cache: LRUCache[K, V] = LRUCache(maxsize, getsizeof=getsizeof)
|
|
916
|
+
original_popitem = self._cache.popitem
|
|
917
|
+
|
|
918
|
+
def _instrumented_popitem() -> tuple[K, V]:
|
|
919
|
+
key, value = original_popitem()
|
|
920
|
+
self._dispatch_removal(key, value)
|
|
921
|
+
return key, value
|
|
922
|
+
|
|
923
|
+
self._cache.popitem = _instrumented_popitem # type: ignore[assignment]
|
|
924
|
+
self._manager = manager
|
|
925
|
+
self._metrics_key = metrics_key
|
|
926
|
+
self._locks = locks
|
|
927
|
+
self._count_overwrite_hit = bool(count_overwrite_hit)
|
|
928
|
+
self._telemetry_callbacks: list[Callable[[K, V], None]]
|
|
929
|
+
self._telemetry_callbacks = list(_normalise_callbacks(telemetry_callbacks))
|
|
930
|
+
self._eviction_callbacks: list[Callable[[K, V], None]]
|
|
931
|
+
self._eviction_callbacks = list(_normalise_callbacks(eviction_callbacks))
|
|
932
|
+
|
|
933
|
+
# ------------------------------------------------------------------
|
|
934
|
+
# Callback registration helpers
|
|
935
|
+
|
|
936
|
+
@property
|
|
937
|
+
def telemetry_callbacks(self) -> tuple[Callable[[K, V], None], ...]:
|
|
938
|
+
"""Return currently registered telemetry callbacks."""
|
|
939
|
+
|
|
940
|
+
return tuple(self._telemetry_callbacks)
|
|
941
|
+
|
|
942
|
+
@property
|
|
943
|
+
def eviction_callbacks(self) -> tuple[Callable[[K, V], None], ...]:
|
|
944
|
+
"""Return currently registered eviction callbacks."""
|
|
945
|
+
|
|
946
|
+
return tuple(self._eviction_callbacks)
|
|
947
|
+
|
|
948
|
+
def set_telemetry_callbacks(
|
|
949
|
+
self,
|
|
950
|
+
callbacks: Iterable[Callable[[K, V], None]] | Callable[[K, V], None] | None,
|
|
951
|
+
*,
|
|
952
|
+
append: bool = False,
|
|
953
|
+
) -> None:
|
|
954
|
+
"""Update telemetry callbacks executed on removals.
|
|
955
|
+
|
|
956
|
+
When ``append`` is ``True`` the provided callbacks are added to the end
|
|
957
|
+
of the execution chain while preserving relative order. Otherwise, the
|
|
958
|
+
previous callbacks are replaced.
|
|
959
|
+
"""
|
|
960
|
+
|
|
961
|
+
new_callbacks = list(_normalise_callbacks(callbacks))
|
|
962
|
+
if append:
|
|
963
|
+
self._telemetry_callbacks.extend(new_callbacks)
|
|
964
|
+
else:
|
|
965
|
+
self._telemetry_callbacks = new_callbacks
|
|
966
|
+
|
|
967
|
+
def set_eviction_callbacks(
|
|
968
|
+
self,
|
|
969
|
+
callbacks: Iterable[Callable[[K, V], None]] | Callable[[K, V], None] | None,
|
|
970
|
+
*,
|
|
971
|
+
append: bool = False,
|
|
972
|
+
) -> None:
|
|
973
|
+
"""Update eviction callbacks executed on removals.
|
|
974
|
+
|
|
975
|
+
Behaviour matches :meth:`set_telemetry_callbacks`.
|
|
976
|
+
"""
|
|
977
|
+
|
|
978
|
+
new_callbacks = list(_normalise_callbacks(callbacks))
|
|
979
|
+
if append:
|
|
980
|
+
self._eviction_callbacks.extend(new_callbacks)
|
|
981
|
+
else:
|
|
982
|
+
self._eviction_callbacks = new_callbacks
|
|
983
|
+
|
|
984
|
+
# ------------------------------------------------------------------
|
|
985
|
+
# MutableMapping interface
|
|
986
|
+
|
|
987
|
+
def __getitem__(self, key: K) -> V:
|
|
988
|
+
"""Return the cached value for ``key``."""
|
|
989
|
+
|
|
990
|
+
return self._cache[key]
|
|
991
|
+
|
|
992
|
+
def __setitem__(self, key: K, value: V) -> None:
|
|
993
|
+
"""Store ``value`` under ``key`` updating telemetry accordingly."""
|
|
994
|
+
|
|
995
|
+
exists = key in self._cache
|
|
996
|
+
self._cache[key] = value
|
|
997
|
+
if exists:
|
|
998
|
+
if self._count_overwrite_hit:
|
|
999
|
+
self._record_hit(1)
|
|
1000
|
+
else:
|
|
1001
|
+
self._record_miss(1)
|
|
1002
|
+
|
|
1003
|
+
def __delitem__(self, key: K) -> None:
|
|
1004
|
+
"""Remove ``key`` from the cache and dispatch removal callbacks."""
|
|
1005
|
+
|
|
1006
|
+
try:
|
|
1007
|
+
value = self._cache[key]
|
|
1008
|
+
except KeyError:
|
|
1009
|
+
self._record_miss(1)
|
|
1010
|
+
raise
|
|
1011
|
+
del self._cache[key]
|
|
1012
|
+
self._dispatch_removal(key, value, hits=1)
|
|
1013
|
+
|
|
1014
|
+
def __iter__(self) -> Iterator[K]:
|
|
1015
|
+
"""Iterate over cached keys in eviction order."""
|
|
1016
|
+
|
|
1017
|
+
return iter(self._cache)
|
|
1018
|
+
|
|
1019
|
+
def __len__(self) -> int:
|
|
1020
|
+
"""Return the number of cached entries."""
|
|
1021
|
+
|
|
1022
|
+
return len(self._cache)
|
|
1023
|
+
|
|
1024
|
+
def __contains__(self, key: object) -> bool:
|
|
1025
|
+
"""Return ``True`` when ``key`` is stored in the cache."""
|
|
1026
|
+
|
|
1027
|
+
return key in self._cache
|
|
1028
|
+
|
|
1029
|
+
def __repr__(self) -> str: # pragma: no cover - debugging helper
|
|
1030
|
+
"""Return a debug representation including the underlying cache."""
|
|
1031
|
+
|
|
1032
|
+
return f"{self.__class__.__name__}({self._cache!r})"
|
|
1033
|
+
|
|
1034
|
+
# ------------------------------------------------------------------
|
|
1035
|
+
# Cache helpers
|
|
1036
|
+
|
|
1037
|
+
@property
|
|
1038
|
+
def maxsize(self) -> int:
|
|
1039
|
+
"""Return the configured maximum cache size."""
|
|
1040
|
+
|
|
1041
|
+
return self._cache.maxsize
|
|
1042
|
+
|
|
1043
|
+
@property
|
|
1044
|
+
def currsize(self) -> int:
|
|
1045
|
+
"""Return the current weighted size reported by :mod:`cachetools`."""
|
|
1046
|
+
|
|
1047
|
+
return self._cache.currsize
|
|
1048
|
+
|
|
1049
|
+
def get(self, key: K, default: V | None = None) -> V | None:
|
|
1050
|
+
"""Return ``key`` if present, otherwise ``default``."""
|
|
1051
|
+
|
|
1052
|
+
return self._cache.get(key, default)
|
|
1053
|
+
|
|
1054
|
+
def pop(self, key: K, default: Any = _MISSING) -> V:
|
|
1055
|
+
"""Remove ``key`` returning its value or ``default`` when provided."""
|
|
1056
|
+
|
|
1057
|
+
try:
|
|
1058
|
+
value = self._cache[key]
|
|
1059
|
+
except KeyError:
|
|
1060
|
+
self._record_miss(1)
|
|
1061
|
+
if default is self._MISSING:
|
|
1062
|
+
raise
|
|
1063
|
+
return cast(V, default)
|
|
1064
|
+
del self._cache[key]
|
|
1065
|
+
self._dispatch_removal(key, value, hits=1)
|
|
1066
|
+
return value
|
|
1067
|
+
|
|
1068
|
+
def popitem(self) -> tuple[K, V]:
|
|
1069
|
+
"""Remove and return the LRU entry ensuring instrumentation fires."""
|
|
1070
|
+
|
|
1071
|
+
return self._cache.popitem()
|
|
1072
|
+
|
|
1073
|
+
def clear(self) -> None: # type: ignore[override]
|
|
1074
|
+
"""Evict every entry while keeping telemetry and locks consistent."""
|
|
1075
|
+
|
|
1076
|
+
while True:
|
|
1077
|
+
try:
|
|
1078
|
+
self.popitem()
|
|
1079
|
+
except KeyError:
|
|
1080
|
+
break
|
|
1081
|
+
if self._locks is not None:
|
|
1082
|
+
try:
|
|
1083
|
+
self._locks.clear()
|
|
1084
|
+
except Exception: # pragma: no cover - defensive logging
|
|
1085
|
+
_logger.exception("lock cleanup failed during cache clear")
|
|
1086
|
+
|
|
1087
|
+
# ------------------------------------------------------------------
|
|
1088
|
+
# Internal helpers
|
|
1089
|
+
|
|
1090
|
+
def _record_hit(self, amount: int) -> None:
|
|
1091
|
+
if amount and self._manager is not None and self._metrics_key is not None:
|
|
1092
|
+
self._manager.increment_hit(self._metrics_key, amount=amount)
|
|
1093
|
+
|
|
1094
|
+
def _record_miss(self, amount: int) -> None:
|
|
1095
|
+
if amount and self._manager is not None and self._metrics_key is not None:
|
|
1096
|
+
self._manager.increment_miss(self._metrics_key, amount=amount)
|
|
1097
|
+
|
|
1098
|
+
def _record_eviction(self, amount: int) -> None:
|
|
1099
|
+
if amount and self._manager is not None and self._metrics_key is not None:
|
|
1100
|
+
self._manager.increment_eviction(self._metrics_key, amount=amount)
|
|
1101
|
+
|
|
1102
|
+
def _dispatch_removal(
|
|
1103
|
+
self,
|
|
1104
|
+
key: K,
|
|
1105
|
+
value: V,
|
|
1106
|
+
*,
|
|
1107
|
+
hits: int = 0,
|
|
1108
|
+
misses: int = 0,
|
|
1109
|
+
eviction_amount: int = 1,
|
|
1110
|
+
purge_lock: bool = True,
|
|
1111
|
+
) -> None:
|
|
1112
|
+
if hits:
|
|
1113
|
+
self._record_hit(hits)
|
|
1114
|
+
if misses:
|
|
1115
|
+
self._record_miss(misses)
|
|
1116
|
+
if eviction_amount:
|
|
1117
|
+
self._record_eviction(eviction_amount)
|
|
1118
|
+
self._emit_callbacks(self._telemetry_callbacks, key, value, "telemetry")
|
|
1119
|
+
self._emit_callbacks(self._eviction_callbacks, key, value, "eviction")
|
|
1120
|
+
if purge_lock:
|
|
1121
|
+
self._purge_lock(key)
|
|
1122
|
+
|
|
1123
|
+
def _emit_callbacks(
|
|
1124
|
+
self,
|
|
1125
|
+
callbacks: Iterable[Callable[[K, V], None]],
|
|
1126
|
+
key: K,
|
|
1127
|
+
value: V,
|
|
1128
|
+
kind: str,
|
|
1129
|
+
) -> None:
|
|
1130
|
+
for callback in callbacks:
|
|
1131
|
+
try:
|
|
1132
|
+
callback(key, value)
|
|
1133
|
+
except Exception: # pragma: no cover - defensive logging
|
|
1134
|
+
_logger.exception("%s callback failed for %r", kind, key)
|
|
1135
|
+
|
|
1136
|
+
def _purge_lock(self, key: K) -> None:
|
|
1137
|
+
if self._locks is None:
|
|
1138
|
+
return
|
|
1139
|
+
try:
|
|
1140
|
+
self._locks.pop(key, None)
|
|
1141
|
+
except Exception: # pragma: no cover - defensive logging
|
|
1142
|
+
_logger.exception("lock cleanup failed for %r", key)
|
|
1143
|
+
|
|
1144
|
+
|
|
1145
|
+
class ManagedLRUCache(LRUCache[K, V]):
|
|
1146
|
+
"""LRU cache wrapper with telemetry hooks and lock synchronisation."""
|
|
1147
|
+
|
|
1148
|
+
def __init__(
|
|
1149
|
+
self,
|
|
1150
|
+
maxsize: int,
|
|
1151
|
+
*,
|
|
1152
|
+
manager: CacheManager | None = None,
|
|
1153
|
+
metrics_key: str | None = None,
|
|
1154
|
+
eviction_callbacks: (
|
|
1155
|
+
Iterable[Callable[[K, V], None]] | Callable[[K, V], None] | None
|
|
1156
|
+
) = None,
|
|
1157
|
+
telemetry_callbacks: (
|
|
1158
|
+
Iterable[Callable[[K, V], None]] | Callable[[K, V], None] | None
|
|
1159
|
+
) = None,
|
|
1160
|
+
locks: MutableMapping[K, Any] | None = None,
|
|
1161
|
+
) -> None:
|
|
1162
|
+
super().__init__(maxsize)
|
|
1163
|
+
self._manager = manager
|
|
1164
|
+
self._metrics_key = metrics_key
|
|
1165
|
+
self._locks = locks
|
|
1166
|
+
self._eviction_callbacks = _normalise_callbacks(eviction_callbacks)
|
|
1167
|
+
self._telemetry_callbacks = _normalise_callbacks(telemetry_callbacks)
|
|
1168
|
+
|
|
1169
|
+
def popitem(self) -> tuple[K, V]: # type: ignore[override]
|
|
1170
|
+
"""Evict the LRU entry while updating telemetry and lock state."""
|
|
1171
|
+
|
|
1172
|
+
key, value = super().popitem()
|
|
1173
|
+
if self._locks is not None:
|
|
1174
|
+
try:
|
|
1175
|
+
self._locks.pop(key, None)
|
|
1176
|
+
except Exception: # pragma: no cover - defensive logging
|
|
1177
|
+
_logger.exception("lock cleanup failed for %r", key)
|
|
1178
|
+
if self._manager is not None and self._metrics_key is not None:
|
|
1179
|
+
self._manager.increment_eviction(self._metrics_key)
|
|
1180
|
+
for callback in self._telemetry_callbacks:
|
|
1181
|
+
try:
|
|
1182
|
+
callback(key, value)
|
|
1183
|
+
except Exception: # pragma: no cover - defensive logging
|
|
1184
|
+
_logger.exception("telemetry callback failed for %r", key)
|
|
1185
|
+
for callback in self._eviction_callbacks:
|
|
1186
|
+
try:
|
|
1187
|
+
callback(key, value)
|
|
1188
|
+
except Exception: # pragma: no cover - defensive logging
|
|
1189
|
+
_logger.exception("eviction callback failed for %r", key)
|
|
1190
|
+
return key, value
|
|
1191
|
+
|
|
1192
|
+
|
|
1193
|
+
@dataclass
|
|
1194
|
+
class _SeedCacheState:
|
|
1195
|
+
"""Container tracking the state for :class:`_SeedHashCache`."""
|
|
1196
|
+
|
|
1197
|
+
cache: InstrumentedLRUCache[tuple[int, int], int] | None
|
|
1198
|
+
maxsize: int
|
|
1199
|
+
|
|
1200
|
+
|
|
1201
|
+
@dataclass
|
|
1202
|
+
class _CounterState(Generic[K]):
|
|
1203
|
+
"""State bundle used by :class:`ScopedCounterCache`."""
|
|
1204
|
+
|
|
1205
|
+
cache: InstrumentedLRUCache[K, int]
|
|
1206
|
+
locks: dict[K, threading.RLock]
|
|
1207
|
+
max_entries: int
|
|
61
1208
|
|
|
62
1209
|
# Key used to store the node set checksum in a graph's ``graph`` attribute.
|
|
63
1210
|
NODE_SET_CHECKSUM_KEY = "_node_set_checksum_cache"
|
|
64
1211
|
|
|
65
|
-
logger =
|
|
1212
|
+
logger = _logger
|
|
1213
|
+
|
|
1214
|
+
# Helper to avoid importing ``tnfr.utils.init`` at module import time and keep
|
|
1215
|
+
# circular dependencies at bay while still reusing the canonical numpy loader.
|
|
1216
|
+
def _require_numpy():
|
|
1217
|
+
from .init import get_numpy
|
|
1218
|
+
|
|
1219
|
+
return get_numpy()
|
|
1220
|
+
|
|
1221
|
+
|
|
1222
|
+
# Graph key storing per-graph layer configuration overrides.
|
|
1223
|
+
_GRAPH_CACHE_LAYERS_KEY = "_tnfr_cache_layers"
|
|
1224
|
+
|
|
1225
|
+
# Process-wide configuration for shared cache layers (Shelve/Redis).
|
|
1226
|
+
_GLOBAL_CACHE_LAYER_CONFIG: dict[str, dict[str, Any]] = {}
|
|
1227
|
+
_GLOBAL_CACHE_LOCK = threading.RLock()
|
|
1228
|
+
_GLOBAL_CACHE_MANAGER: CacheManager | None = None
|
|
66
1229
|
|
|
67
1230
|
# Keys of cache entries dependent on the edge version. Any change to the edge
|
|
68
1231
|
# set requires these to be dropped to avoid stale data.
|
|
@@ -86,6 +1249,8 @@ def increment_graph_version(graph: Any, key: str) -> int:
|
|
|
86
1249
|
def stable_json(obj: Any) -> str:
|
|
87
1250
|
"""Return a JSON string with deterministic ordering for ``obj``."""
|
|
88
1251
|
|
|
1252
|
+
from .io import json_dumps
|
|
1253
|
+
|
|
89
1254
|
return json_dumps(
|
|
90
1255
|
obj,
|
|
91
1256
|
sort_keys=True,
|
|
@@ -112,15 +1277,199 @@ def clear_node_repr_cache() -> None:
|
|
|
112
1277
|
_node_repr_digest.cache_clear()
|
|
113
1278
|
|
|
114
1279
|
|
|
1280
|
+
def configure_global_cache_layers(
|
|
1281
|
+
*,
|
|
1282
|
+
shelve: Mapping[str, Any] | None = None,
|
|
1283
|
+
redis: Mapping[str, Any] | None = None,
|
|
1284
|
+
replace: bool = False,
|
|
1285
|
+
) -> None:
|
|
1286
|
+
"""Update process-wide cache layer configuration.
|
|
1287
|
+
|
|
1288
|
+
Parameters mirror the per-layer specifications accepted via graph metadata.
|
|
1289
|
+
Passing ``replace=True`` clears previous settings before applying new ones.
|
|
1290
|
+
Providing ``None`` for a layer while ``replace`` is true removes that layer
|
|
1291
|
+
from the configuration.
|
|
1292
|
+
"""
|
|
1293
|
+
|
|
1294
|
+
global _GLOBAL_CACHE_MANAGER
|
|
1295
|
+
with _GLOBAL_CACHE_LOCK:
|
|
1296
|
+
manager = _GLOBAL_CACHE_MANAGER
|
|
1297
|
+
_GLOBAL_CACHE_MANAGER = None
|
|
1298
|
+
if replace:
|
|
1299
|
+
_GLOBAL_CACHE_LAYER_CONFIG.clear()
|
|
1300
|
+
if shelve is not None:
|
|
1301
|
+
_GLOBAL_CACHE_LAYER_CONFIG["shelve"] = dict(shelve)
|
|
1302
|
+
elif replace:
|
|
1303
|
+
_GLOBAL_CACHE_LAYER_CONFIG.pop("shelve", None)
|
|
1304
|
+
if redis is not None:
|
|
1305
|
+
_GLOBAL_CACHE_LAYER_CONFIG["redis"] = dict(redis)
|
|
1306
|
+
elif replace:
|
|
1307
|
+
_GLOBAL_CACHE_LAYER_CONFIG.pop("redis", None)
|
|
1308
|
+
_close_cache_layers(manager)
|
|
1309
|
+
|
|
1310
|
+
|
|
+def _resolve_layer_config(
+    graph: MutableMapping[str, Any] | None,
+) -> dict[str, dict[str, Any]]:
+    resolved: dict[str, dict[str, Any]] = {}
+    with _GLOBAL_CACHE_LOCK:
+        for name, spec in _GLOBAL_CACHE_LAYER_CONFIG.items():
+            resolved[name] = dict(spec)
+    if graph is not None:
+        overrides = graph.get(_GRAPH_CACHE_LAYERS_KEY)
+        if isinstance(overrides, Mapping):
+            for name in ("shelve", "redis"):
+                layer_spec = overrides.get(name)
+                if isinstance(layer_spec, Mapping):
+                    resolved[name] = dict(layer_spec)
+                elif layer_spec is None:
+                    resolved.pop(name, None)
+    return resolved
+
+
+def _build_shelve_layer(spec: Mapping[str, Any]) -> ShelveCacheLayer | None:
+    path = spec.get("path")
+    if not path:
+        return None
+    flag = spec.get("flag", "c")
+    protocol = spec.get("protocol")
+    writeback = bool(spec.get("writeback", False))
+    try:
+        proto_arg = None if protocol is None else int(protocol)
+    except (TypeError, ValueError):
+        logger.warning("Invalid shelve protocol %r; falling back to default", protocol)
+        proto_arg = None
+    try:
+        return ShelveCacheLayer(
+            str(path),
+            flag=str(flag),
+            protocol=proto_arg,
+            writeback=writeback,
+        )
+    except Exception:  # pragma: no cover - defensive logging
+        logger.exception("Failed to initialise ShelveCacheLayer for path %r", path)
+        return None
+
+
+def _build_redis_layer(spec: Mapping[str, Any]) -> RedisCacheLayer | None:
+    enabled = spec.get("enabled", True)
+    if not enabled:
+        return None
+    namespace = spec.get("namespace")
+    client = spec.get("client")
+    if client is None:
+        factory = spec.get("client_factory")
+        if callable(factory):
+            try:
+                client = factory()
+            except Exception:  # pragma: no cover - defensive logging
+                logger.exception("Redis cache client factory failed")
+                return None
+        else:
+            kwargs = spec.get("client_kwargs")
+            if isinstance(kwargs, Mapping):
+                try:  # pragma: no cover - optional dependency
+                    import redis  # type: ignore
+                except Exception:  # pragma: no cover - defensive logging
+                    logger.exception("redis-py is required to build the configured Redis client")
+                    return None
+                try:
+                    client = redis.Redis(**dict(kwargs))
+                except Exception:  # pragma: no cover - defensive logging
+                    logger.exception("Failed to initialise redis client with %r", kwargs)
+                    return None
+    try:
+        if namespace is None:
+            return RedisCacheLayer(client=client)
+        return RedisCacheLayer(client=client, namespace=str(namespace))
+    except Exception:  # pragma: no cover - defensive logging
+        logger.exception("Failed to initialise RedisCacheLayer")
+        return None
+
+
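Per-graph overrides ride on the same spec shape: ``_resolve_layer_config`` merges the process-wide configuration with whatever a graph stores under ``_tnfr_cache_layers``, where a mapping replaces the layer spec and an explicit ``None`` removes it for that graph. A hedged sketch of the metadata shape (storing it on ``G.graph`` follows the networkx convention used throughout this file):

```python
import networkx as nx

G = nx.Graph()

# A mapping under the override key swaps in a graph-local shelve layer;
# an explicit None knocks out a globally configured layer for this graph.
G.graph["_tnfr_cache_layers"] = {
    "shelve": {"path": "/tmp/this-graph.db"},
    "redis": None,
}
```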
+def _build_cache_layers(config: Mapping[str, dict[str, Any]]) -> tuple[CacheLayer, ...]:
+    layers: list[CacheLayer] = []
+    shelve_spec = config.get("shelve")
+    if isinstance(shelve_spec, Mapping):
+        layer = _build_shelve_layer(shelve_spec)
+        if layer is not None:
+            layers.append(layer)
+    redis_spec = config.get("redis")
+    if isinstance(redis_spec, Mapping):
+        layer = _build_redis_layer(redis_spec)
+        if layer is not None:
+            layers.append(layer)
+    return tuple(layers)
+
+
+def _close_cache_layers(manager: CacheManager | None) -> None:
+    if manager is None:
+        return
+    layers = getattr(manager, "_layers", ())
+    for layer in layers:
+        close = getattr(layer, "close", None)
+        if callable(close):
+            try:
+                close()
+            except Exception:  # pragma: no cover - defensive logging
+                logger.exception(
+                    "Cache layer close failed for %s", layer.__class__.__name__
+                )
+
+
+def reset_global_cache_manager() -> None:
+    """Dispose the shared cache manager and close attached layers."""
+
+    global _GLOBAL_CACHE_MANAGER
+    with _GLOBAL_CACHE_LOCK:
+        manager = _GLOBAL_CACHE_MANAGER
+        _GLOBAL_CACHE_MANAGER = None
+    _close_cache_layers(manager)
+
+
+def build_cache_manager(
+    *,
+    graph: MutableMapping[str, Any] | None = None,
+    storage: MutableMapping[str, Any] | None = None,
+    default_capacity: int | None = None,
+    overrides: Mapping[str, int | None] | None = None,
+) -> CacheManager:
+    """Construct a :class:`CacheManager` honouring configured cache layers."""
+
+    global _GLOBAL_CACHE_MANAGER
+    if graph is None:
+        with _GLOBAL_CACHE_LOCK:
+            manager = _GLOBAL_CACHE_MANAGER
+            if manager is not None:
+                return manager
+
+    layers = _build_cache_layers(_resolve_layer_config(graph))
+    manager = CacheManager(
+        storage,
+        default_capacity=default_capacity,
+        overrides=overrides,
+        layers=layers,
+    )
+
+    if graph is None:
+        with _GLOBAL_CACHE_LOCK:
+            global_manager = _GLOBAL_CACHE_MANAGER
+            if global_manager is None:
+                _GLOBAL_CACHE_MANAGER = manager
+                return manager
+            _close_cache_layers(manager)
+            return global_manager
+
+    return manager
+
+
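Worth noting for review: with ``graph=None`` the function behaves as a process-wide singleton. The double-checked locking above means a manager built concurrently by another thread wins, and the loser's layers are closed rather than leaked. A sketch under the same import-path assumption as above:

```python
from tnfr.utils.cache import build_cache_manager, reset_global_cache_manager

# Without a graph, repeated calls hand back the same shared manager.
shared_a = build_cache_manager(default_capacity=64)
shared_b = build_cache_manager()
assert shared_a is shared_b

# Disposing the singleton closes any attached shelve/redis layers;
# the next call rebuilds from the current layer configuration.
reset_global_cache_manager()
```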
 def _node_repr(n: Any) -> str:
     """Stable representation for node hashing and sorting."""
 
     return _node_repr_digest(n)[0]
 
 
-def _iter_node_digests(
-    nodes: Iterable[Any], *, presorted: bool
-) -> Iterable[bytes]:
+def _iter_node_digests(nodes: Iterable[Any], *, presorted: bool) -> Iterable[bytes]:
     """Yield node digests in a deterministic order."""
 
     if presorted:
@@ -174,9 +1523,7 @@ def node_set_checksum(
 
     graph = get_graph(G)
     if nodes is None:
-        return _node_set_checksum_no_nodes(
-            G, graph, presorted=presorted, store=store
-        )
+        return _node_set_checksum_no_nodes(G, graph, presorted=presorted, store=store)
 
     hasher = hashlib.blake2b(digest_size=16)
     for digest in _iter_node_digests(nodes, presorted=presorted):
@@ -366,6 +1713,7 @@ class EdgeCacheState:
     cache: MutableMapping[Hashable, Any]
     locks: defaultdict[Hashable, threading.RLock]
     max_entries: int | None
+    dirty: bool = False
 
 
 _GRAPH_CACHE_MANAGER_KEY = "_tnfr_cache_manager"
@@ -377,32 +1725,11 @@ DNFR_PREP_STATE_KEY = "_dnfr_prep_state"
 class DnfrPrepState:
     """State container coordinating ΔNFR preparation caches."""
 
-    cache:
+    cache: DnfrCache
     cache_lock: threading.RLock
     vector_lock: threading.RLock
 
 
-def _new_dnfr_cache() -> "DnfrCache":
-    """Return an empty :class:`~tnfr.dynamics.dnfr.DnfrCache` instance."""
-
-    from ..dynamics.dnfr import DnfrCache
-
-    return DnfrCache(
-        idx={},
-        theta=[],
-        epi=[],
-        vf=[],
-        cos_theta=[],
-        sin_theta=[],
-        neighbor_x=[],
-        neighbor_y=[],
-        neighbor_epi_sum=[],
-        neighbor_vf_sum=[],
-        neighbor_count=[],
-        neighbor_deg_sum=[],
-    )
-
-
 def _build_dnfr_prep_state(
     graph: MutableMapping[str, Any],
     previous: DnfrPrepState | None = None,
@@ -418,7 +1745,7 @@ def _build_dnfr_prep_state(
     cache_lock = threading.RLock()
     vector_lock = threading.RLock()
     state = DnfrPrepState(
-        cache=_new_dnfr_cache(),
+        cache=new_dnfr_cache(),
         cache_lock=cache_lock,
         vector_lock=vector_lock,
     )
@@ -435,11 +1762,7 @@ def _coerce_dnfr_state(
     if isinstance(current, DnfrPrepState):
         graph["_dnfr_prep_cache"] = current.cache
         return current
-    try:
-        from ..dynamics.dnfr import DnfrCache
-    except Exception:  # pragma: no cover - defensive import
-        DnfrCache = None  # type: ignore[assignment]
-    if DnfrCache is not None and isinstance(current, DnfrCache):
+    if isinstance(current, DnfrCache):
         state = DnfrPrepState(
             cache=current,
             cache_lock=threading.RLock(),
@@ -453,11 +1776,12 @@ def _coerce_dnfr_state(
 def _graph_cache_manager(graph: MutableMapping[str, Any]) -> CacheManager:
     manager = graph.get(_GRAPH_CACHE_MANAGER_KEY)
     if not isinstance(manager, CacheManager):
-        manager =
+        manager = build_cache_manager(graph=graph, default_capacity=128)
         graph[_GRAPH_CACHE_MANAGER_KEY] = manager
     config = graph.get(_GRAPH_CACHE_CONFIG_KEY)
     if isinstance(config, dict):
         manager.configure_from_mapping(config)
+
     def _dnfr_factory() -> DnfrPrepState:
         return _build_dnfr_prep_state(graph)
 
@@ -510,10 +1834,33 @@ class EdgeCacheManager:
     def __init__(self, graph: MutableMapping[str, Any]) -> None:
         self.graph: MutableMapping[str, Any] = graph
         self._manager = _graph_cache_manager(graph)
+
+        def _encode_state(state: EdgeCacheState) -> Mapping[str, Any]:
+            if not isinstance(state, EdgeCacheState):
+                raise TypeError("EdgeCacheState expected")
+            return {
+                "max_entries": state.max_entries,
+                "entries": list(state.cache.items()),
+            }
+
+        def _decode_state(payload: Any) -> EdgeCacheState:
+            if isinstance(payload, EdgeCacheState):
+                return payload
+            if not isinstance(payload, Mapping):
+                raise TypeError("invalid edge cache payload")
+            max_entries = payload.get("max_entries")
+            state = self._build_state(max_entries)
+            for key, value in payload.get("entries", []):
+                state.cache[key] = value
+            state.dirty = False
+            return state
+
         self._manager.register(
             self._STATE_KEY,
             self._default_state,
             reset=self._reset_state,
+            encoder=_encode_state,
+            decoder=_decode_state,
         )
 
     def record_hit(self) -> None:
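The encoder/decoder pair gives the shared layers a plain, serialisable view of the edge cache: the payload is just the configured capacity plus the cache's items, whose values are ``(edge_version, value)`` tuples per the write site later in this file. A sketch of the shape, with hypothetical key and value placeholders:

```python
# Shape of the payload _encode_state hands to the cache layers.
# Key and value placeholders below are illustrative, not from the package.
payload = {
    "max_entries": 256,
    "entries": [
        (("dnfr", "adjacency"), (3, "cached-value")),  # (key, (edge_version, value))
    ],
}

# _decode_state rebuilds a fresh EdgeCacheState from such a mapping,
# replays the entries, and clears the dirty flag afterwards.
```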
@@ -570,13 +1917,15 @@ class EdgeCacheManager:
             locks=locks,
             count_overwrite_hit=False,
         )
+        state = EdgeCacheState(cache=cache, locks=locks, max_entries=max_entries)
 
         def _on_eviction(key: Hashable, _: Any) -> None:
             self.record_eviction(track_metrics=False)
             locks.pop(key, None)
+            state.dirty = True
 
         cache.set_eviction_callbacks(_on_eviction)
-        return
+        return state
 
     def _ensure_state(
         self, state: EdgeCacheState | None, max_entries: int | None | object
@@ -593,6 +1942,7 @@ class EdgeCacheManager:
     def _reset_state(self, state: EdgeCacheState | None) -> EdgeCacheState:
         if isinstance(state, EdgeCacheState):
            state.cache.clear()
+            state.dirty = False
             return state
         return self._build_state(None)
 
@@ -601,25 +1951,28 @@ class EdgeCacheManager:
         max_entries: int | None | object,
         *,
         create: bool = True,
-    ) -> tuple[
-
-        dict[Hashable, threading.RLock]
-        | defaultdict[Hashable, threading.RLock]
-        | None,
-    ]:
-        """Return the cache and lock mapping for the manager's graph."""
+    ) -> EdgeCacheState | None:
+        """Return the cache state for the manager's graph."""
 
         if not create:
             state = self._manager.peek(self._STATE_KEY)
-            if isinstance(state, EdgeCacheState):
-                return state.cache, state.locks
-            return None, None
+            return state if isinstance(state, EdgeCacheState) else None
 
         state = self._manager.update(
             self._STATE_KEY,
             lambda current: self._ensure_state(current, max_entries),
         )
-
+        if not isinstance(state, EdgeCacheState):
+            raise RuntimeError("edge cache state failed to initialise")
+        return state
+
+    def flush_state(self, state: EdgeCacheState) -> None:
+        """Persist ``state`` through the configured cache layers when dirty."""
+
+        if not isinstance(state, EdgeCacheState) or not state.dirty:
+            return
+        self._manager.store(self._STATE_KEY, state)
+        state.dirty = False
 
     def clear(self) -> None:
         """Reset cached data managed by this instance."""
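Together with the new ``dirty`` field this implements a simple write-back protocol: mutations and evictions mark the state dirty, and ``flush_state`` pushes one snapshot through the layers before resetting the flag, so clean states cost nothing to flush. A self-contained mirror of that logic, with stand-in names:

```python
from dataclasses import dataclass, field
from typing import Any

@dataclass
class State:
    cache: dict[Any, Any] = field(default_factory=dict)
    dirty: bool = False

persisted: list[dict[Any, Any]] = []

def flush_state(state: State) -> None:
    # Mirrors EdgeCacheManager.flush_state: persist only when dirty,
    # then mark the snapshot clean so repeated flushes are no-ops.
    if not state.dirty:
        return
    persisted.append(dict(state.cache))
    state.dirty = False

s = State()
s.cache["k"] = (3, "value")  # a mutation site would mark the state dirty
s.dirty = True
flush_state(s)
flush_state(s)               # second call is a no-op
assert len(persisted) == 1
```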
@@ -646,7 +1999,12 @@ def edge_version_cache(
     if resolved == 0:
         return builder()
 
-
+    state = manager.get_cache(resolved)
+    if state is None:
+        return builder()
+
+    cache = state.cache
+    locks = state.locks
     edge_version = get_graph_version(graph, "_edge_version")
     lock = locks[key]
 
@@ -663,6 +2021,7 @@ def edge_version_cache(
        logger.exception("edge_version_cache builder failed for %r: %s", key, exc)
         raise
     else:
+        result = value
         with lock:
             entry = cache.get(key)
             if entry is not None:
@@ -673,7 +2032,11 @@ def edge_version_cache(
                     return cached_value
             manager.record_eviction()
             cache[key] = (edge_version, value)
-
+            state.dirty = True
+            result = value
+    if state.dirty:
+        manager.flush_state(state)
+    return result
 
 
 def cached_nodes_and_A(
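The flush happens inline on every miss, so readers of the shared layers see cache contents that already reflect the current edge version. Its companion ``edge_version_update`` (its ``yield``/``finally`` body is visible as context in the next hunk) brackets mutations so the edge version is bumped even when the body raises. Assuming it is wrapped with ``contextlib.contextmanager``, which the generator body suggests but this diff does not show, and assuming the import path used above:

```python
import networkx as nx
# Import path assumed; the function is defined in this module per the diff.
from tnfr.utils.cache import edge_version_update

G = nx.Graph()
with edge_version_update(G):  # assumes @contextmanager wrapping
    G.add_edge("a", "b")
# On exit increment_edge_version(G) has run, so version-keyed cache
# entries built against the old edge set can no longer be served.
```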
@@ -711,7 +2074,7 @@ def cached_nodes_and_A(
     graph["_dnfr_nodes_checksum"] = checksum
 
     def builder() -> tuple[tuple[Any, ...], Any]:
-        np =
+        np = _require_numpy()
         if np is None or prefer_sparse:
             return nodes, None
         A = nx.to_numpy_array(G, nodelist=nodes, weight=None, dtype=float)
@@ -753,3 +2116,280 @@ def edge_version_update(G: TNFRGraph) -> Iterator[None]:
         yield
     finally:
         increment_edge_version(G)
+
+
+class _SeedHashCache(MutableMapping[tuple[int, int], int]):
+    """Mutable mapping proxy exposing a configurable LRU cache."""
+
+    def __init__(
+        self,
+        *,
+        manager: CacheManager | None = None,
+        state_key: str = "seed_hash_cache",
+        default_maxsize: int = 128,
+    ) -> None:
+        self._default_maxsize = int(default_maxsize)
+        self._manager = manager or build_cache_manager(
+            default_capacity=self._default_maxsize
+        )
+        self._state_key = state_key
+        if not self._manager.has_override(self._state_key):
+            self._manager.configure(
+                overrides={self._state_key: self._default_maxsize}
+            )
+        self._manager.register(
+            self._state_key,
+            self._create_state,
+            reset=self._reset_state,
+        )
+
+    def _resolved_size(self, requested: int | None = None) -> int:
+        size = self._manager.get_capacity(
+            self._state_key,
+            requested=requested,
+            fallback=self._default_maxsize,
+        )
+        if size is None:
+            return 0
+        return int(size)
+
+    def _create_state(self) -> _SeedCacheState:
+        size = self._resolved_size()
+        if size <= 0:
+            return _SeedCacheState(cache=None, maxsize=0)
+        return _SeedCacheState(
+            cache=InstrumentedLRUCache(
+                size,
+                manager=self._manager,
+                metrics_key=self._state_key,
+            ),
+            maxsize=size,
+        )
+
+    def _reset_state(self, state: _SeedCacheState | None) -> _SeedCacheState:
+        return self._create_state()
+
+    def _get_state(self, *, create: bool = True) -> _SeedCacheState | None:
+        state = self._manager.get(self._state_key, create=create)
+        if state is None:
+            return None
+        if not isinstance(state, _SeedCacheState):
+            state = self._create_state()
+            self._manager.store(self._state_key, state)
+        return state
+
+    def configure(self, maxsize: int) -> None:
+        size = int(maxsize)
+        if size < 0:
+            raise ValueError("maxsize must be non-negative")
+        self._manager.configure(overrides={self._state_key: size})
+        self._manager.update(self._state_key, lambda _: self._create_state())
+
+    def __getitem__(self, key: tuple[int, int]) -> int:
+        state = self._get_state()
+        if state is None or state.cache is None:
+            raise KeyError(key)
+        value = state.cache[key]
+        self._manager.increment_hit(self._state_key)
+        return value
+
+    def __setitem__(self, key: tuple[int, int], value: int) -> None:
+        state = self._get_state()
+        if state is not None and state.cache is not None:
+            state.cache[key] = value
+
+    def __delitem__(self, key: tuple[int, int]) -> None:
+        state = self._get_state()
+        if state is None or state.cache is None:
+            raise KeyError(key)
+        del state.cache[key]
+
+    def __iter__(self) -> Iterator[tuple[int, int]]:
+        state = self._get_state(create=False)
+        if state is None or state.cache is None:
+            return iter(())
+        return iter(state.cache)
+
+    def __len__(self) -> int:
+        state = self._get_state(create=False)
+        if state is None or state.cache is None:
+            return 0
+        return len(state.cache)
+
+    def clear(self) -> None:  # type: ignore[override]
+        self._manager.clear(self._state_key)
+
+    @property
+    def maxsize(self) -> int:
+        state = self._get_state()
+        return 0 if state is None else state.maxsize
+
+    @property
+    def enabled(self) -> bool:
+        state = self._get_state(create=False)
+        return bool(state and state.cache is not None)
+
+    @property
+    def data(self) -> InstrumentedLRUCache[tuple[int, int], int] | None:
+        """Expose the underlying cache for diagnostics/tests."""
+
+        state = self._get_state(create=False)
+        return None if state is None else state.cache
+
+
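A quick sketch of the proxy's contract as a reviewer might exercise it: writes go through the managed LRU state, lookups raise ``KeyError`` once the resolved capacity is zero, and ``configure`` rebuilds the cache at the new size. Import path assumed as above; the class is module-private:

```python
from tnfr.utils.cache import _SeedHashCache

seeds = _SeedHashCache(default_maxsize=32)
seeds[(42, 7)] = 123456        # stores through the managed LRU state
assert seeds[(42, 7)] == 123456
assert len(seeds) == 1

seeds.configure(maxsize=0)     # size 0 disables the cache entirely
assert not seeds.enabled
```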
+class ScopedCounterCache(Generic[K]):
+    """Thread-safe LRU cache storing monotonic counters by ``key``."""
+
+    def __init__(
+        self,
+        name: str,
+        max_entries: int | None = None,
+        *,
+        manager: CacheManager | None = None,
+        default_max_entries: int = 128,
+    ) -> None:
+        self._name = name
+        self._state_key = f"scoped_counter:{name}"
+        self._default_max_entries = int(default_max_entries)
+        requested = None if max_entries is None else int(max_entries)
+        if requested is not None and requested < 0:
+            raise ValueError("max_entries must be non-negative")
+        self._manager = manager or build_cache_manager(
+            default_capacity=self._default_max_entries
+        )
+        if not self._manager.has_override(self._state_key):
+            fallback = requested
+            if fallback is None:
+                fallback = self._default_max_entries
+            self._manager.configure(overrides={self._state_key: fallback})
+        elif requested is not None:
+            self._manager.configure(overrides={self._state_key: requested})
+        self._manager.register(
+            self._state_key,
+            self._create_state,
+            lock_factory=lambda: get_lock(name),
+            reset=self._reset_state,
+        )
+
+    def _resolved_entries(self, requested: int | None = None) -> int:
+        size = self._manager.get_capacity(
+            self._state_key,
+            requested=requested,
+            fallback=self._default_max_entries,
+        )
+        if size is None:
+            return 0
+        return int(size)
+
+    def _create_state(self, requested: int | None = None) -> _CounterState[K]:
+        size = self._resolved_entries(requested)
+        locks: dict[K, threading.RLock] = {}
+        return _CounterState(
+            cache=InstrumentedLRUCache(
+                size,
+                manager=self._manager,
+                metrics_key=self._state_key,
+                locks=locks,
+            ),
+            locks=locks,
+            max_entries=size,
+        )
+
+    def _reset_state(self, state: _CounterState[K] | None) -> _CounterState[K]:
+        return self._create_state()
+
+    def _get_state(self) -> _CounterState[K]:
+        state = self._manager.get(self._state_key)
+        if not isinstance(state, _CounterState):
+            state = self._create_state(0)
+            self._manager.store(self._state_key, state)
+        return state
+
+    @property
+    def lock(self) -> threading.Lock | threading.RLock:
+        """Return the lock guarding access to the underlying cache."""
+
+        return self._manager.get_lock(self._state_key)
+
+    @property
+    def max_entries(self) -> int:
+        """Return the configured maximum number of cached entries."""
+
+        return self._get_state().max_entries
+
+    @property
+    def cache(self) -> InstrumentedLRUCache[K, int]:
+        """Expose the instrumented cache for inspection."""
+
+        return self._get_state().cache
+
+    @property
+    def locks(self) -> dict[K, threading.RLock]:
+        """Return the mapping of per-key locks tracked by the cache."""
+
+        return self._get_state().locks
+
+    def configure(self, *, force: bool = False, max_entries: int | None = None) -> None:
+        """Resize or reset the cache keeping previous settings."""
+
+        if max_entries is None:
+            size = self._resolved_entries()
+            update_policy = False
+        else:
+            size = int(max_entries)
+            if size < 0:
+                raise ValueError("max_entries must be non-negative")
+            update_policy = True
+
+        def _update(state: _CounterState[K] | None) -> _CounterState[K]:
+            if (
+                not isinstance(state, _CounterState)
+                or force
+                or state.max_entries != size
+            ):
+                locks: dict[K, threading.RLock] = {}
+                return _CounterState(
+                    cache=InstrumentedLRUCache(
+                        size,
+                        manager=self._manager,
+                        metrics_key=self._state_key,
+                        locks=locks,
+                    ),
+                    locks=locks,
+                    max_entries=size,
+                )
+            return cast(_CounterState[K], state)
+
+        if update_policy:
+            self._manager.configure(overrides={self._state_key: size})
+        self._manager.update(self._state_key, _update)
+
+    def clear(self) -> None:
+        """Clear stored counters preserving ``max_entries``."""
+
+        self.configure(force=True)
+
+    def bump(self, key: K) -> int:
+        """Return current counter for ``key`` and increment it atomically."""
+
+        result: dict[str, Any] = {}
+
+        def _update(state: _CounterState[K] | None) -> _CounterState[K]:
+            if not isinstance(state, _CounterState):
+                state = self._create_state(0)
+            cache = state.cache
+            locks = state.locks
+            if key not in locks:
+                locks[key] = threading.RLock()
+            value = int(cache.get(key, 0))
+            cache[key] = value + 1
+            result["value"] = value
+            return state
+
+        self._manager.update(self._state_key, _update)
+        return int(result.get("value", 0))
+
+    def __len__(self) -> int:
+        """Return the number of tracked counters."""
+
+        return len(self.cache)