tnfr 4.5.2__py3-none-any.whl → 6.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tnfr/__init__.py +228 -49
- tnfr/__init__.pyi +40 -0
- tnfr/_compat.py +11 -0
- tnfr/_version.py +7 -0
- tnfr/_version.pyi +7 -0
- tnfr/alias.py +106 -21
- tnfr/alias.pyi +140 -0
- tnfr/cache.py +666 -512
- tnfr/cache.pyi +232 -0
- tnfr/callback_utils.py +2 -9
- tnfr/callback_utils.pyi +105 -0
- tnfr/cli/__init__.py +21 -7
- tnfr/cli/__init__.pyi +47 -0
- tnfr/cli/arguments.py +42 -20
- tnfr/cli/arguments.pyi +33 -0
- tnfr/cli/execution.py +54 -20
- tnfr/cli/execution.pyi +80 -0
- tnfr/cli/utils.py +0 -2
- tnfr/cli/utils.pyi +8 -0
- tnfr/config/__init__.py +12 -0
- tnfr/config/__init__.pyi +8 -0
- tnfr/config/constants.py +104 -0
- tnfr/config/constants.pyi +12 -0
- tnfr/{config.py → config/init.py} +11 -7
- tnfr/config/init.pyi +8 -0
- tnfr/config/operator_names.py +106 -0
- tnfr/config/operator_names.pyi +28 -0
- tnfr/config/presets.py +104 -0
- tnfr/config/presets.pyi +7 -0
- tnfr/constants/__init__.py +78 -24
- tnfr/constants/__init__.pyi +104 -0
- tnfr/constants/core.py +1 -2
- tnfr/constants/core.pyi +17 -0
- tnfr/constants/init.pyi +12 -0
- tnfr/constants/metric.py +4 -12
- tnfr/constants/metric.pyi +19 -0
- tnfr/constants_glyphs.py +9 -91
- tnfr/constants_glyphs.pyi +12 -0
- tnfr/dynamics/__init__.py +112 -634
- tnfr/dynamics/__init__.pyi +83 -0
- tnfr/dynamics/adaptation.py +201 -0
- tnfr/dynamics/aliases.py +22 -0
- tnfr/dynamics/coordination.py +343 -0
- tnfr/dynamics/dnfr.py +1936 -354
- tnfr/dynamics/dnfr.pyi +33 -0
- tnfr/dynamics/integrators.py +369 -75
- tnfr/dynamics/integrators.pyi +35 -0
- tnfr/dynamics/runtime.py +521 -0
- tnfr/dynamics/sampling.py +8 -5
- tnfr/dynamics/sampling.pyi +7 -0
- tnfr/dynamics/selectors.py +680 -0
- tnfr/execution.py +56 -41
- tnfr/execution.pyi +65 -0
- tnfr/flatten.py +7 -7
- tnfr/flatten.pyi +28 -0
- tnfr/gamma.py +54 -37
- tnfr/gamma.pyi +40 -0
- tnfr/glyph_history.py +85 -38
- tnfr/glyph_history.pyi +53 -0
- tnfr/grammar.py +19 -338
- tnfr/grammar.pyi +13 -0
- tnfr/helpers/__init__.py +110 -30
- tnfr/helpers/__init__.pyi +66 -0
- tnfr/helpers/numeric.py +1 -0
- tnfr/helpers/numeric.pyi +12 -0
- tnfr/immutable.py +55 -19
- tnfr/immutable.pyi +37 -0
- tnfr/initialization.py +12 -10
- tnfr/initialization.pyi +73 -0
- tnfr/io.py +99 -34
- tnfr/io.pyi +11 -0
- tnfr/locking.pyi +7 -0
- tnfr/metrics/__init__.pyi +20 -0
- tnfr/metrics/coherence.py +934 -294
- tnfr/metrics/common.py +1 -3
- tnfr/metrics/common.pyi +15 -0
- tnfr/metrics/core.py +192 -34
- tnfr/metrics/core.pyi +13 -0
- tnfr/metrics/diagnosis.py +707 -101
- tnfr/metrics/diagnosis.pyi +89 -0
- tnfr/metrics/export.py +27 -13
- tnfr/metrics/glyph_timing.py +218 -38
- tnfr/metrics/reporting.py +22 -18
- tnfr/metrics/reporting.pyi +12 -0
- tnfr/metrics/sense_index.py +199 -25
- tnfr/metrics/sense_index.pyi +9 -0
- tnfr/metrics/trig.py +53 -18
- tnfr/metrics/trig.pyi +12 -0
- tnfr/metrics/trig_cache.py +3 -7
- tnfr/metrics/trig_cache.pyi +10 -0
- tnfr/node.py +148 -125
- tnfr/node.pyi +161 -0
- tnfr/observers.py +44 -30
- tnfr/observers.pyi +46 -0
- tnfr/ontosim.py +14 -13
- tnfr/ontosim.pyi +33 -0
- tnfr/operators/__init__.py +84 -52
- tnfr/operators/__init__.pyi +31 -0
- tnfr/operators/definitions.py +181 -0
- tnfr/operators/definitions.pyi +92 -0
- tnfr/operators/jitter.py +86 -23
- tnfr/operators/jitter.pyi +11 -0
- tnfr/operators/registry.py +80 -0
- tnfr/operators/registry.pyi +15 -0
- tnfr/operators/remesh.py +141 -57
- tnfr/presets.py +9 -54
- tnfr/presets.pyi +7 -0
- tnfr/py.typed +0 -0
- tnfr/rng.py +259 -73
- tnfr/rng.pyi +14 -0
- tnfr/selector.py +24 -17
- tnfr/selector.pyi +19 -0
- tnfr/sense.py +55 -43
- tnfr/sense.pyi +30 -0
- tnfr/structural.py +44 -267
- tnfr/structural.pyi +46 -0
- tnfr/telemetry/__init__.py +13 -0
- tnfr/telemetry/verbosity.py +37 -0
- tnfr/tokens.py +3 -2
- tnfr/tokens.pyi +41 -0
- tnfr/trace.py +272 -82
- tnfr/trace.pyi +68 -0
- tnfr/types.py +345 -6
- tnfr/types.pyi +145 -0
- tnfr/utils/__init__.py +158 -0
- tnfr/utils/__init__.pyi +133 -0
- tnfr/utils/cache.py +755 -0
- tnfr/utils/cache.pyi +156 -0
- tnfr/{collections_utils.py → utils/data.py} +57 -90
- tnfr/utils/data.pyi +73 -0
- tnfr/utils/graph.py +87 -0
- tnfr/utils/graph.pyi +10 -0
- tnfr/utils/init.py +746 -0
- tnfr/utils/init.pyi +85 -0
- tnfr/{json_utils.py → utils/io.py} +13 -18
- tnfr/utils/io.pyi +10 -0
- tnfr/utils/validators.py +130 -0
- tnfr/utils/validators.pyi +19 -0
- tnfr/validation/__init__.py +25 -0
- tnfr/validation/__init__.pyi +17 -0
- tnfr/validation/compatibility.py +59 -0
- tnfr/validation/compatibility.pyi +8 -0
- tnfr/validation/grammar.py +149 -0
- tnfr/validation/grammar.pyi +11 -0
- tnfr/validation/rules.py +194 -0
- tnfr/validation/rules.pyi +18 -0
- tnfr/validation/syntax.py +151 -0
- tnfr/validation/syntax.pyi +7 -0
- tnfr-6.0.0.dist-info/METADATA +135 -0
- tnfr-6.0.0.dist-info/RECORD +157 -0
- tnfr/graph_utils.py +0 -84
- tnfr/import_utils.py +0 -228
- tnfr/logging_utils.py +0 -116
- tnfr/validators.py +0 -84
- tnfr/value_utils.py +0 -59
- tnfr-4.5.2.dist-info/METADATA +0 -379
- tnfr-4.5.2.dist-info/RECORD +0 -67
- {tnfr-4.5.2.dist-info → tnfr-6.0.0.dist-info}/WHEEL +0 -0
- {tnfr-4.5.2.dist-info → tnfr-6.0.0.dist-info}/entry_points.txt +0 -0
- {tnfr-4.5.2.dist-info → tnfr-6.0.0.dist-info}/licenses/LICENSE.md +0 -0
- {tnfr-4.5.2.dist-info → tnfr-6.0.0.dist-info}/top_level.txt +0 -0
tnfr/cache.py
CHANGED
|
@@ -1,578 +1,732 @@
|
|
|
1
|
-
"""
|
|
2
|
-
|
|
3
|
-
This module consolidates structural cache helpers that previously lived in
|
|
4
|
-
``tnfr.helpers.cache_utils`` and ``tnfr.helpers.edge_cache``. The functions
|
|
5
|
-
exposed here are responsible for maintaining deterministic node digests,
|
|
6
|
-
scoped graph caches guarded by locks, and version counters that keep edge
|
|
7
|
-
artifacts in sync with ΔNFR driven updates.
|
|
8
|
-
"""
|
|
1
|
+
"""Central cache registry infrastructure for TNFR services."""
|
|
9
2
|
|
|
10
3
|
from __future__ import annotations
|
|
11
4
|
|
|
12
|
-
import
|
|
5
|
+
import logging
|
|
13
6
|
import threading
|
|
14
|
-
from collections import
|
|
15
|
-
from collections.abc import Callable, Hashable, Iterable
|
|
7
|
+
from collections.abc import Iterable
|
|
16
8
|
from contextlib import contextmanager
|
|
17
|
-
from
|
|
18
|
-
from
|
|
19
|
-
from typing import Any, TypeVar
|
|
9
|
+
from dataclasses import dataclass, field
|
|
10
|
+
from time import perf_counter
|
|
11
|
+
from typing import Any, Callable, Generic, Hashable, Iterator, Mapping, MutableMapping, TypeVar, cast
|
|
20
12
|
|
|
21
13
|
from cachetools import LRUCache
|
|
22
|
-
import networkx as nx # type: ignore[import-untyped]
|
|
23
|
-
|
|
24
|
-
from .graph_utils import get_graph, mark_dnfr_prep_dirty
|
|
25
|
-
from .import_utils import get_numpy
|
|
26
|
-
from .json_utils import json_dumps
|
|
27
|
-
from .logging_utils import get_logger
|
|
28
|
-
|
|
29
|
-
T = TypeVar("T")
|
|
30
|
-
|
|
31
|
-
__all__ = (
|
|
32
|
-
"EdgeCacheManager",
|
|
33
|
-
"LockAwareLRUCache",
|
|
34
|
-
"NODE_SET_CHECKSUM_KEY",
|
|
35
|
-
"cached_node_list",
|
|
36
|
-
"cached_nodes_and_A",
|
|
37
|
-
"clear_node_repr_cache",
|
|
38
|
-
"edge_version_cache",
|
|
39
|
-
"edge_version_update",
|
|
40
|
-
"ensure_node_index_map",
|
|
41
|
-
"ensure_node_offset_map",
|
|
42
|
-
"get_graph_version",
|
|
43
|
-
"increment_edge_version",
|
|
44
|
-
"increment_graph_version",
|
|
45
|
-
"node_set_checksum",
|
|
46
|
-
"stable_json",
|
|
47
|
-
)
|
|
48
|
-
|
|
49
|
-
# Key used to store the node set checksum in a graph's ``graph`` attribute.
|
|
50
|
-
NODE_SET_CHECKSUM_KEY = "_node_set_checksum_cache"
|
|
51
|
-
|
|
52
|
-
logger = get_logger(__name__)
|
|
53
|
-
|
|
54
|
-
# Keys of cache entries dependent on the edge version. Any change to the edge
|
|
55
|
-
# set requires these to be dropped to avoid stale data.
|
|
56
|
-
EDGE_VERSION_CACHE_KEYS = ("_trig_version",)
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
class LockAwareLRUCache(LRUCache[Hashable, Any]):
|
|
60
|
-
"""``LRUCache`` that drops per-key locks when evicting items."""
|
|
61
|
-
|
|
62
|
-
def __init__(self, maxsize: int, locks: dict[Hashable, threading.RLock]):
|
|
63
|
-
super().__init__(maxsize)
|
|
64
|
-
self._locks: dict[Hashable, threading.RLock] = locks
|
|
65
|
-
|
|
66
|
-
def popitem(self) -> tuple[Hashable, Any]: # type: ignore[override]
|
|
67
|
-
key, value = super().popitem()
|
|
68
|
-
self._locks.pop(key, None)
|
|
69
|
-
return key, value
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
def _ensure_graph_entry(
|
|
73
|
-
graph: Any,
|
|
74
|
-
key: str,
|
|
75
|
-
factory: Callable[[], T],
|
|
76
|
-
validator: Callable[[Any], bool],
|
|
77
|
-
) -> T:
|
|
78
|
-
"""Return a validated entry from ``graph`` or create one when missing."""
|
|
79
|
-
|
|
80
|
-
value = graph.get(key)
|
|
81
|
-
if not validator(value):
|
|
82
|
-
value = factory()
|
|
83
|
-
graph[key] = value
|
|
84
|
-
return value
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
def _ensure_lock_mapping(
|
|
88
|
-
graph: Any,
|
|
89
|
-
key: str,
|
|
90
|
-
*,
|
|
91
|
-
lock_factory: Callable[[], threading.RLock] = threading.RLock,
|
|
92
|
-
) -> defaultdict[Hashable, threading.RLock]:
|
|
93
|
-
"""Ensure ``graph`` holds a ``defaultdict`` of locks under ``key``."""
|
|
94
|
-
|
|
95
|
-
return _ensure_graph_entry(
|
|
96
|
-
graph,
|
|
97
|
-
key,
|
|
98
|
-
factory=lambda: defaultdict(lock_factory),
|
|
99
|
-
validator=lambda value: isinstance(value, defaultdict)
|
|
100
|
-
and value.default_factory is lock_factory,
|
|
101
|
-
)
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
def _prune_locks(
|
|
105
|
-
cache: dict[Hashable, Any] | LRUCache[Hashable, Any] | None,
|
|
106
|
-
locks: dict[Hashable, threading.RLock]
|
|
107
|
-
| defaultdict[Hashable, threading.RLock]
|
|
108
|
-
| None,
|
|
109
|
-
) -> None:
|
|
110
|
-
"""Drop locks with no corresponding cache entry."""
|
|
111
|
-
|
|
112
|
-
if not isinstance(locks, dict):
|
|
113
|
-
return
|
|
114
|
-
cache_keys = cache.keys() if isinstance(cache, dict) else ()
|
|
115
|
-
for key in list(locks.keys()):
|
|
116
|
-
if key not in cache_keys:
|
|
117
|
-
locks.pop(key, None)
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
def get_graph_version(graph: Any, key: str, default: int = 0) -> int:
|
|
121
|
-
"""Return integer version stored in ``graph`` under ``key``."""
|
|
122
|
-
|
|
123
|
-
return int(graph.get(key, default))
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
def increment_graph_version(graph: Any, key: str) -> int:
|
|
127
|
-
"""Increment and store a version counter in ``graph`` under ``key``."""
|
|
128
14
|
|
|
129
|
-
|
|
130
|
-
graph[key] = version
|
|
131
|
-
return version
|
|
15
|
+
from .types import TimingContext
|
|
132
16
|
|
|
17
|
+
__all__ = [
|
|
18
|
+
"CacheManager",
|
|
19
|
+
"CacheCapacityConfig",
|
|
20
|
+
"CacheStatistics",
|
|
21
|
+
"InstrumentedLRUCache",
|
|
22
|
+
"ManagedLRUCache",
|
|
23
|
+
"prune_lock_mapping",
|
|
24
|
+
]
|
|
133
25
|
|
|
134
|
-
def stable_json(obj: Any) -> str:
|
|
135
|
-
"""Return a JSON string with deterministic ordering for ``obj``."""
|
|
136
26
|
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
sort_keys=True,
|
|
140
|
-
ensure_ascii=False,
|
|
141
|
-
to_bytes=False,
|
|
142
|
-
)
|
|
27
|
+
K = TypeVar("K", bound=Hashable)
|
|
28
|
+
V = TypeVar("V")
|
|
143
29
|
|
|
30
|
+
_logger = logging.getLogger(__name__)
|
|
144
31
|
|
|
145
|
-
@lru_cache(maxsize=1024)
|
|
146
|
-
def _node_repr_digest(obj: Any) -> tuple[str, bytes]:
|
|
147
|
-
"""Return cached stable representation and digest for ``obj``."""
|
|
148
32
|
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
repr_ = repr(obj)
|
|
153
|
-
digest = hashlib.blake2b(repr_.encode("utf-8"), digest_size=16).digest()
|
|
154
|
-
return repr_, digest
|
|
33
|
+
@dataclass(frozen=True)
|
|
34
|
+
class CacheCapacityConfig:
|
|
35
|
+
"""Configuration snapshot for cache capacity policies."""
|
|
155
36
|
|
|
37
|
+
default_capacity: int | None
|
|
38
|
+
overrides: dict[str, int | None]
|
|
156
39
|
|
|
157
|
-
def clear_node_repr_cache() -> None:
|
|
158
|
-
"""Clear cached node representations used for checksums."""
|
|
159
40
|
|
|
160
|
-
|
|
41
|
+
@dataclass(frozen=True)
|
|
42
|
+
class CacheStatistics:
|
|
43
|
+
"""Immutable snapshot of cache telemetry counters."""
|
|
161
44
|
|
|
45
|
+
hits: int = 0
|
|
46
|
+
misses: int = 0
|
|
47
|
+
evictions: int = 0
|
|
48
|
+
total_time: float = 0.0
|
|
49
|
+
timings: int = 0
|
|
162
50
|
|
|
163
|
-
def
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
return _node_repr_digest(n)[0]
|
|
51
|
+
def merge(self, other: CacheStatistics) -> CacheStatistics:
|
|
52
|
+
"""Return aggregated metrics combining ``self`` and ``other``."""
|
|
167
53
|
|
|
54
|
+
return CacheStatistics(
|
|
55
|
+
hits=self.hits + other.hits,
|
|
56
|
+
misses=self.misses + other.misses,
|
|
57
|
+
evictions=self.evictions + other.evictions,
|
|
58
|
+
total_time=self.total_time + other.total_time,
|
|
59
|
+
timings=self.timings + other.timings,
|
|
60
|
+
)
|
|
168
61
|
|
|
169
|
-
def _iter_node_digests(
|
|
170
|
-
nodes: Iterable[Any], *, presorted: bool
|
|
171
|
-
) -> Iterable[bytes]:
|
|
172
|
-
"""Yield node digests in a deterministic order."""
|
|
173
62
|
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
) -> str:
|
|
191
|
-
"""Checksum helper when no explicit node set is provided."""
|
|
192
|
-
|
|
193
|
-
nodes_view = G.nodes()
|
|
194
|
-
current_nodes = frozenset(nodes_view)
|
|
195
|
-
cached = graph.get(NODE_SET_CHECKSUM_KEY)
|
|
196
|
-
if cached and len(cached) == 3 and cached[2] == current_nodes:
|
|
197
|
-
return cached[1]
|
|
198
|
-
|
|
199
|
-
hasher = hashlib.blake2b(digest_size=16)
|
|
200
|
-
for digest in _iter_node_digests(nodes_view, presorted=presorted):
|
|
201
|
-
hasher.update(digest)
|
|
202
|
-
|
|
203
|
-
checksum = hasher.hexdigest()
|
|
204
|
-
if store:
|
|
205
|
-
token = checksum[:16]
|
|
206
|
-
if cached and cached[0] == token:
|
|
207
|
-
return cached[1]
|
|
208
|
-
graph[NODE_SET_CHECKSUM_KEY] = (token, checksum, current_nodes)
|
|
209
|
-
else:
|
|
210
|
-
graph.pop(NODE_SET_CHECKSUM_KEY, None)
|
|
211
|
-
return checksum
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
def node_set_checksum(
|
|
215
|
-
G: nx.Graph,
|
|
216
|
-
nodes: Iterable[Any] | None = None,
|
|
217
|
-
*,
|
|
218
|
-
presorted: bool = False,
|
|
219
|
-
store: bool = True,
|
|
220
|
-
) -> str:
|
|
221
|
-
"""Return a BLAKE2b checksum of ``G``'s node set."""
|
|
222
|
-
|
|
223
|
-
graph = get_graph(G)
|
|
224
|
-
if nodes is None:
|
|
225
|
-
return _node_set_checksum_no_nodes(
|
|
226
|
-
G, graph, presorted=presorted, store=store
|
|
63
|
+
@dataclass
|
|
64
|
+
class _CacheMetrics:
|
|
65
|
+
hits: int = 0
|
|
66
|
+
misses: int = 0
|
|
67
|
+
evictions: int = 0
|
|
68
|
+
total_time: float = 0.0
|
|
69
|
+
timings: int = 0
|
|
70
|
+
lock: threading.Lock = field(default_factory=threading.Lock, repr=False)
|
|
71
|
+
|
|
72
|
+
def snapshot(self) -> CacheStatistics:
|
|
73
|
+
return CacheStatistics(
|
|
74
|
+
hits=self.hits,
|
|
75
|
+
misses=self.misses,
|
|
76
|
+
evictions=self.evictions,
|
|
77
|
+
total_time=self.total_time,
|
|
78
|
+
timings=self.timings,
|
|
227
79
|
)
|
|
228
80
|
|
|
229
|
-
hasher = hashlib.blake2b(digest_size=16)
|
|
230
|
-
for digest in _iter_node_digests(nodes, presorted=presorted):
|
|
231
|
-
hasher.update(digest)
|
|
232
|
-
|
|
233
|
-
checksum = hasher.hexdigest()
|
|
234
|
-
if store:
|
|
235
|
-
token = checksum[:16]
|
|
236
|
-
cached = graph.get(NODE_SET_CHECKSUM_KEY)
|
|
237
|
-
if cached and cached[0] == token:
|
|
238
|
-
return cached[1]
|
|
239
|
-
graph[NODE_SET_CHECKSUM_KEY] = (token, checksum)
|
|
240
|
-
else:
|
|
241
|
-
graph.pop(NODE_SET_CHECKSUM_KEY, None)
|
|
242
|
-
return checksum
|
|
243
81
|
|
|
82
|
+
@dataclass
|
|
83
|
+
class _CacheEntry:
|
|
84
|
+
factory: Callable[[], Any]
|
|
85
|
+
lock: threading.Lock
|
|
86
|
+
reset: Callable[[Any], Any] | None = None
|
|
244
87
|
|
|
245
|
-
@dataclass(slots=True)
|
|
246
|
-
class NodeCache:
|
|
247
|
-
"""Container for cached node data."""
|
|
248
88
|
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
sorted_nodes: tuple[Any, ...] | None = None
|
|
252
|
-
idx: dict[Any, int] | None = None
|
|
253
|
-
offset: dict[Any, int] | None = None
|
|
89
|
+
class CacheManager:
|
|
90
|
+
"""Coordinate named caches guarded by per-entry locks."""
|
|
254
91
|
|
|
255
|
-
|
|
256
|
-
def n(self) -> int:
|
|
257
|
-
return len(self.nodes)
|
|
92
|
+
_MISSING = object()
|
|
258
93
|
|
|
94
|
+
def __init__(
|
|
95
|
+
self,
|
|
96
|
+
storage: MutableMapping[str, Any] | None = None,
|
|
97
|
+
*,
|
|
98
|
+
default_capacity: int | None = None,
|
|
99
|
+
overrides: Mapping[str, int | None] | None = None,
|
|
100
|
+
) -> None:
|
|
101
|
+
self._storage: MutableMapping[str, Any]
|
|
102
|
+
if storage is None:
|
|
103
|
+
self._storage = {}
|
|
104
|
+
else:
|
|
105
|
+
self._storage = storage
|
|
106
|
+
self._entries: dict[str, _CacheEntry] = {}
|
|
107
|
+
self._registry_lock = threading.RLock()
|
|
108
|
+
self._default_capacity = self._normalise_capacity(default_capacity)
|
|
109
|
+
self._capacity_overrides: dict[str, int | None] = {}
|
|
110
|
+
self._metrics: dict[str, _CacheMetrics] = {}
|
|
111
|
+
self._metrics_publishers: list[Callable[[str, CacheStatistics], None]] = []
|
|
112
|
+
if overrides:
|
|
113
|
+
self.configure(overrides=overrides)
|
|
114
|
+
|
|
115
|
+
@staticmethod
|
|
116
|
+
def _normalise_capacity(value: int | None) -> int | None:
|
|
117
|
+
if value is None:
|
|
118
|
+
return None
|
|
119
|
+
size = int(value)
|
|
120
|
+
if size < 0:
|
|
121
|
+
raise ValueError("capacity must be non-negative or None")
|
|
122
|
+
return size
|
|
123
|
+
|
|
124
|
+
def register(
|
|
125
|
+
self,
|
|
126
|
+
name: str,
|
|
127
|
+
factory: Callable[[], Any],
|
|
128
|
+
*,
|
|
129
|
+
lock_factory: Callable[[], threading.Lock | threading.RLock] | None = None,
|
|
130
|
+
reset: Callable[[Any], Any] | None = None,
|
|
131
|
+
create: bool = True,
|
|
132
|
+
) -> None:
|
|
133
|
+
"""Register ``name`` with ``factory`` and optional lifecycle hooks."""
|
|
134
|
+
|
|
135
|
+
if lock_factory is None:
|
|
136
|
+
lock_factory = threading.RLock
|
|
137
|
+
with self._registry_lock:
|
|
138
|
+
entry = self._entries.get(name)
|
|
139
|
+
if entry is None:
|
|
140
|
+
entry = _CacheEntry(factory=factory, lock=lock_factory(), reset=reset)
|
|
141
|
+
self._entries[name] = entry
|
|
142
|
+
else:
|
|
143
|
+
# Update hooks when re-registering the same cache name.
|
|
144
|
+
entry.factory = factory
|
|
145
|
+
entry.reset = reset
|
|
146
|
+
self._ensure_metrics(name)
|
|
147
|
+
if create:
|
|
148
|
+
self.get(name)
|
|
149
|
+
|
|
150
|
+
def configure(
|
|
151
|
+
self,
|
|
152
|
+
*,
|
|
153
|
+
default_capacity: int | None | object = _MISSING,
|
|
154
|
+
overrides: Mapping[str, int | None] | None = None,
|
|
155
|
+
replace_overrides: bool = False,
|
|
156
|
+
) -> None:
|
|
157
|
+
"""Update the cache capacity policy shared by registered entries."""
|
|
158
|
+
|
|
159
|
+
with self._registry_lock:
|
|
160
|
+
if default_capacity is not self._MISSING:
|
|
161
|
+
self._default_capacity = self._normalise_capacity(
|
|
162
|
+
default_capacity if default_capacity is not None else None
|
|
163
|
+
)
|
|
164
|
+
if overrides is not None:
|
|
165
|
+
if replace_overrides:
|
|
166
|
+
self._capacity_overrides.clear()
|
|
167
|
+
for key, value in overrides.items():
|
|
168
|
+
self._capacity_overrides[key] = self._normalise_capacity(value)
|
|
169
|
+
|
|
170
|
+
def configure_from_mapping(self, config: Mapping[str, Any]) -> None:
|
|
171
|
+
"""Load configuration produced by :meth:`export_config`."""
|
|
172
|
+
|
|
173
|
+
default = config.get("default_capacity", self._MISSING)
|
|
174
|
+
overrides = config.get("overrides")
|
|
175
|
+
overrides_mapping: Mapping[str, int | None] | None
|
|
176
|
+
overrides_mapping = overrides if isinstance(overrides, Mapping) else None
|
|
177
|
+
self.configure(default_capacity=default, overrides=overrides_mapping)
|
|
178
|
+
|
|
179
|
+
def export_config(self) -> CacheCapacityConfig:
|
|
180
|
+
"""Return a copy of the current capacity configuration."""
|
|
181
|
+
|
|
182
|
+
with self._registry_lock:
|
|
183
|
+
return CacheCapacityConfig(
|
|
184
|
+
default_capacity=self._default_capacity,
|
|
185
|
+
overrides=dict(self._capacity_overrides),
|
|
186
|
+
)
|
|
259
187
|
|
|
260
|
-
def
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
) -> None:
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
)
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
*,
|
|
305
|
-
|
|
306
|
-
|
|
307
|
-
)
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
graph,
|
|
315
|
-
nodes,
|
|
316
|
-
"_node_list",
|
|
317
|
-
checksum=checksum,
|
|
318
|
-
sorted_nodes=sorted_nodes,
|
|
319
|
-
)
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
def _cache_node_list(G: nx.Graph) -> tuple[Any, ...]:
|
|
323
|
-
"""Cache and return the tuple of nodes for ``G``."""
|
|
324
|
-
|
|
325
|
-
graph = get_graph(G)
|
|
326
|
-
cache: NodeCache | None = graph.get("_node_list_cache")
|
|
327
|
-
nodes = cache.nodes if cache else None
|
|
328
|
-
sorted_nodes = cache.sorted_nodes if cache else None
|
|
329
|
-
stored_len = graph.get("_node_list_len")
|
|
330
|
-
current_n = G.number_of_nodes()
|
|
331
|
-
dirty = bool(graph.pop("_node_list_dirty", False))
|
|
332
|
-
|
|
333
|
-
invalid = nodes is None or stored_len != current_n or dirty
|
|
334
|
-
new_checksum: str | None = None
|
|
335
|
-
|
|
336
|
-
if not invalid and cache:
|
|
337
|
-
new_checksum = node_set_checksum(G)
|
|
338
|
-
invalid = cache.checksum != new_checksum
|
|
339
|
-
|
|
340
|
-
sort_nodes = bool(graph.get("SORT_NODES", False))
|
|
341
|
-
|
|
342
|
-
if invalid:
|
|
343
|
-
nodes = _refresh_node_list_cache(
|
|
344
|
-
G, graph, sort_nodes=sort_nodes, current_n=current_n
|
|
345
|
-
)
|
|
346
|
-
elif cache and "_node_list_checksum" not in graph:
|
|
347
|
-
_reuse_node_list_cache(
|
|
348
|
-
graph,
|
|
349
|
-
cache,
|
|
350
|
-
nodes,
|
|
351
|
-
sorted_nodes,
|
|
352
|
-
sort_nodes=sort_nodes,
|
|
353
|
-
new_checksum=new_checksum,
|
|
354
|
-
)
|
|
355
|
-
else:
|
|
356
|
-
if sort_nodes and sorted_nodes is None and cache is not None:
|
|
357
|
-
cache.sorted_nodes = tuple(sorted(nodes, key=_node_repr))
|
|
358
|
-
return nodes
|
|
188
|
+
def get_capacity(
|
|
189
|
+
self,
|
|
190
|
+
name: str,
|
|
191
|
+
*,
|
|
192
|
+
requested: int | None = None,
|
|
193
|
+
fallback: int | None = None,
|
|
194
|
+
use_default: bool = True,
|
|
195
|
+
) -> int | None:
|
|
196
|
+
"""Return capacity for ``name`` considering overrides and defaults."""
|
|
197
|
+
|
|
198
|
+
with self._registry_lock:
|
|
199
|
+
override = self._capacity_overrides.get(name, self._MISSING)
|
|
200
|
+
default = self._default_capacity
|
|
201
|
+
if override is not self._MISSING:
|
|
202
|
+
return override
|
|
203
|
+
values: tuple[int | None, ...]
|
|
204
|
+
if use_default:
|
|
205
|
+
values = (requested, default, fallback)
|
|
206
|
+
else:
|
|
207
|
+
values = (requested, fallback)
|
|
208
|
+
for value in values:
|
|
209
|
+
if value is self._MISSING:
|
|
210
|
+
continue
|
|
211
|
+
normalised = self._normalise_capacity(value)
|
|
212
|
+
if normalised is not None:
|
|
213
|
+
return normalised
|
|
214
|
+
return None
|
|
215
|
+
|
|
216
|
+
def has_override(self, name: str) -> bool:
|
|
217
|
+
"""Return ``True`` if ``name`` has an explicit capacity override."""
|
|
218
|
+
|
|
219
|
+
with self._registry_lock:
|
|
220
|
+
return name in self._capacity_overrides
|
|
221
|
+
|
|
222
|
+
def get_lock(self, name: str) -> threading.Lock | threading.RLock:
|
|
223
|
+
entry = self._entries.get(name)
|
|
224
|
+
if entry is None:
|
|
225
|
+
raise KeyError(name)
|
|
226
|
+
return entry.lock
|
|
227
|
+
|
|
228
|
+
def names(self) -> Iterator[str]:
|
|
229
|
+
with self._registry_lock:
|
|
230
|
+
return iter(tuple(self._entries))
|
|
231
|
+
|
|
232
|
+
def get(self, name: str, *, create: bool = True) -> Any:
|
|
233
|
+
entry = self._entries.get(name)
|
|
234
|
+
if entry is None:
|
|
235
|
+
raise KeyError(name)
|
|
236
|
+
with entry.lock:
|
|
237
|
+
value = self._storage.get(name)
|
|
238
|
+
if create and value is None:
|
|
239
|
+
value = entry.factory()
|
|
240
|
+
self._storage[name] = value
|
|
241
|
+
return value
|
|
359
242
|
|
|
243
|
+
def peek(self, name: str) -> Any:
|
|
244
|
+
return self.get(name, create=False)
|
|
360
245
|
|
|
361
|
-
def
|
|
362
|
-
|
|
246
|
+
def store(self, name: str, value: Any) -> None:
|
|
247
|
+
entry = self._entries.get(name)
|
|
248
|
+
if entry is None:
|
|
249
|
+
raise KeyError(name)
|
|
250
|
+
with entry.lock:
|
|
251
|
+
self._storage[name] = value
|
|
363
252
|
|
|
364
|
-
|
|
253
|
+
def update(
|
|
254
|
+
self,
|
|
255
|
+
name: str,
|
|
256
|
+
updater: Callable[[Any], Any],
|
|
257
|
+
*,
|
|
258
|
+
create: bool = True,
|
|
259
|
+
) -> Any:
|
|
260
|
+
entry = self._entries.get(name)
|
|
261
|
+
if entry is None:
|
|
262
|
+
raise KeyError(name)
|
|
263
|
+
with entry.lock:
|
|
264
|
+
current = self._storage.get(name)
|
|
265
|
+
if create and current is None:
|
|
266
|
+
current = entry.factory()
|
|
267
|
+
new_value = updater(current)
|
|
268
|
+
self._storage[name] = new_value
|
|
269
|
+
return new_value
|
|
270
|
+
|
|
271
|
+
def clear(self, name: str | None = None) -> None:
|
|
272
|
+
if name is not None:
|
|
273
|
+
names = (name,)
|
|
274
|
+
else:
|
|
275
|
+
with self._registry_lock:
|
|
276
|
+
names = tuple(self._entries)
|
|
277
|
+
for cache_name in names:
|
|
278
|
+
entry = self._entries.get(cache_name)
|
|
279
|
+
if entry is None:
|
|
280
|
+
continue
|
|
281
|
+
with entry.lock:
|
|
282
|
+
current = self._storage.get(cache_name)
|
|
283
|
+
new_value = None
|
|
284
|
+
if entry.reset is not None:
|
|
285
|
+
new_value = entry.reset(current)
|
|
286
|
+
if new_value is None:
|
|
287
|
+
try:
|
|
288
|
+
new_value = entry.factory()
|
|
289
|
+
except Exception:
|
|
290
|
+
self._storage.pop(cache_name, None)
|
|
291
|
+
continue
|
|
292
|
+
self._storage[cache_name] = new_value
|
|
293
|
+
|
|
294
|
+
# ------------------------------------------------------------------
|
|
295
|
+
# Metrics helpers
|
|
296
|
+
|
|
297
|
+
def _ensure_metrics(self, name: str) -> _CacheMetrics:
|
|
298
|
+
metrics = self._metrics.get(name)
|
|
299
|
+
if metrics is None:
|
|
300
|
+
with self._registry_lock:
|
|
301
|
+
metrics = self._metrics.get(name)
|
|
302
|
+
if metrics is None:
|
|
303
|
+
metrics = _CacheMetrics()
|
|
304
|
+
self._metrics[name] = metrics
|
|
305
|
+
return metrics
|
|
306
|
+
|
|
307
|
+
def increment_hit(
|
|
308
|
+
self,
|
|
309
|
+
name: str,
|
|
310
|
+
*,
|
|
311
|
+
amount: int = 1,
|
|
312
|
+
duration: float | None = None,
|
|
313
|
+
) -> None:
|
|
314
|
+
metrics = self._ensure_metrics(name)
|
|
315
|
+
with metrics.lock:
|
|
316
|
+
metrics.hits += int(amount)
|
|
317
|
+
if duration is not None:
|
|
318
|
+
metrics.total_time += float(duration)
|
|
319
|
+
metrics.timings += 1
|
|
320
|
+
|
|
321
|
+
def increment_miss(
|
|
322
|
+
self,
|
|
323
|
+
name: str,
|
|
324
|
+
*,
|
|
325
|
+
amount: int = 1,
|
|
326
|
+
duration: float | None = None,
|
|
327
|
+
) -> None:
|
|
328
|
+
metrics = self._ensure_metrics(name)
|
|
329
|
+
with metrics.lock:
|
|
330
|
+
metrics.misses += int(amount)
|
|
331
|
+
if duration is not None:
|
|
332
|
+
metrics.total_time += float(duration)
|
|
333
|
+
metrics.timings += 1
|
|
334
|
+
|
|
335
|
+
def increment_eviction(self, name: str, *, amount: int = 1) -> None:
|
|
336
|
+
metrics = self._ensure_metrics(name)
|
|
337
|
+
with metrics.lock:
|
|
338
|
+
metrics.evictions += int(amount)
|
|
339
|
+
|
|
340
|
+
def record_timing(self, name: str, duration: float) -> None:
|
|
341
|
+
metrics = self._ensure_metrics(name)
|
|
342
|
+
with metrics.lock:
|
|
343
|
+
metrics.total_time += float(duration)
|
|
344
|
+
metrics.timings += 1
|
|
345
|
+
|
|
346
|
+
@contextmanager
|
|
347
|
+
def timer(self, name: str) -> TimingContext:
|
|
348
|
+
"""Context manager recording execution time for ``name``."""
|
|
349
|
+
|
|
350
|
+
start = perf_counter()
|
|
351
|
+
try:
|
|
352
|
+
yield
|
|
353
|
+
finally:
|
|
354
|
+
self.record_timing(name, perf_counter() - start)
|
|
355
|
+
|
|
356
|
+
def get_metrics(self, name: str) -> CacheStatistics:
|
|
357
|
+
metrics = self._metrics.get(name)
|
|
358
|
+
if metrics is None:
|
|
359
|
+
return CacheStatistics()
|
|
360
|
+
with metrics.lock:
|
|
361
|
+
return metrics.snapshot()
|
|
362
|
+
|
|
363
|
+
def iter_metrics(self) -> Iterator[tuple[str, CacheStatistics]]:
|
|
364
|
+
with self._registry_lock:
|
|
365
|
+
items = tuple(self._metrics.items())
|
|
366
|
+
for name, metrics in items:
|
|
367
|
+
with metrics.lock:
|
|
368
|
+
yield name, metrics.snapshot()
|
|
369
|
+
|
|
370
|
+
def aggregate_metrics(self) -> CacheStatistics:
|
|
371
|
+
aggregate = CacheStatistics()
|
|
372
|
+
for _, stats in self.iter_metrics():
|
|
373
|
+
aggregate = aggregate.merge(stats)
|
|
374
|
+
return aggregate
|
|
375
|
+
|
|
376
|
+
def register_metrics_publisher(
|
|
377
|
+
self, publisher: Callable[[str, CacheStatistics], None]
|
|
378
|
+
) -> None:
|
|
379
|
+
with self._registry_lock:
|
|
380
|
+
self._metrics_publishers.append(publisher)
|
|
381
|
+
|
|
382
|
+
def publish_metrics(
|
|
383
|
+
self,
|
|
384
|
+
*,
|
|
385
|
+
publisher: Callable[[str, CacheStatistics], None] | None = None,
|
|
386
|
+
) -> None:
|
|
387
|
+
if publisher is None:
|
|
388
|
+
with self._registry_lock:
|
|
389
|
+
publishers = tuple(self._metrics_publishers)
|
|
390
|
+
else:
|
|
391
|
+
publishers = (publisher,)
|
|
392
|
+
if not publishers:
|
|
393
|
+
return
|
|
394
|
+
snapshot = tuple(self.iter_metrics())
|
|
395
|
+
for emit in publishers:
|
|
396
|
+
for name, stats in snapshot:
|
|
397
|
+
try:
|
|
398
|
+
emit(name, stats)
|
|
399
|
+
except Exception: # pragma: no cover - defensive logging
|
|
400
|
+
logging.getLogger(__name__).exception(
|
|
401
|
+
"Cache metrics publisher failed for %s", name
|
|
402
|
+
)
|
|
403
|
+
|
|
404
|
+
def log_metrics(self, logger: logging.Logger, *, level: int = logging.INFO) -> None:
|
|
405
|
+
"""Emit cache metrics using ``logger`` for telemetry hooks."""
|
|
406
|
+
|
|
407
|
+
for name, stats in self.iter_metrics():
|
|
408
|
+
logger.log(
|
|
409
|
+
level,
|
|
410
|
+
"cache=%s hits=%d misses=%d evictions=%d timings=%d total_time=%.6f",
|
|
411
|
+
name,
|
|
412
|
+
stats.hits,
|
|
413
|
+
stats.misses,
|
|
414
|
+
stats.evictions,
|
|
415
|
+
stats.timings,
|
|
416
|
+
stats.total_time,
|
|
417
|
+
)
|
|
365
418
|
|
|
366
419
|
|
|
367
|
-
def
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
372
|
-
)
|
|
373
|
-
|
|
420
|
+
def _normalise_callbacks(
|
|
421
|
+
callbacks: Iterable[Callable[[K, V], None]] | Callable[[K, V], None] | None,
|
|
422
|
+
) -> tuple[Callable[[K, V], None], ...]:
|
|
423
|
+
if callbacks is None:
|
|
424
|
+
return ()
|
|
425
|
+
if callable(callbacks):
|
|
426
|
+
return (callbacks,)
|
|
427
|
+
return tuple(callbacks)
|
|
374
428
|
|
|
375
|
-
graph = G.graph
|
|
376
|
-
_cache_node_list(G)
|
|
377
|
-
cache: NodeCache = graph["_node_list_cache"]
|
|
378
429
|
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
nodes = cache.sorted_nodes = tuple(
|
|
385
|
-
sorted(cache.nodes, key=_node_repr)
|
|
386
|
-
)
|
|
387
|
-
else:
|
|
388
|
-
nodes = cache.nodes
|
|
389
|
-
mappings: dict[str, dict[Any, int]] = {attr: {} for attr in missing}
|
|
390
|
-
for idx, node in enumerate(nodes):
|
|
391
|
-
for attr in missing:
|
|
392
|
-
mappings[attr][node] = idx
|
|
393
|
-
for attr in missing:
|
|
394
|
-
setattr(cache, attr, mappings[attr])
|
|
395
|
-
return getattr(cache, attrs[0])
|
|
430
|
+
def prune_lock_mapping(
|
|
431
|
+
cache: Mapping[K, Any] | MutableMapping[K, Any] | None,
|
|
432
|
+
locks: MutableMapping[K, Any] | None,
|
|
433
|
+
) -> None:
|
|
434
|
+
"""Drop lock entries not present in ``cache``."""
|
|
396
435
|
|
|
436
|
+
if locks is None:
|
|
437
|
+
return
|
|
438
|
+
if cache is None:
|
|
439
|
+
cache_keys: set[K] = set()
|
|
440
|
+
else:
|
|
441
|
+
cache_keys = set(cache.keys())
|
|
442
|
+
for key in list(locks.keys()):
|
|
443
|
+
if key not in cache_keys:
|
|
444
|
+
locks.pop(key, None)
|
|
397
445
|
|
|
398
|
-
def ensure_node_index_map(G) -> dict[Any, int]:
|
|
399
|
-
"""Return cached node-to-index mapping for ``G``."""
|
|
400
446
|
|
|
401
|
-
|
|
447
|
+
class InstrumentedLRUCache(MutableMapping[K, V], Generic[K, V]):
    """LRU cache wrapper that synchronises telemetry, callbacks and locks.

    The wrapper owns an internal :class:`cachetools.LRUCache` instance and
    forwards all read operations to it. Mutating operations are instrumented to
    update :class:`CacheManager` metrics, execute registered callbacks and keep
    an optional lock mapping aligned with the stored keys. Telemetry callbacks
    always execute before eviction callbacks, preserving the registration order
    for deterministic side effects.

    Callbacks can be extended or replaced after construction via
    :meth:`set_telemetry_callbacks` and :meth:`set_eviction_callbacks`. When
    ``append`` is ``False`` (default) the provided callbacks replace the
    existing sequence; otherwise they are appended at the end while keeping the
    previous ordering intact.
    """

    # Sentinel distinguishing "no default supplied" from ``None`` in pop().
    _MISSING = object()

    def __init__(
        self,
        maxsize: int,
        *,
        manager: CacheManager | None = None,
        metrics_key: str | None = None,
        telemetry_callbacks: Iterable[Callable[[K, V], None]]
        | Callable[[K, V], None]
        | None = None,
        eviction_callbacks: Iterable[Callable[[K, V], None]]
        | Callable[[K, V], None]
        | None = None,
        locks: MutableMapping[K, Any] | None = None,
        getsizeof: Callable[[V], int] | None = None,
        count_overwrite_hit: bool = True,
    ) -> None:
        self._cache: LRUCache[K, V] = LRUCache(maxsize, getsizeof=getsizeof)
        original_popitem = self._cache.popitem

        # Wrap the backing cache's popitem so size-driven evictions performed
        # internally by LRUCache also flow through _dispatch_removal (metrics,
        # callbacks, lock purge).
        def _instrumented_popitem() -> tuple[K, V]:
            key, value = original_popitem()
            self._dispatch_removal(key, value)
            return key, value

        self._cache.popitem = _instrumented_popitem  # type: ignore[assignment]
        self._manager = manager
        self._metrics_key = metrics_key
        self._locks = locks
        self._count_overwrite_hit = bool(count_overwrite_hit)
        self._telemetry_callbacks: list[Callable[[K, V], None]]
        self._telemetry_callbacks = list(_normalise_callbacks(telemetry_callbacks))
        self._eviction_callbacks: list[Callable[[K, V], None]]
        self._eviction_callbacks = list(_normalise_callbacks(eviction_callbacks))

    # ------------------------------------------------------------------
    # Callback registration helpers

    @property
    def telemetry_callbacks(self) -> tuple[Callable[[K, V], None], ...]:
        """Return currently registered telemetry callbacks."""

        return tuple(self._telemetry_callbacks)

    @property
    def eviction_callbacks(self) -> tuple[Callable[[K, V], None], ...]:
        """Return currently registered eviction callbacks."""

        return tuple(self._eviction_callbacks)

    def set_telemetry_callbacks(
        self,
        callbacks: Iterable[Callable[[K, V], None]]
        | Callable[[K, V], None]
        | None,
        *,
        append: bool = False,
    ) -> None:
        """Update telemetry callbacks executed on removals.

        When ``append`` is ``True`` the provided callbacks are added to the end
        of the execution chain while preserving relative order. Otherwise, the
        previous callbacks are replaced.
        """

        new_callbacks = list(_normalise_callbacks(callbacks))
        if append:
            self._telemetry_callbacks.extend(new_callbacks)
        else:
            self._telemetry_callbacks = new_callbacks

    def set_eviction_callbacks(
        self,
        callbacks: Iterable[Callable[[K, V], None]]
        | Callable[[K, V], None]
        | None,
        *,
        append: bool = False,
    ) -> None:
        """Update eviction callbacks executed on removals.

        Behaviour matches :meth:`set_telemetry_callbacks`.
        """

        new_callbacks = list(_normalise_callbacks(callbacks))
        if append:
            self._eviction_callbacks.extend(new_callbacks)
        else:
            self._eviction_callbacks = new_callbacks

    # ------------------------------------------------------------------
    # MutableMapping interface

    def __getitem__(self, key: K) -> V:
        return self._cache[key]

    def __setitem__(self, key: K, value: V) -> None:
        # Check membership before inserting so metrics distinguish an
        # overwrite (hit, when count_overwrite_hit) from a fresh insert (miss).
        exists = key in self._cache
        self._cache[key] = value
        if exists:
            if self._count_overwrite_hit:
                self._record_hit(1)
        else:
            self._record_miss(1)

    def __delitem__(self, key: K) -> None:
        try:
            value = self._cache[key]
        except KeyError:
            # Deleting an absent key still counts as a miss before re-raising.
            self._record_miss(1)
            raise
        del self._cache[key]
        self._dispatch_removal(key, value, hits=1)

    def __iter__(self) -> Iterator[K]:
        return iter(self._cache)

    def __len__(self) -> int:
        return len(self._cache)

    def __contains__(self, key: object) -> bool:
        return key in self._cache

    def __repr__(self) -> str:  # pragma: no cover - debugging helper
        return f"{self.__class__.__name__}({self._cache!r})"

    # ------------------------------------------------------------------
    # Cache helpers

    @property
    def maxsize(self) -> int:
        return self._cache.maxsize

    @property
    def currsize(self) -> int:
        return self._cache.currsize

    def get(self, key: K, default: V | None = None) -> V | None:
        # NOTE(review): plain forwarding — lookups via get() do not record
        # hit/miss metrics; only mutating operations are instrumented.
        return self._cache.get(key, default)

    def pop(self, key: K, default: Any = _MISSING) -> V:
        try:
            value = self._cache[key]
        except KeyError:
            self._record_miss(1)
            if default is self._MISSING:
                raise
            return cast(V, default)
        del self._cache[key]
        self._dispatch_removal(key, value, hits=1)
        return value

    def popitem(self) -> tuple[K, V]:
        # The backing popitem was patched in __init__ to dispatch removal
        # side effects, so no extra accounting is needed here.
        return self._cache.popitem()

    def clear(self) -> None:  # type: ignore[override]
        # Drain via popitem so every entry triggers metrics and callbacks,
        # then drop any remaining lock entries in one sweep.
        while True:
            try:
                self.popitem()
            except KeyError:
                break
        if self._locks is not None:
            try:
                self._locks.clear()
            except Exception:  # pragma: no cover - defensive logging
                _logger.exception("lock cleanup failed during cache clear")

    # ------------------------------------------------------------------
    # Internal helpers

    def _record_hit(self, amount: int) -> None:
        # Metrics are optional: both a manager and a metrics key are required.
        if amount and self._manager is not None and self._metrics_key is not None:
            self._manager.increment_hit(self._metrics_key, amount=amount)

    def _record_miss(self, amount: int) -> None:
        if amount and self._manager is not None and self._metrics_key is not None:
            self._manager.increment_miss(self._metrics_key, amount=amount)

    def _record_eviction(self, amount: int) -> None:
        if amount and self._manager is not None and self._metrics_key is not None:
            self._manager.increment_eviction(self._metrics_key, amount=amount)

    def _dispatch_removal(
        self,
        key: K,
        value: V,
        *,
        hits: int = 0,
        misses: int = 0,
        eviction_amount: int = 1,
        purge_lock: bool = True,
    ) -> None:
        # Ordering contract (see class docstring): metrics first, then
        # telemetry callbacks, then eviction callbacks, then lock purge.
        if hits:
            self._record_hit(hits)
        if misses:
            self._record_miss(misses)
        if eviction_amount:
            self._record_eviction(eviction_amount)
        self._emit_callbacks(self._telemetry_callbacks, key, value, "telemetry")
        self._emit_callbacks(self._eviction_callbacks, key, value, "eviction")
        if purge_lock:
            self._purge_lock(key)

    def _emit_callbacks(
        self,
        callbacks: Iterable[Callable[[K, V], None]],
        key: K,
        value: V,
        kind: str,
    ) -> None:
        # Callback errors are contained so one failing hook cannot block
        # subsequent callbacks or the cache operation itself.
        for callback in callbacks:
            try:
                callback(key, value)
            except Exception:  # pragma: no cover - defensive logging
                _logger.exception("%s callback failed for %r", kind, key)

    def _purge_lock(self, key: K) -> None:
        if self._locks is None:
            return
        try:
            self._locks.pop(key, None)
        except Exception:  # pragma: no cover - defensive logging
            _logger.exception("lock cleanup failed for %r", key)
|
|
688
|
+
|
|
689
|
+
class ManagedLRUCache(LRUCache[K, V]):
    """LRU cache wrapper with telemetry hooks and lock synchronisation."""

    def __init__(
        self,
        maxsize: int,
        *,
        manager: CacheManager | None = None,
        metrics_key: str | None = None,
        eviction_callbacks: Iterable[Callable[[K, V], None]]
        | Callable[[K, V], None]
        | None = None,
        telemetry_callbacks: Iterable[Callable[[K, V], None]]
        | Callable[[K, V], None]
        | None = None,
        locks: MutableMapping[K, Any] | None = None,
    ) -> None:
        super().__init__(maxsize)
        # Manager/metrics_key are both needed for eviction accounting;
        # either may be None to disable metrics entirely.
        self._manager = manager
        self._metrics_key = metrics_key
        self._locks = locks
        self._eviction_callbacks = _normalise_callbacks(eviction_callbacks)
        self._telemetry_callbacks = _normalise_callbacks(telemetry_callbacks)

    def popitem(self) -> tuple[K, V]:  # type: ignore[override]
        """Evict the LRU entry, then purge its lock, count the eviction and
        fire telemetry callbacks before eviction callbacks."""
        key, value = super().popitem()
        if self._locks is not None:
            try:
                self._locks.pop(key, None)
            except Exception:  # pragma: no cover - defensive logging
                _logger.exception("lock cleanup failed for %r", key)
        if self._manager is not None and self._metrics_key is not None:
            self._manager.increment_eviction(self._metrics_key)
        # Telemetry hooks run first, mirroring InstrumentedLRUCache ordering;
        # each callback is isolated so a failure cannot stop the others.
        for callback in self._telemetry_callbacks:
            try:
                callback(key, value)
            except Exception:  # pragma: no cover - defensive logging
                _logger.exception("telemetry callback failed for %r", key)
        for callback in self._eviction_callbacks:
            try:
                callback(key, value)
            except Exception:  # pragma: no cover - defensive logging
                _logger.exception("eviction callback failed for %r", key)
        return key, value
|