tnfr 4.5.2__py3-none-any.whl → 6.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (161)
  1. tnfr/__init__.py +228 -49
  2. tnfr/__init__.pyi +40 -0
  3. tnfr/_compat.py +11 -0
  4. tnfr/_version.py +7 -0
  5. tnfr/_version.pyi +7 -0
  6. tnfr/alias.py +106 -21
  7. tnfr/alias.pyi +140 -0
  8. tnfr/cache.py +666 -512
  9. tnfr/cache.pyi +232 -0
  10. tnfr/callback_utils.py +2 -9
  11. tnfr/callback_utils.pyi +105 -0
  12. tnfr/cli/__init__.py +21 -7
  13. tnfr/cli/__init__.pyi +47 -0
  14. tnfr/cli/arguments.py +42 -20
  15. tnfr/cli/arguments.pyi +33 -0
  16. tnfr/cli/execution.py +54 -20
  17. tnfr/cli/execution.pyi +80 -0
  18. tnfr/cli/utils.py +0 -2
  19. tnfr/cli/utils.pyi +8 -0
  20. tnfr/config/__init__.py +12 -0
  21. tnfr/config/__init__.pyi +8 -0
  22. tnfr/config/constants.py +104 -0
  23. tnfr/config/constants.pyi +12 -0
  24. tnfr/{config.py → config/init.py} +11 -7
  25. tnfr/config/init.pyi +8 -0
  26. tnfr/config/operator_names.py +106 -0
  27. tnfr/config/operator_names.pyi +28 -0
  28. tnfr/config/presets.py +104 -0
  29. tnfr/config/presets.pyi +7 -0
  30. tnfr/constants/__init__.py +78 -24
  31. tnfr/constants/__init__.pyi +104 -0
  32. tnfr/constants/core.py +1 -2
  33. tnfr/constants/core.pyi +17 -0
  34. tnfr/constants/init.pyi +12 -0
  35. tnfr/constants/metric.py +4 -12
  36. tnfr/constants/metric.pyi +19 -0
  37. tnfr/constants_glyphs.py +9 -91
  38. tnfr/constants_glyphs.pyi +12 -0
  39. tnfr/dynamics/__init__.py +112 -634
  40. tnfr/dynamics/__init__.pyi +83 -0
  41. tnfr/dynamics/adaptation.py +201 -0
  42. tnfr/dynamics/aliases.py +22 -0
  43. tnfr/dynamics/coordination.py +343 -0
  44. tnfr/dynamics/dnfr.py +1936 -354
  45. tnfr/dynamics/dnfr.pyi +33 -0
  46. tnfr/dynamics/integrators.py +369 -75
  47. tnfr/dynamics/integrators.pyi +35 -0
  48. tnfr/dynamics/runtime.py +521 -0
  49. tnfr/dynamics/sampling.py +8 -5
  50. tnfr/dynamics/sampling.pyi +7 -0
  51. tnfr/dynamics/selectors.py +680 -0
  52. tnfr/execution.py +56 -41
  53. tnfr/execution.pyi +65 -0
  54. tnfr/flatten.py +7 -7
  55. tnfr/flatten.pyi +28 -0
  56. tnfr/gamma.py +54 -37
  57. tnfr/gamma.pyi +40 -0
  58. tnfr/glyph_history.py +85 -38
  59. tnfr/glyph_history.pyi +53 -0
  60. tnfr/grammar.py +19 -338
  61. tnfr/grammar.pyi +13 -0
  62. tnfr/helpers/__init__.py +110 -30
  63. tnfr/helpers/__init__.pyi +66 -0
  64. tnfr/helpers/numeric.py +1 -0
  65. tnfr/helpers/numeric.pyi +12 -0
  66. tnfr/immutable.py +55 -19
  67. tnfr/immutable.pyi +37 -0
  68. tnfr/initialization.py +12 -10
  69. tnfr/initialization.pyi +73 -0
  70. tnfr/io.py +99 -34
  71. tnfr/io.pyi +11 -0
  72. tnfr/locking.pyi +7 -0
  73. tnfr/metrics/__init__.pyi +20 -0
  74. tnfr/metrics/coherence.py +934 -294
  75. tnfr/metrics/common.py +1 -3
  76. tnfr/metrics/common.pyi +15 -0
  77. tnfr/metrics/core.py +192 -34
  78. tnfr/metrics/core.pyi +13 -0
  79. tnfr/metrics/diagnosis.py +707 -101
  80. tnfr/metrics/diagnosis.pyi +89 -0
  81. tnfr/metrics/export.py +27 -13
  82. tnfr/metrics/glyph_timing.py +218 -38
  83. tnfr/metrics/reporting.py +22 -18
  84. tnfr/metrics/reporting.pyi +12 -0
  85. tnfr/metrics/sense_index.py +199 -25
  86. tnfr/metrics/sense_index.pyi +9 -0
  87. tnfr/metrics/trig.py +53 -18
  88. tnfr/metrics/trig.pyi +12 -0
  89. tnfr/metrics/trig_cache.py +3 -7
  90. tnfr/metrics/trig_cache.pyi +10 -0
  91. tnfr/node.py +148 -125
  92. tnfr/node.pyi +161 -0
  93. tnfr/observers.py +44 -30
  94. tnfr/observers.pyi +46 -0
  95. tnfr/ontosim.py +14 -13
  96. tnfr/ontosim.pyi +33 -0
  97. tnfr/operators/__init__.py +84 -52
  98. tnfr/operators/__init__.pyi +31 -0
  99. tnfr/operators/definitions.py +181 -0
  100. tnfr/operators/definitions.pyi +92 -0
  101. tnfr/operators/jitter.py +86 -23
  102. tnfr/operators/jitter.pyi +11 -0
  103. tnfr/operators/registry.py +80 -0
  104. tnfr/operators/registry.pyi +15 -0
  105. tnfr/operators/remesh.py +141 -57
  106. tnfr/presets.py +9 -54
  107. tnfr/presets.pyi +7 -0
  108. tnfr/py.typed +0 -0
  109. tnfr/rng.py +259 -73
  110. tnfr/rng.pyi +14 -0
  111. tnfr/selector.py +24 -17
  112. tnfr/selector.pyi +19 -0
  113. tnfr/sense.py +55 -43
  114. tnfr/sense.pyi +30 -0
  115. tnfr/structural.py +44 -267
  116. tnfr/structural.pyi +46 -0
  117. tnfr/telemetry/__init__.py +13 -0
  118. tnfr/telemetry/verbosity.py +37 -0
  119. tnfr/tokens.py +3 -2
  120. tnfr/tokens.pyi +41 -0
  121. tnfr/trace.py +272 -82
  122. tnfr/trace.pyi +68 -0
  123. tnfr/types.py +345 -6
  124. tnfr/types.pyi +145 -0
  125. tnfr/utils/__init__.py +158 -0
  126. tnfr/utils/__init__.pyi +133 -0
  127. tnfr/utils/cache.py +755 -0
  128. tnfr/utils/cache.pyi +156 -0
  129. tnfr/{collections_utils.py → utils/data.py} +57 -90
  130. tnfr/utils/data.pyi +73 -0
  131. tnfr/utils/graph.py +87 -0
  132. tnfr/utils/graph.pyi +10 -0
  133. tnfr/utils/init.py +746 -0
  134. tnfr/utils/init.pyi +85 -0
  135. tnfr/{json_utils.py → utils/io.py} +13 -18
  136. tnfr/utils/io.pyi +10 -0
  137. tnfr/utils/validators.py +130 -0
  138. tnfr/utils/validators.pyi +19 -0
  139. tnfr/validation/__init__.py +25 -0
  140. tnfr/validation/__init__.pyi +17 -0
  141. tnfr/validation/compatibility.py +59 -0
  142. tnfr/validation/compatibility.pyi +8 -0
  143. tnfr/validation/grammar.py +149 -0
  144. tnfr/validation/grammar.pyi +11 -0
  145. tnfr/validation/rules.py +194 -0
  146. tnfr/validation/rules.pyi +18 -0
  147. tnfr/validation/syntax.py +151 -0
  148. tnfr/validation/syntax.pyi +7 -0
  149. tnfr-6.0.0.dist-info/METADATA +135 -0
  150. tnfr-6.0.0.dist-info/RECORD +157 -0
  151. tnfr/graph_utils.py +0 -84
  152. tnfr/import_utils.py +0 -228
  153. tnfr/logging_utils.py +0 -116
  154. tnfr/validators.py +0 -84
  155. tnfr/value_utils.py +0 -59
  156. tnfr-4.5.2.dist-info/METADATA +0 -379
  157. tnfr-4.5.2.dist-info/RECORD +0 -67
  158. {tnfr-4.5.2.dist-info → tnfr-6.0.0.dist-info}/WHEEL +0 -0
  159. {tnfr-4.5.2.dist-info → tnfr-6.0.0.dist-info}/entry_points.txt +0 -0
  160. {tnfr-4.5.2.dist-info → tnfr-6.0.0.dist-info}/licenses/LICENSE.md +0 -0
  161. {tnfr-4.5.2.dist-info → tnfr-6.0.0.dist-info}/top_level.txt +0 -0
tnfr/cache.py CHANGED
@@ -1,578 +1,732 @@
- """Core caching utilities shared across TNFR helpers.
-
- This module consolidates structural cache helpers that previously lived in
- ``tnfr.helpers.cache_utils`` and ``tnfr.helpers.edge_cache``. The functions
- exposed here are responsible for maintaining deterministic node digests,
- scoped graph caches guarded by locks, and version counters that keep edge
- artifacts in sync with ΔNFR driven updates.
- """
+ """Central cache registry infrastructure for TNFR services."""

  from __future__ import annotations

- import hashlib
+ import logging
  import threading
- from collections import defaultdict
- from collections.abc import Callable, Hashable, Iterable
+ from collections.abc import Iterable
  from contextlib import contextmanager
- from functools import lru_cache
- from dataclasses import dataclass
- from typing import Any, TypeVar
+ from dataclasses import dataclass, field
+ from time import perf_counter
+ from typing import Any, Callable, Generic, Hashable, Iterator, Mapping, MutableMapping, TypeVar, cast

  from cachetools import LRUCache
- import networkx as nx # type: ignore[import-untyped]
-
- from .graph_utils import get_graph, mark_dnfr_prep_dirty
- from .import_utils import get_numpy
- from .json_utils import json_dumps
- from .logging_utils import get_logger
-
- T = TypeVar("T")
-
- __all__ = (
-     "EdgeCacheManager",
-     "LockAwareLRUCache",
-     "NODE_SET_CHECKSUM_KEY",
-     "cached_node_list",
-     "cached_nodes_and_A",
-     "clear_node_repr_cache",
-     "edge_version_cache",
-     "edge_version_update",
-     "ensure_node_index_map",
-     "ensure_node_offset_map",
-     "get_graph_version",
-     "increment_edge_version",
-     "increment_graph_version",
-     "node_set_checksum",
-     "stable_json",
- )
-
- # Key used to store the node set checksum in a graph's ``graph`` attribute.
- NODE_SET_CHECKSUM_KEY = "_node_set_checksum_cache"
-
- logger = get_logger(__name__)
-
- # Keys of cache entries dependent on the edge version. Any change to the edge
- # set requires these to be dropped to avoid stale data.
- EDGE_VERSION_CACHE_KEYS = ("_trig_version",)
-
-
- class LockAwareLRUCache(LRUCache[Hashable, Any]):
-     """``LRUCache`` that drops per-key locks when evicting items."""
-
-     def __init__(self, maxsize: int, locks: dict[Hashable, threading.RLock]):
-         super().__init__(maxsize)
-         self._locks: dict[Hashable, threading.RLock] = locks
-
-     def popitem(self) -> tuple[Hashable, Any]: # type: ignore[override]
-         key, value = super().popitem()
-         self._locks.pop(key, None)
-         return key, value
-
-
- def _ensure_graph_entry(
-     graph: Any,
-     key: str,
-     factory: Callable[[], T],
-     validator: Callable[[Any], bool],
- ) -> T:
-     """Return a validated entry from ``graph`` or create one when missing."""
-
-     value = graph.get(key)
-     if not validator(value):
-         value = factory()
-         graph[key] = value
-     return value
-
-
- def _ensure_lock_mapping(
-     graph: Any,
-     key: str,
-     *,
-     lock_factory: Callable[[], threading.RLock] = threading.RLock,
- ) -> defaultdict[Hashable, threading.RLock]:
-     """Ensure ``graph`` holds a ``defaultdict`` of locks under ``key``."""
-
-     return _ensure_graph_entry(
-         graph,
-         key,
-         factory=lambda: defaultdict(lock_factory),
-         validator=lambda value: isinstance(value, defaultdict)
-         and value.default_factory is lock_factory,
-     )
-
-
- def _prune_locks(
-     cache: dict[Hashable, Any] | LRUCache[Hashable, Any] | None,
-     locks: dict[Hashable, threading.RLock]
-     | defaultdict[Hashable, threading.RLock]
-     | None,
- ) -> None:
-     """Drop locks with no corresponding cache entry."""
-
-     if not isinstance(locks, dict):
-         return
-     cache_keys = cache.keys() if isinstance(cache, dict) else ()
-     for key in list(locks.keys()):
-         if key not in cache_keys:
-             locks.pop(key, None)
-
-
- def get_graph_version(graph: Any, key: str, default: int = 0) -> int:
-     """Return integer version stored in ``graph`` under ``key``."""
-
-     return int(graph.get(key, default))
-
-
- def increment_graph_version(graph: Any, key: str) -> int:
-     """Increment and store a version counter in ``graph`` under ``key``."""

-     version = get_graph_version(graph, key) + 1
-     graph[key] = version
-     return version
+ from .types import TimingContext

+ __all__ = [
+     "CacheManager",
+     "CacheCapacityConfig",
+     "CacheStatistics",
+     "InstrumentedLRUCache",
+     "ManagedLRUCache",
+     "prune_lock_mapping",
+ ]

- def stable_json(obj: Any) -> str:
-     """Return a JSON string with deterministic ordering for ``obj``."""

-     return json_dumps(
-         obj,
-         sort_keys=True,
-         ensure_ascii=False,
-         to_bytes=False,
-     )
+ K = TypeVar("K", bound=Hashable)
+ V = TypeVar("V")

+ _logger = logging.getLogger(__name__)

- @lru_cache(maxsize=1024)
- def _node_repr_digest(obj: Any) -> tuple[str, bytes]:
-     """Return cached stable representation and digest for ``obj``."""

-     try:
-         repr_ = stable_json(obj)
-     except TypeError:
-         repr_ = repr(obj)
-     digest = hashlib.blake2b(repr_.encode("utf-8"), digest_size=16).digest()
-     return repr_, digest
+ @dataclass(frozen=True)
+ class CacheCapacityConfig:
+     """Configuration snapshot for cache capacity policies."""

+     default_capacity: int | None
+     overrides: dict[str, int | None]

- def clear_node_repr_cache() -> None:
-     """Clear cached node representations used for checksums."""

-     _node_repr_digest.cache_clear()
+ @dataclass(frozen=True)
+ class CacheStatistics:
+     """Immutable snapshot of cache telemetry counters."""

+     hits: int = 0
+     misses: int = 0
+     evictions: int = 0
+     total_time: float = 0.0
+     timings: int = 0

- def _node_repr(n: Any) -> str:
-     """Stable representation for node hashing and sorting."""
-
-     return _node_repr_digest(n)[0]
+     def merge(self, other: CacheStatistics) -> CacheStatistics:
+         """Return aggregated metrics combining ``self`` and ``other``."""

+         return CacheStatistics(
+             hits=self.hits + other.hits,
+             misses=self.misses + other.misses,
+             evictions=self.evictions + other.evictions,
+             total_time=self.total_time + other.total_time,
+             timings=self.timings + other.timings,
+         )

- def _iter_node_digests(
-     nodes: Iterable[Any], *, presorted: bool
- ) -> Iterable[bytes]:
-     """Yield node digests in a deterministic order."""

-     if presorted:
-         for node in nodes:
-             yield _node_repr_digest(node)[1]
-     else:
-         for _, digest in sorted(
-             (_node_repr_digest(n) for n in nodes), key=lambda x: x[0]
-         ):
-             yield digest
-
-
- def _node_set_checksum_no_nodes(
-     G: nx.Graph,
-     graph: Any,
-     *,
-     presorted: bool,
-     store: bool,
- ) -> str:
-     """Checksum helper when no explicit node set is provided."""
-
-     nodes_view = G.nodes()
-     current_nodes = frozenset(nodes_view)
-     cached = graph.get(NODE_SET_CHECKSUM_KEY)
-     if cached and len(cached) == 3 and cached[2] == current_nodes:
-         return cached[1]
-
-     hasher = hashlib.blake2b(digest_size=16)
-     for digest in _iter_node_digests(nodes_view, presorted=presorted):
-         hasher.update(digest)
-
-     checksum = hasher.hexdigest()
-     if store:
-         token = checksum[:16]
-         if cached and cached[0] == token:
-             return cached[1]
-         graph[NODE_SET_CHECKSUM_KEY] = (token, checksum, current_nodes)
-     else:
-         graph.pop(NODE_SET_CHECKSUM_KEY, None)
-     return checksum
-
-
- def node_set_checksum(
-     G: nx.Graph,
-     nodes: Iterable[Any] | None = None,
-     *,
-     presorted: bool = False,
-     store: bool = True,
- ) -> str:
-     """Return a BLAKE2b checksum of ``G``'s node set."""
-
-     graph = get_graph(G)
-     if nodes is None:
-         return _node_set_checksum_no_nodes(
-             G, graph, presorted=presorted, store=store
+ @dataclass
+ class _CacheMetrics:
+     hits: int = 0
+     misses: int = 0
+     evictions: int = 0
+     total_time: float = 0.0
+     timings: int = 0
+     lock: threading.Lock = field(default_factory=threading.Lock, repr=False)
+
+     def snapshot(self) -> CacheStatistics:
+         return CacheStatistics(
+             hits=self.hits,
+             misses=self.misses,
+             evictions=self.evictions,
+             total_time=self.total_time,
+             timings=self.timings,
          )

-     hasher = hashlib.blake2b(digest_size=16)
-     for digest in _iter_node_digests(nodes, presorted=presorted):
-         hasher.update(digest)
-
-     checksum = hasher.hexdigest()
-     if store:
-         token = checksum[:16]
-         cached = graph.get(NODE_SET_CHECKSUM_KEY)
-         if cached and cached[0] == token:
-             return cached[1]
-         graph[NODE_SET_CHECKSUM_KEY] = (token, checksum)
-     else:
-         graph.pop(NODE_SET_CHECKSUM_KEY, None)
-     return checksum

+ @dataclass
+ class _CacheEntry:
+     factory: Callable[[], Any]
+     lock: threading.Lock
+     reset: Callable[[Any], Any] | None = None

- @dataclass(slots=True)
- class NodeCache:
-     """Container for cached node data."""

-     checksum: str
-     nodes: tuple[Any, ...]
-     sorted_nodes: tuple[Any, ...] | None = None
-     idx: dict[Any, int] | None = None
-     offset: dict[Any, int] | None = None
+ class CacheManager:
+     """Coordinate named caches guarded by per-entry locks."""

-     @property
-     def n(self) -> int:
-         return len(self.nodes)
+     _MISSING = object()

+     def __init__(
+         self,
+         storage: MutableMapping[str, Any] | None = None,
+         *,
+         default_capacity: int | None = None,
+         overrides: Mapping[str, int | None] | None = None,
+     ) -> None:
+         self._storage: MutableMapping[str, Any]
+         if storage is None:
+             self._storage = {}
+         else:
+             self._storage = storage
+         self._entries: dict[str, _CacheEntry] = {}
+         self._registry_lock = threading.RLock()
+         self._default_capacity = self._normalise_capacity(default_capacity)
+         self._capacity_overrides: dict[str, int | None] = {}
+         self._metrics: dict[str, _CacheMetrics] = {}
+         self._metrics_publishers: list[Callable[[str, CacheStatistics], None]] = []
+         if overrides:
+             self.configure(overrides=overrides)
+
+     @staticmethod
+     def _normalise_capacity(value: int | None) -> int | None:
+         if value is None:
+             return None
+         size = int(value)
+         if size < 0:
+             raise ValueError("capacity must be non-negative or None")
+         return size
+
+     def register(
+         self,
+         name: str,
+         factory: Callable[[], Any],
+         *,
+         lock_factory: Callable[[], threading.Lock | threading.RLock] | None = None,
+         reset: Callable[[Any], Any] | None = None,
+         create: bool = True,
+     ) -> None:
+         """Register ``name`` with ``factory`` and optional lifecycle hooks."""
+
+         if lock_factory is None:
+             lock_factory = threading.RLock
+         with self._registry_lock:
+             entry = self._entries.get(name)
+             if entry is None:
+                 entry = _CacheEntry(factory=factory, lock=lock_factory(), reset=reset)
+                 self._entries[name] = entry
+             else:
+                 # Update hooks when re-registering the same cache name.
+                 entry.factory = factory
+                 entry.reset = reset
+             self._ensure_metrics(name)
+         if create:
+             self.get(name)
+
+     def configure(
+         self,
+         *,
+         default_capacity: int | None | object = _MISSING,
+         overrides: Mapping[str, int | None] | None = None,
+         replace_overrides: bool = False,
+     ) -> None:
+         """Update the cache capacity policy shared by registered entries."""
+
+         with self._registry_lock:
+             if default_capacity is not self._MISSING:
+                 self._default_capacity = self._normalise_capacity(
+                     default_capacity if default_capacity is not None else None
+                 )
+             if overrides is not None:
+                 if replace_overrides:
+                     self._capacity_overrides.clear()
+                 for key, value in overrides.items():
+                     self._capacity_overrides[key] = self._normalise_capacity(value)
+
+     def configure_from_mapping(self, config: Mapping[str, Any]) -> None:
+         """Load configuration produced by :meth:`export_config`."""
+
+         default = config.get("default_capacity", self._MISSING)
+         overrides = config.get("overrides")
+         overrides_mapping: Mapping[str, int | None] | None
+         overrides_mapping = overrides if isinstance(overrides, Mapping) else None
+         self.configure(default_capacity=default, overrides=overrides_mapping)
+
+     def export_config(self) -> CacheCapacityConfig:
+         """Return a copy of the current capacity configuration."""
+
+         with self._registry_lock:
+             return CacheCapacityConfig(
+                 default_capacity=self._default_capacity,
+                 overrides=dict(self._capacity_overrides),
+             )

- def _update_node_cache(
-     graph: Any,
-     nodes: tuple[Any, ...],
-     key: str,
-     *,
-     checksum: str,
-     sorted_nodes: tuple[Any, ...] | None = None,
- ) -> None:
-     """Store ``nodes`` and ``checksum`` in ``graph`` under ``key``."""
-
-     graph[f"{key}_cache"] = NodeCache(
-         checksum=checksum, nodes=nodes, sorted_nodes=sorted_nodes
-     )
-     graph[f"{key}_checksum"] = checksum
-
-
- def _refresh_node_list_cache(
-     G: nx.Graph,
-     graph: Any,
-     *,
-     sort_nodes: bool,
-     current_n: int,
- ) -> tuple[Any, ...]:
-     """Refresh the cached node list and return the nodes."""
-
-     nodes = tuple(G.nodes())
-     checksum = node_set_checksum(G, nodes, store=True)
-     sorted_nodes = tuple(sorted(nodes, key=_node_repr)) if sort_nodes else None
-     _update_node_cache(
-         graph,
-         nodes,
-         "_node_list",
-         checksum=checksum,
-         sorted_nodes=sorted_nodes,
-     )
-     graph["_node_list_len"] = current_n
-     return nodes
-
-
- def _reuse_node_list_cache(
-     graph: Any,
-     cache: NodeCache,
-     nodes: tuple[Any, ...],
-     sorted_nodes: tuple[Any, ...] | None,
-     *,
-     sort_nodes: bool,
-     new_checksum: str | None,
- ) -> None:
-     """Reuse existing node cache and record its checksum if missing."""
-
-     checksum = cache.checksum if new_checksum is None else new_checksum
-     if sort_nodes and sorted_nodes is None:
-         sorted_nodes = tuple(sorted(nodes, key=_node_repr))
-     _update_node_cache(
-         graph,
-         nodes,
-         "_node_list",
-         checksum=checksum,
-         sorted_nodes=sorted_nodes,
-     )
-
-
- def _cache_node_list(G: nx.Graph) -> tuple[Any, ...]:
-     """Cache and return the tuple of nodes for ``G``."""
-
-     graph = get_graph(G)
-     cache: NodeCache | None = graph.get("_node_list_cache")
-     nodes = cache.nodes if cache else None
-     sorted_nodes = cache.sorted_nodes if cache else None
-     stored_len = graph.get("_node_list_len")
-     current_n = G.number_of_nodes()
-     dirty = bool(graph.pop("_node_list_dirty", False))
-
-     invalid = nodes is None or stored_len != current_n or dirty
-     new_checksum: str | None = None
-
-     if not invalid and cache:
-         new_checksum = node_set_checksum(G)
-         invalid = cache.checksum != new_checksum
-
-     sort_nodes = bool(graph.get("SORT_NODES", False))
-
-     if invalid:
-         nodes = _refresh_node_list_cache(
-             G, graph, sort_nodes=sort_nodes, current_n=current_n
-         )
-     elif cache and "_node_list_checksum" not in graph:
-         _reuse_node_list_cache(
-             graph,
-             cache,
-             nodes,
-             sorted_nodes,
-             sort_nodes=sort_nodes,
-             new_checksum=new_checksum,
-         )
-     else:
-         if sort_nodes and sorted_nodes is None and cache is not None:
-             cache.sorted_nodes = tuple(sorted(nodes, key=_node_repr))
-     return nodes
+     def get_capacity(
+         self,
+         name: str,
+         *,
+         requested: int | None = None,
+         fallback: int | None = None,
+         use_default: bool = True,
+     ) -> int | None:
+         """Return capacity for ``name`` considering overrides and defaults."""
+
+         with self._registry_lock:
+             override = self._capacity_overrides.get(name, self._MISSING)
+             default = self._default_capacity
+             if override is not self._MISSING:
+                 return override
+             values: tuple[int | None, ...]
+             if use_default:
+                 values = (requested, default, fallback)
+             else:
+                 values = (requested, fallback)
+             for value in values:
+                 if value is self._MISSING:
+                     continue
+                 normalised = self._normalise_capacity(value)
+                 if normalised is not None:
+                     return normalised
+             return None
+
+     def has_override(self, name: str) -> bool:
+         """Return ``True`` if ``name`` has an explicit capacity override."""
+
+         with self._registry_lock:
+             return name in self._capacity_overrides
+
+     def get_lock(self, name: str) -> threading.Lock | threading.RLock:
+         entry = self._entries.get(name)
+         if entry is None:
+             raise KeyError(name)
+         return entry.lock
+
+     def names(self) -> Iterator[str]:
+         with self._registry_lock:
+             return iter(tuple(self._entries))
+
+     def get(self, name: str, *, create: bool = True) -> Any:
+         entry = self._entries.get(name)
+         if entry is None:
+             raise KeyError(name)
+         with entry.lock:
+             value = self._storage.get(name)
+             if create and value is None:
+                 value = entry.factory()
+                 self._storage[name] = value
+             return value

+     def peek(self, name: str) -> Any:
+         return self.get(name, create=False)

- def cached_node_list(G: nx.Graph) -> tuple[Any, ...]:
-     """Public wrapper returning the cached node tuple for ``G``."""
+     def store(self, name: str, value: Any) -> None:
+         entry = self._entries.get(name)
+         if entry is None:
+             raise KeyError(name)
+         with entry.lock:
+             self._storage[name] = value

-     return _cache_node_list(G)
+     def update(
+         self,
+         name: str,
+         updater: Callable[[Any], Any],
+         *,
+         create: bool = True,
+     ) -> Any:
+         entry = self._entries.get(name)
+         if entry is None:
+             raise KeyError(name)
+         with entry.lock:
+             current = self._storage.get(name)
+             if create and current is None:
+                 current = entry.factory()
+             new_value = updater(current)
+             self._storage[name] = new_value
+             return new_value
+
+     def clear(self, name: str | None = None) -> None:
+         if name is not None:
+             names = (name,)
+         else:
+             with self._registry_lock:
+                 names = tuple(self._entries)
+         for cache_name in names:
+             entry = self._entries.get(cache_name)
+             if entry is None:
+                 continue
+             with entry.lock:
+                 current = self._storage.get(cache_name)
+                 new_value = None
+                 if entry.reset is not None:
+                     new_value = entry.reset(current)
+                 if new_value is None:
+                     try:
+                         new_value = entry.factory()
+                     except Exception:
+                         self._storage.pop(cache_name, None)
+                         continue
+                 self._storage[cache_name] = new_value
+
+     # ------------------------------------------------------------------
+     # Metrics helpers
+
+     def _ensure_metrics(self, name: str) -> _CacheMetrics:
+         metrics = self._metrics.get(name)
+         if metrics is None:
+             with self._registry_lock:
+                 metrics = self._metrics.get(name)
+                 if metrics is None:
+                     metrics = _CacheMetrics()
+                     self._metrics[name] = metrics
+         return metrics
+
+     def increment_hit(
+         self,
+         name: str,
+         *,
+         amount: int = 1,
+         duration: float | None = None,
+     ) -> None:
+         metrics = self._ensure_metrics(name)
+         with metrics.lock:
+             metrics.hits += int(amount)
+             if duration is not None:
+                 metrics.total_time += float(duration)
+                 metrics.timings += 1
+
+     def increment_miss(
+         self,
+         name: str,
+         *,
+         amount: int = 1,
+         duration: float | None = None,
+     ) -> None:
+         metrics = self._ensure_metrics(name)
+         with metrics.lock:
+             metrics.misses += int(amount)
+             if duration is not None:
+                 metrics.total_time += float(duration)
+                 metrics.timings += 1
+
+     def increment_eviction(self, name: str, *, amount: int = 1) -> None:
+         metrics = self._ensure_metrics(name)
+         with metrics.lock:
+             metrics.evictions += int(amount)
+
+     def record_timing(self, name: str, duration: float) -> None:
+         metrics = self._ensure_metrics(name)
+         with metrics.lock:
+             metrics.total_time += float(duration)
+             metrics.timings += 1
+
+     @contextmanager
+     def timer(self, name: str) -> TimingContext:
+         """Context manager recording execution time for ``name``."""
+
+         start = perf_counter()
+         try:
+             yield
+         finally:
+             self.record_timing(name, perf_counter() - start)
+
+     def get_metrics(self, name: str) -> CacheStatistics:
+         metrics = self._metrics.get(name)
+         if metrics is None:
+             return CacheStatistics()
+         with metrics.lock:
+             return metrics.snapshot()
+
+     def iter_metrics(self) -> Iterator[tuple[str, CacheStatistics]]:
+         with self._registry_lock:
+             items = tuple(self._metrics.items())
+         for name, metrics in items:
+             with metrics.lock:
+                 yield name, metrics.snapshot()
+
+     def aggregate_metrics(self) -> CacheStatistics:
+         aggregate = CacheStatistics()
+         for _, stats in self.iter_metrics():
+             aggregate = aggregate.merge(stats)
+         return aggregate
+
+     def register_metrics_publisher(
+         self, publisher: Callable[[str, CacheStatistics], None]
+     ) -> None:
+         with self._registry_lock:
+             self._metrics_publishers.append(publisher)
+
+     def publish_metrics(
+         self,
+         *,
+         publisher: Callable[[str, CacheStatistics], None] | None = None,
+     ) -> None:
+         if publisher is None:
+             with self._registry_lock:
+                 publishers = tuple(self._metrics_publishers)
+         else:
+             publishers = (publisher,)
+         if not publishers:
+             return
+         snapshot = tuple(self.iter_metrics())
+         for emit in publishers:
+             for name, stats in snapshot:
+                 try:
+                     emit(name, stats)
+                 except Exception: # pragma: no cover - defensive logging
+                     logging.getLogger(__name__).exception(
+                         "Cache metrics publisher failed for %s", name
+                     )
+
+     def log_metrics(self, logger: logging.Logger, *, level: int = logging.INFO) -> None:
+         """Emit cache metrics using ``logger`` for telemetry hooks."""
+
+         for name, stats in self.iter_metrics():
+             logger.log(
+                 level,
+                 "cache=%s hits=%d misses=%d evictions=%d timings=%d total_time=%.6f",
+                 name,
+                 stats.hits,
+                 stats.misses,
+                 stats.evictions,
+                 stats.timings,
+                 stats.total_time,
+             )


- def _ensure_node_map(
-     G,
-     *,
-     attrs: tuple[str, ...],
-     sort: bool = False,
- ) -> dict[Any, int]:
-     """Return cached node-to-index/offset mappings stored on ``NodeCache``."""
+ def _normalise_callbacks(
+     callbacks: Iterable[Callable[[K, V], None]] | Callable[[K, V], None] | None,
+ ) -> tuple[Callable[[K, V], None], ...]:
+     if callbacks is None:
+         return ()
+     if callable(callbacks):
+         return (callbacks,)
+     return tuple(callbacks)

-     graph = G.graph
-     _cache_node_list(G)
-     cache: NodeCache = graph["_node_list_cache"]

-     missing = [attr for attr in attrs if getattr(cache, attr) is None]
-     if missing:
-         if sort:
-             nodes = cache.sorted_nodes
-             if nodes is None:
-                 nodes = cache.sorted_nodes = tuple(
-                     sorted(cache.nodes, key=_node_repr)
-                 )
-         else:
-             nodes = cache.nodes
-         mappings: dict[str, dict[Any, int]] = {attr: {} for attr in missing}
-         for idx, node in enumerate(nodes):
-             for attr in missing:
-                 mappings[attr][node] = idx
-         for attr in missing:
-             setattr(cache, attr, mappings[attr])
-     return getattr(cache, attrs[0])
+ def prune_lock_mapping(
+     cache: Mapping[K, Any] | MutableMapping[K, Any] | None,
+     locks: MutableMapping[K, Any] | None,
+ ) -> None:
+     """Drop lock entries not present in ``cache``."""

+     if locks is None:
+         return
+     if cache is None:
+         cache_keys: set[K] = set()
+     else:
+         cache_keys = set(cache.keys())
+     for key in list(locks.keys()):
+         if key not in cache_keys:
+             locks.pop(key, None)

- def ensure_node_index_map(G) -> dict[Any, int]:
-     """Return cached node-to-index mapping for ``G``."""

-     return _ensure_node_map(G, attrs=("idx",), sort=False)
+ class InstrumentedLRUCache(MutableMapping[K, V], Generic[K, V]):
+     """LRU cache wrapper that synchronises telemetry, callbacks and locks.

+     The wrapper owns an internal :class:`cachetools.LRUCache` instance and
+     forwards all read operations to it. Mutating operations are instrumented to
+     update :class:`CacheManager` metrics, execute registered callbacks and keep
+     an optional lock mapping aligned with the stored keys. Telemetry callbacks
+     always execute before eviction callbacks, preserving the registration order
+     for deterministic side effects.

- def ensure_node_offset_map(G) -> dict[Any, int]:
-     """Return cached node-to-offset mapping for ``G``."""
+     Callbacks can be extended or replaced after construction via
+     :meth:`set_telemetry_callbacks` and :meth:`set_eviction_callbacks`. When
+     ``append`` is ``False`` (default) the provided callbacks replace the
+     existing sequence; otherwise they are appended at the end while keeping the
+     previous ordering intact.
+     """

-     sort = bool(G.graph.get("SORT_NODES", False))
-     return _ensure_node_map(G, attrs=("offset",), sort=sort)
+     _MISSING = object()

+     def __init__(
+         self,
+         maxsize: int,
+         *,
+         manager: CacheManager | None = None,
+         metrics_key: str | None = None,
+         telemetry_callbacks: Iterable[Callable[[K, V], None]]
+         | Callable[[K, V], None]
+         | None = None,
+         eviction_callbacks: Iterable[Callable[[K, V], None]]
+         | Callable[[K, V], None]
+         | None = None,
+         locks: MutableMapping[K, Any] | None = None,
+         getsizeof: Callable[[V], int] | None = None,
+         count_overwrite_hit: bool = True,
+     ) -> None:
+         self._cache: LRUCache[K, V] = LRUCache(maxsize, getsizeof=getsizeof)
+         original_popitem = self._cache.popitem
+
+         def _instrumented_popitem() -> tuple[K, V]:
+             key, value = original_popitem()
+             self._dispatch_removal(key, value)
+             return key, value
+
+         self._cache.popitem = _instrumented_popitem # type: ignore[assignment]
+         self._manager = manager
+         self._metrics_key = metrics_key
+         self._locks = locks
+         self._count_overwrite_hit = bool(count_overwrite_hit)
+         self._telemetry_callbacks: list[Callable[[K, V], None]]
+         self._telemetry_callbacks = list(_normalise_callbacks(telemetry_callbacks))
+         self._eviction_callbacks: list[Callable[[K, V], None]]
+         self._eviction_callbacks = list(_normalise_callbacks(eviction_callbacks))
+
+     # ------------------------------------------------------------------
+     # Callback registration helpers

- class EdgeCacheManager:
-     """Coordinate cache storage and per-key locks for edge version caches."""
+     @property
+     def telemetry_callbacks(self) -> tuple[Callable[[K, V], None], ...]:
+         """Return currently registered telemetry callbacks."""

-     _LOCK = threading.RLock()
+         return tuple(self._telemetry_callbacks)

-     def __init__(self, graph: Any) -> None:
-         self.graph = graph
-         self.cache_key = "_edge_version_cache"
-         self.locks_key = "_edge_version_cache_locks"
+     @property
+     def eviction_callbacks(self) -> tuple[Callable[[K, V], None], ...]:
+         """Return currently registered eviction callbacks."""

-     def _validator(self, max_entries: int | None) -> Callable[[Any], bool]:
-         if max_entries is None:
-             return lambda value: value is not None and not isinstance(value, LRUCache)
-         return lambda value: isinstance(value, LRUCache) and value.maxsize == max_entries
+         return tuple(self._eviction_callbacks)

-     def _factory(
-         self,
-         max_entries: int | None,
-         locks: dict[Hashable, threading.RLock]
-         | defaultdict[Hashable, threading.RLock],
-     ) -> dict[Hashable, Any] | LRUCache[Hashable, Any]:
-         if max_entries:
-             return LockAwareLRUCache(max_entries, locks) # type: ignore[arg-type]
-         return {}
-
-     def get_cache(
+     def set_telemetry_callbacks(
          self,
-         max_entries: int | None,
-         *,
-         create: bool = True,
-     ) -> tuple[
-         dict[Hashable, Any] | LRUCache[Hashable, Any] | None,
-         dict[Hashable, threading.RLock]
-         | defaultdict[Hashable, threading.RLock]
+         callbacks: Iterable[Callable[[K, V], None]]
+         | Callable[[K, V], None]
          | None,
-     ]:
-         """Return the cache and lock mapping for the manager's graph."""
-
-         with self._LOCK:
-             if not create:
-                 cache = self.graph.get(self.cache_key)
-                 locks = self.graph.get(self.locks_key)
-                 return cache, locks
-
-             locks = _ensure_lock_mapping(self.graph, self.locks_key)
-             cache = _ensure_graph_entry(
-                 self.graph,
-                 self.cache_key,
-                 factory=lambda: self._factory(max_entries, locks),
-                 validator=self._validator(max_entries),
-             )
-             if max_entries is None:
-                 _prune_locks(cache, locks)
-             return cache, locks
-
-
- def edge_version_cache(
-     G: Any,
-     key: Hashable,
-     builder: Callable[[], T],
-     *,
-     max_entries: int | None = 128,
- ) -> T:
-     """Return cached ``builder`` output tied to the edge version of ``G``."""
-
-     if max_entries is not None:
-         max_entries = int(max_entries)
-         if max_entries < 0:
-             raise ValueError("max_entries must be non-negative or None")
-     if max_entries is not None and max_entries == 0:
-         return builder()
-
-     graph = get_graph(G)
-     manager = graph.get("_edge_cache_manager") # type: ignore[assignment]
-     if not isinstance(manager, EdgeCacheManager) or manager.graph is not graph:
-         manager = EdgeCacheManager(graph)
-         graph["_edge_cache_manager"] = manager
-
-     cache, locks = manager.get_cache(max_entries)
-     edge_version = get_graph_version(graph, "_edge_version")
-     lock = locks[key]
-
-     with lock:
-         entry = cache.get(key)
-         if entry is not None and entry[0] == edge_version:
-             return entry[1]
-
-     try:
-         value = builder()
-     except (RuntimeError, ValueError) as exc: # pragma: no cover - logging side effect
-         logger.exception("edge_version_cache builder failed for %r: %s", key, exc)
-         raise
-     else:
-         with lock:
-             entry = cache.get(key)
-             if entry is not None and entry[0] == edge_version:
-                 return entry[1]
-             cache[key] = (edge_version, value)
-             return value
-
-
- def cached_nodes_and_A(
-     G: nx.Graph, *, cache_size: int | None = 1, require_numpy: bool = False
- ) -> tuple[tuple[Any, ...], Any]:
-     """Return cached nodes tuple and adjacency matrix for ``G``."""
-
-     nodes = cached_node_list(G)
-     graph = G.graph
+         *,
+         append: bool = False,
+     ) -> None:
+         """Update telemetry callbacks executed on removals.
+
+         When ``append`` is ``True`` the provided callbacks are added to the end
+         of the execution chain while preserving relative order. Otherwise, the
+         previous callbacks are replaced.
+         """
+
+         new_callbacks = list(_normalise_callbacks(callbacks))
+         if append:
+             self._telemetry_callbacks.extend(new_callbacks)
+         else:
+             self._telemetry_callbacks = new_callbacks

-     checksum = getattr(graph.get("_node_list_cache"), "checksum", None)
-     if checksum is None:
-         checksum = graph.get("_node_list_checksum")
-     if checksum is None:
-         node_set_cache = graph.get(NODE_SET_CHECKSUM_KEY)
-         if isinstance(node_set_cache, tuple) and len(node_set_cache) >= 2:
-             checksum = node_set_cache[1]
-     if checksum is None:
-         checksum = ""
+     def set_eviction_callbacks(
+         self,
+         callbacks: Iterable[Callable[[K, V], None]]
+         | Callable[[K, V], None]
+         | None,
+         *,
+         append: bool = False,
+     ) -> None:
+         """Update eviction callbacks executed on removals.

-     key = f"_dnfr_{len(nodes)}_{checksum}"
-     graph["_dnfr_nodes_checksum"] = checksum
+         Behaviour matches :meth:`set_telemetry_callbacks`.
+         """

-     def builder() -> tuple[tuple[Any, ...], Any]:
-         np = get_numpy()
-         if np is None:
-             return nodes, None
-         A = nx.to_numpy_array(G, nodelist=nodes, weight=None, dtype=float)
-         return nodes, A
+         new_callbacks = list(_normalise_callbacks(callbacks))
+         if append:
+             self._eviction_callbacks.extend(new_callbacks)
+         else:
+             self._eviction_callbacks = new_callbacks

-     nodes, A = edge_version_cache(G, key, builder, max_entries=cache_size)
+     # ------------------------------------------------------------------
+     # MutableMapping interface

-     if require_numpy and A is None:
-         raise RuntimeError("NumPy is required for adjacency caching")
+     def __getitem__(self, key: K) -> V:
+         return self._cache[key]

-     return nodes, A
+     def __setitem__(self, key: K, value: V) -> None:
+         exists = key in self._cache
+         self._cache[key] = value
+         if exists:
+             if self._count_overwrite_hit:
+                 self._record_hit(1)
+         else:
+             self._record_miss(1)

+     def __delitem__(self, key: K) -> None:
+         try:
+             value = self._cache[key]
+         except KeyError:
+             self._record_miss(1)
+             raise
+         del self._cache[key]
+         self._dispatch_removal(key, value, hits=1)

- def _reset_edge_caches(graph: Any, G: Any) -> None:
-     """Clear caches affected by edge updates."""
+     def __iter__(self) -> Iterator[K]:
+         return iter(self._cache)

-     cache, locks = EdgeCacheManager(graph).get_cache(None, create=False)
-     if isinstance(cache, (dict, LRUCache)):
-         cache.clear()
-     if isinstance(locks, dict):
-         locks.clear()
-     mark_dnfr_prep_dirty(G)
-     clear_node_repr_cache()
-     for key in EDGE_VERSION_CACHE_KEYS:
-         graph.pop(key, None)
+     def __len__(self) -> int:
+         return len(self._cache)

+     def __contains__(self, key: object) -> bool:
+         return key in self._cache

- def increment_edge_version(G: Any) -> None:
-     """Increment the edge version counter in ``G.graph``."""
+     def __repr__(self) -> str: # pragma: no cover - debugging helper
+         return f"{self.__class__.__name__}({self._cache!r})"

-     graph = get_graph(G)
-     increment_graph_version(graph, "_edge_version")
-     _reset_edge_caches(graph, G)
+     # ------------------------------------------------------------------
+     # Cache helpers

+     @property
+     def maxsize(self) -> int:
+         return self._cache.maxsize

- @contextmanager
- def edge_version_update(G: Any):
-     """Scope a batch of edge mutations."""
+     @property
+     def currsize(self) -> int:
+         return self._cache.currsize
+
+     def get(self, key: K, default: V | None = None) -> V | None:
+         return self._cache.get(key, default)
+
+     def pop(self, key: K, default: Any = _MISSING) -> V:
+         try:
+             value = self._cache[key]
+         except KeyError:
+             self._record_miss(1)
+             if default is self._MISSING:
+                 raise
+             return cast(V, default)
+         del self._cache[key]
+         self._dispatch_removal(key, value, hits=1)
+         return value
+
+     def popitem(self) -> tuple[K, V]:
+         return self._cache.popitem()
+
+     def clear(self) -> None: # type: ignore[override]
+         while True:
+             try:
+                 self.popitem()
+             except KeyError:
+                 break
+         if self._locks is not None:
+             try:
+                 self._locks.clear()
+             except Exception: # pragma: no cover - defensive logging
+                 _logger.exception("lock cleanup failed during cache clear")
+
+     # ------------------------------------------------------------------
+     # Internal helpers
+
+     def _record_hit(self, amount: int) -> None:
+         if amount and self._manager is not None and self._metrics_key is not None:
+             self._manager.increment_hit(self._metrics_key, amount=amount)
+
+     def _record_miss(self, amount: int) -> None:
+         if amount and self._manager is not None and self._metrics_key is not None:
+             self._manager.increment_miss(self._metrics_key, amount=amount)
+
+     def _record_eviction(self, amount: int) -> None:
+         if amount and self._manager is not None and self._metrics_key is not None:
+             self._manager.increment_eviction(self._metrics_key, amount=amount)
+
+     def _dispatch_removal(
+         self,
+         key: K,
+         value: V,
+         *,
+         hits: int = 0,
+         misses: int = 0,
+         eviction_amount: int = 1,
+         purge_lock: bool = True,
+     ) -> None:
+         if hits:
+             self._record_hit(hits)
+         if misses:
+             self._record_miss(misses)
+         if eviction_amount:
+             self._record_eviction(eviction_amount)
+         self._emit_callbacks(self._telemetry_callbacks, key, value, "telemetry")
+         self._emit_callbacks(self._eviction_callbacks, key, value, "eviction")
+         if purge_lock:
+             self._purge_lock(key)
+
+     def _emit_callbacks(
+         self,
+         callbacks: Iterable[Callable[[K, V], None]],
+         key: K,
+         value: V,
+         kind: str,
+     ) -> None:
+         for callback in callbacks:
+             try:
+                 callback(key, value)
+             except Exception: # pragma: no cover - defensive logging
+                 _logger.exception("%s callback failed for %r", kind, key)
+
+     def _purge_lock(self, key: K) -> None:
+         if self._locks is None:
+             return
+         try:
+             self._locks.pop(key, None)
+         except Exception: # pragma: no cover - defensive logging
+             _logger.exception("lock cleanup failed for %r", key)
+
+ class ManagedLRUCache(LRUCache[K, V]):
+     """LRU cache wrapper with telemetry hooks and lock synchronisation."""
+
+     def __init__(
+         self,
+         maxsize: int,
+         *,
+         manager: CacheManager | None = None,
+         metrics_key: str | None = None,
+         eviction_callbacks: Iterable[Callable[[K, V], None]]
+         | Callable[[K, V], None]
+         | None = None,
+         telemetry_callbacks: Iterable[Callable[[K, V], None]]
+         | Callable[[K, V], None]
+         | None = None,
+         locks: MutableMapping[K, Any] | None = None,
+     ) -> None:
+         super().__init__(maxsize)
+         self._manager = manager
+         self._metrics_key = metrics_key
+         self._locks = locks
+         self._eviction_callbacks = _normalise_callbacks(eviction_callbacks)
+         self._telemetry_callbacks = _normalise_callbacks(telemetry_callbacks)

-     increment_edge_version(G)
-     try:
-         yield
-     finally:
-         increment_edge_version(G)
+     def popitem(self) -> tuple[K, V]: # type: ignore[override]
+         key, value = super().popitem()
+         if self._locks is not None:
+             try:
+                 self._locks.pop(key, None)
+             except Exception: # pragma: no cover - defensive logging
+                 _logger.exception("lock cleanup failed for %r", key)
+         if self._manager is not None and self._metrics_key is not None:
+             self._manager.increment_eviction(self._metrics_key)
+         for callback in self._telemetry_callbacks:
+             try:
+                 callback(key, value)
+             except Exception: # pragma: no cover - defensive logging
+                 _logger.exception("telemetry callback failed for %r", key)
+         for callback in self._eviction_callbacks:
+             try:
+                 callback(key, value)
+             except Exception: # pragma: no cover - defensive logging
+                 _logger.exception("eviction callback failed for %r", key)
+         return key, value
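
Usage sketch (not part of the diff): the snippet below exercises the 6.0.0 CacheManager API exactly as it appears above — registration, capacity overrides, and the telemetry counters. The cache name "demo" and its contents are illustrative, not from the package.

from tnfr.cache import CacheManager

manager = CacheManager(default_capacity=128, overrides={"demo": 32})

# Each named cache gets a factory plus an optional reset hook; every entry
# is guarded by its own lock inside the manager.
manager.register("demo", factory=dict, reset=lambda current: {})

# update() applies a callable to the current value under the entry lock.
manager.update("demo", lambda cache: {**cache, "answer": 42})
assert manager.get("demo") == {"answer": 42}
assert manager.get_capacity("demo") == 32  # explicit override wins over the default

# Telemetry: counters plus the timing context manager feed CacheStatistics.
manager.increment_hit("demo")
manager.increment_miss("demo", duration=0.002)
with manager.timer("demo"):
    manager.peek("demo")  # get(..., create=False)

stats = manager.get_metrics("demo")
print(stats.hits, stats.misses, stats.evictions, stats.timings)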
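
A second illustrative sketch wires InstrumentedLRUCache to a manager so evictions update metrics and fire callbacks (telemetry callbacks run before eviction callbacks, per the docstring above); the metrics key "pairs" and the callback are invented for the example:

from tnfr.cache import CacheManager, InstrumentedLRUCache

manager = CacheManager()
evicted: list[tuple[str, int]] = []

cache: InstrumentedLRUCache[str, int] = InstrumentedLRUCache(
    2,
    manager=manager,
    metrics_key="pairs",
    eviction_callbacks=lambda key, value: evicted.append((key, value)),
)

cache["a"] = 1  # unseen key: counted as a miss
cache["b"] = 2
cache["c"] = 3  # exceeds maxsize=2: the LRU entry is evicted, callback fires

assert evicted == [("a", 1)]
assert manager.get_metrics("pairs").evictions == 1

ManagedLRUCache, also added in this diff, is the lighter variant: it subclasses cachetools.LRUCache directly and performs the same lock cleanup, eviction counting, and callback dispatch inside popitem.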