tnfr-4.5.1-py3-none-any.whl → tnfr-6.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tnfr/__init__.py +270 -90
- tnfr/__init__.pyi +40 -0
- tnfr/_compat.py +11 -0
- tnfr/_version.py +7 -0
- tnfr/_version.pyi +7 -0
- tnfr/alias.py +631 -0
- tnfr/alias.pyi +140 -0
- tnfr/cache.py +732 -0
- tnfr/cache.pyi +232 -0
- tnfr/callback_utils.py +381 -0
- tnfr/callback_utils.pyi +105 -0
- tnfr/cli/__init__.py +89 -0
- tnfr/cli/__init__.pyi +47 -0
- tnfr/cli/arguments.py +199 -0
- tnfr/cli/arguments.pyi +33 -0
- tnfr/cli/execution.py +322 -0
- tnfr/cli/execution.pyi +80 -0
- tnfr/cli/utils.py +34 -0
- tnfr/cli/utils.pyi +8 -0
- tnfr/config/__init__.py +12 -0
- tnfr/config/__init__.pyi +8 -0
- tnfr/config/constants.py +104 -0
- tnfr/config/constants.pyi +12 -0
- tnfr/config/init.py +36 -0
- tnfr/config/init.pyi +8 -0
- tnfr/config/operator_names.py +106 -0
- tnfr/config/operator_names.pyi +28 -0
- tnfr/config/presets.py +104 -0
- tnfr/config/presets.pyi +7 -0
- tnfr/constants/__init__.py +228 -0
- tnfr/constants/__init__.pyi +104 -0
- tnfr/constants/core.py +158 -0
- tnfr/constants/core.pyi +17 -0
- tnfr/constants/init.py +31 -0
- tnfr/constants/init.pyi +12 -0
- tnfr/constants/metric.py +102 -0
- tnfr/constants/metric.pyi +19 -0
- tnfr/constants_glyphs.py +16 -0
- tnfr/constants_glyphs.pyi +12 -0
- tnfr/dynamics/__init__.py +136 -0
- tnfr/dynamics/__init__.pyi +83 -0
- tnfr/dynamics/adaptation.py +201 -0
- tnfr/dynamics/aliases.py +22 -0
- tnfr/dynamics/coordination.py +343 -0
- tnfr/dynamics/dnfr.py +2315 -0
- tnfr/dynamics/dnfr.pyi +33 -0
- tnfr/dynamics/integrators.py +561 -0
- tnfr/dynamics/integrators.pyi +35 -0
- tnfr/dynamics/runtime.py +521 -0
- tnfr/dynamics/sampling.py +34 -0
- tnfr/dynamics/sampling.pyi +7 -0
- tnfr/dynamics/selectors.py +680 -0
- tnfr/execution.py +216 -0
- tnfr/execution.pyi +65 -0
- tnfr/flatten.py +283 -0
- tnfr/flatten.pyi +28 -0
- tnfr/gamma.py +320 -89
- tnfr/gamma.pyi +40 -0
- tnfr/glyph_history.py +337 -0
- tnfr/glyph_history.pyi +53 -0
- tnfr/grammar.py +23 -153
- tnfr/grammar.pyi +13 -0
- tnfr/helpers/__init__.py +151 -0
- tnfr/helpers/__init__.pyi +66 -0
- tnfr/helpers/numeric.py +88 -0
- tnfr/helpers/numeric.pyi +12 -0
- tnfr/immutable.py +214 -0
- tnfr/immutable.pyi +37 -0
- tnfr/initialization.py +199 -0
- tnfr/initialization.pyi +73 -0
- tnfr/io.py +311 -0
- tnfr/io.pyi +11 -0
- tnfr/locking.py +37 -0
- tnfr/locking.pyi +7 -0
- tnfr/metrics/__init__.py +41 -0
- tnfr/metrics/__init__.pyi +20 -0
- tnfr/metrics/coherence.py +1469 -0
- tnfr/metrics/common.py +149 -0
- tnfr/metrics/common.pyi +15 -0
- tnfr/metrics/core.py +259 -0
- tnfr/metrics/core.pyi +13 -0
- tnfr/metrics/diagnosis.py +840 -0
- tnfr/metrics/diagnosis.pyi +89 -0
- tnfr/metrics/export.py +151 -0
- tnfr/metrics/glyph_timing.py +369 -0
- tnfr/metrics/reporting.py +152 -0
- tnfr/metrics/reporting.pyi +12 -0
- tnfr/metrics/sense_index.py +294 -0
- tnfr/metrics/sense_index.pyi +9 -0
- tnfr/metrics/trig.py +216 -0
- tnfr/metrics/trig.pyi +12 -0
- tnfr/metrics/trig_cache.py +105 -0
- tnfr/metrics/trig_cache.pyi +10 -0
- tnfr/node.py +255 -177
- tnfr/node.pyi +161 -0
- tnfr/observers.py +154 -150
- tnfr/observers.pyi +46 -0
- tnfr/ontosim.py +135 -134
- tnfr/ontosim.pyi +33 -0
- tnfr/operators/__init__.py +452 -0
- tnfr/operators/__init__.pyi +31 -0
- tnfr/operators/definitions.py +181 -0
- tnfr/operators/definitions.pyi +92 -0
- tnfr/operators/jitter.py +266 -0
- tnfr/operators/jitter.pyi +11 -0
- tnfr/operators/registry.py +80 -0
- tnfr/operators/registry.pyi +15 -0
- tnfr/operators/remesh.py +569 -0
- tnfr/presets.py +10 -23
- tnfr/presets.pyi +7 -0
- tnfr/py.typed +0 -0
- tnfr/rng.py +440 -0
- tnfr/rng.pyi +14 -0
- tnfr/selector.py +217 -0
- tnfr/selector.pyi +19 -0
- tnfr/sense.py +307 -142
- tnfr/sense.pyi +30 -0
- tnfr/structural.py +69 -164
- tnfr/structural.pyi +46 -0
- tnfr/telemetry/__init__.py +13 -0
- tnfr/telemetry/verbosity.py +37 -0
- tnfr/tokens.py +61 -0
- tnfr/tokens.pyi +41 -0
- tnfr/trace.py +520 -95
- tnfr/trace.pyi +68 -0
- tnfr/types.py +382 -17
- tnfr/types.pyi +145 -0
- tnfr/utils/__init__.py +158 -0
- tnfr/utils/__init__.pyi +133 -0
- tnfr/utils/cache.py +755 -0
- tnfr/utils/cache.pyi +156 -0
- tnfr/utils/data.py +267 -0
- tnfr/utils/data.pyi +73 -0
- tnfr/utils/graph.py +87 -0
- tnfr/utils/graph.pyi +10 -0
- tnfr/utils/init.py +746 -0
- tnfr/utils/init.pyi +85 -0
- tnfr/utils/io.py +157 -0
- tnfr/utils/io.pyi +10 -0
- tnfr/utils/validators.py +130 -0
- tnfr/utils/validators.pyi +19 -0
- tnfr/validation/__init__.py +25 -0
- tnfr/validation/__init__.pyi +17 -0
- tnfr/validation/compatibility.py +59 -0
- tnfr/validation/compatibility.pyi +8 -0
- tnfr/validation/grammar.py +149 -0
- tnfr/validation/grammar.pyi +11 -0
- tnfr/validation/rules.py +194 -0
- tnfr/validation/rules.pyi +18 -0
- tnfr/validation/syntax.py +151 -0
- tnfr/validation/syntax.pyi +7 -0
- tnfr-6.0.0.dist-info/METADATA +135 -0
- tnfr-6.0.0.dist-info/RECORD +157 -0
- tnfr/cli.py +0 -322
- tnfr/config.py +0 -41
- tnfr/constants.py +0 -277
- tnfr/dynamics.py +0 -814
- tnfr/helpers.py +0 -264
- tnfr/main.py +0 -47
- tnfr/metrics.py +0 -597
- tnfr/operators.py +0 -525
- tnfr/program.py +0 -176
- tnfr/scenarios.py +0 -34
- tnfr/validators.py +0 -38
- tnfr-4.5.1.dist-info/METADATA +0 -221
- tnfr-4.5.1.dist-info/RECORD +0 -28
- {tnfr-4.5.1.dist-info → tnfr-6.0.0.dist-info}/WHEEL +0 -0
- {tnfr-4.5.1.dist-info → tnfr-6.0.0.dist-info}/entry_points.txt +0 -0
- {tnfr-4.5.1.dist-info → tnfr-6.0.0.dist-info}/licenses/LICENSE.md +0 -0
- {tnfr-4.5.1.dist-info → tnfr-6.0.0.dist-info}/top_level.txt +0 -0
tnfr/utils/cache.py
ADDED
@@ -0,0 +1,755 @@
"""Core caching utilities shared across TNFR helpers.

This module consolidates structural cache helpers that previously lived in
``tnfr.helpers.cache_utils`` and ``tnfr.helpers.edge_cache``. The functions
exposed here are responsible for maintaining deterministic node digests,
scoped graph caches guarded by locks, and version counters that keep edge
artifacts in sync with ΔNFR driven updates.
"""

from __future__ import annotations

import hashlib
import threading
from collections import defaultdict
from collections.abc import (
    Callable,
    Hashable,
    Iterable,
    Iterator,
    Mapping,
    MutableMapping,
)
from contextlib import contextmanager
from functools import lru_cache
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, TypeVar, cast

from cachetools import LRUCache
import networkx as nx

from ..cache import CacheCapacityConfig, CacheManager, InstrumentedLRUCache
from ..types import GraphLike, NodeId, TNFRGraph, TimingContext
from .graph import get_graph, mark_dnfr_prep_dirty
from .init import get_logger, get_numpy
from .io import json_dumps

T = TypeVar("T")

__all__ = (
    "EdgeCacheManager",
    "NODE_SET_CHECKSUM_KEY",
    "cached_node_list",
    "cached_nodes_and_A",
    "clear_node_repr_cache",
    "edge_version_cache",
    "edge_version_update",
    "ensure_node_index_map",
    "ensure_node_offset_map",
    "get_graph_version",
    "increment_edge_version",
    "increment_graph_version",
    "node_set_checksum",
    "stable_json",
    "configure_graph_cache_limits",
    "DNFR_PREP_STATE_KEY",
    "DnfrPrepState",
)

if TYPE_CHECKING:  # pragma: no cover - typing aide
    from ..dynamics.dnfr import DnfrCache

# Key used to store the node set checksum in a graph's ``graph`` attribute.
NODE_SET_CHECKSUM_KEY = "_node_set_checksum_cache"

logger = get_logger(__name__)

# Keys of cache entries dependent on the edge version. Any change to the edge
# set requires these to be dropped to avoid stale data.
EDGE_VERSION_CACHE_KEYS = ("_trig_version",)


def get_graph_version(graph: Any, key: str, default: int = 0) -> int:
    """Return integer version stored in ``graph`` under ``key``."""

    return int(graph.get(key, default))


def increment_graph_version(graph: Any, key: str) -> int:
    """Increment and store a version counter in ``graph`` under ``key``."""

    version = get_graph_version(graph, key) + 1
    graph[key] = version
    return version


def stable_json(obj: Any) -> str:
    """Return a JSON string with deterministic ordering for ``obj``."""

    return json_dumps(
        obj,
        sort_keys=True,
        ensure_ascii=False,
        to_bytes=False,
    )
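
# Editor's usage sketch (not part of the released file): the version helpers
# operate on any plain mapping, and ``stable_json`` normalises key order.
# Exact spacing of the JSON string depends on ``json_dumps`` defaults, so
# only equality is asserted here.
#
#     >>> g = {}
#     >>> increment_graph_version(g, "_edge_version")
#     1
#     >>> get_graph_version(g, "_edge_version")
#     1
#     >>> stable_json({"b": 1, "a": 2}) == stable_json({"a": 2, "b": 1})
#     True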


@lru_cache(maxsize=1024)
def _node_repr_digest(obj: Any) -> tuple[str, bytes]:
    """Return cached stable representation and digest for ``obj``."""

    try:
        repr_ = stable_json(obj)
    except TypeError:
        repr_ = repr(obj)
    digest = hashlib.blake2b(repr_.encode("utf-8"), digest_size=16).digest()
    return repr_, digest


def clear_node_repr_cache() -> None:
    """Clear cached node representations used for checksums."""

    _node_repr_digest.cache_clear()


def _node_repr(n: Any) -> str:
    """Stable representation for node hashing and sorting."""

    return _node_repr_digest(n)[0]


def _iter_node_digests(
    nodes: Iterable[Any], *, presorted: bool
) -> Iterable[bytes]:
    """Yield node digests in a deterministic order."""

    if presorted:
        for node in nodes:
            yield _node_repr_digest(node)[1]
    else:
        for _, digest in sorted(
            (_node_repr_digest(n) for n in nodes), key=lambda x: x[0]
        ):
            yield digest


def _node_set_checksum_no_nodes(
    G: nx.Graph,
    graph: Any,
    *,
    presorted: bool,
    store: bool,
) -> str:
    """Checksum helper when no explicit node set is provided."""

    nodes_view = G.nodes()
    current_nodes = frozenset(nodes_view)
    cached = graph.get(NODE_SET_CHECKSUM_KEY)
    if cached and len(cached) == 3 and cached[2] == current_nodes:
        return cached[1]

    hasher = hashlib.blake2b(digest_size=16)
    for digest in _iter_node_digests(nodes_view, presorted=presorted):
        hasher.update(digest)

    checksum = hasher.hexdigest()
    if store:
        token = checksum[:16]
        if cached and cached[0] == token:
            return cached[1]
        graph[NODE_SET_CHECKSUM_KEY] = (token, checksum, current_nodes)
    else:
        graph.pop(NODE_SET_CHECKSUM_KEY, None)
    return checksum


def node_set_checksum(
    G: nx.Graph,
    nodes: Iterable[Any] | None = None,
    *,
    presorted: bool = False,
    store: bool = True,
) -> str:
    """Return a BLAKE2b checksum of ``G``'s node set."""

    graph = get_graph(G)
    if nodes is None:
        return _node_set_checksum_no_nodes(
            G, graph, presorted=presorted, store=store
        )

    hasher = hashlib.blake2b(digest_size=16)
    for digest in _iter_node_digests(nodes, presorted=presorted):
        hasher.update(digest)

    checksum = hasher.hexdigest()
    if store:
        token = checksum[:16]
        cached = graph.get(NODE_SET_CHECKSUM_KEY)
        if cached and cached[0] == token:
            return cached[1]
        graph[NODE_SET_CHECKSUM_KEY] = (token, checksum)
    else:
        graph.pop(NODE_SET_CHECKSUM_KEY, None)
    return checksum
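
# Editor's usage sketch (not in the wheel): checksums are order-insensitive
# because digests are sorted by each node's canonical representation before
# hashing, so an explicit node iterable matches the graph's own node set.
#
#     >>> import networkx as nx
#     >>> G = nx.Graph()
#     >>> G.add_nodes_from([3, 1, 2])
#     >>> node_set_checksum(G) == node_set_checksum(G, nodes=(1, 2, 3))
#     True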


@dataclass(slots=True)
class NodeCache:
    """Container for cached node data."""

    checksum: str
    nodes: tuple[Any, ...]
    sorted_nodes: tuple[Any, ...] | None = None
    idx: dict[Any, int] | None = None
    offset: dict[Any, int] | None = None

    @property
    def n(self) -> int:
        return len(self.nodes)


def _update_node_cache(
    graph: Any,
    nodes: tuple[Any, ...],
    key: str,
    *,
    checksum: str,
    sorted_nodes: tuple[Any, ...] | None = None,
) -> None:
    """Store ``nodes`` and ``checksum`` in ``graph`` under ``key``."""

    graph[f"{key}_cache"] = NodeCache(
        checksum=checksum, nodes=nodes, sorted_nodes=sorted_nodes
    )
    graph[f"{key}_checksum"] = checksum


def _refresh_node_list_cache(
    G: nx.Graph,
    graph: Any,
    *,
    sort_nodes: bool,
    current_n: int,
) -> tuple[Any, ...]:
    """Refresh the cached node list and return the nodes."""

    nodes = tuple(G.nodes())
    checksum = node_set_checksum(G, nodes, store=True)
    sorted_nodes = tuple(sorted(nodes, key=_node_repr)) if sort_nodes else None
    _update_node_cache(
        graph,
        nodes,
        "_node_list",
        checksum=checksum,
        sorted_nodes=sorted_nodes,
    )
    graph["_node_list_len"] = current_n
    return nodes


def _reuse_node_list_cache(
    graph: Any,
    cache: NodeCache,
    nodes: tuple[Any, ...],
    sorted_nodes: tuple[Any, ...] | None,
    *,
    sort_nodes: bool,
    new_checksum: str | None,
) -> None:
    """Reuse existing node cache and record its checksum if missing."""

    checksum = cache.checksum if new_checksum is None else new_checksum
    if sort_nodes and sorted_nodes is None:
        sorted_nodes = tuple(sorted(nodes, key=_node_repr))
    _update_node_cache(
        graph,
        nodes,
        "_node_list",
        checksum=checksum,
        sorted_nodes=sorted_nodes,
    )


def _cache_node_list(G: nx.Graph) -> tuple[Any, ...]:
    """Cache and return the tuple of nodes for ``G``."""

    graph = get_graph(G)
    cache: NodeCache | None = graph.get("_node_list_cache")
    nodes = cache.nodes if cache else None
    sorted_nodes = cache.sorted_nodes if cache else None
    stored_len = graph.get("_node_list_len")
    current_n = G.number_of_nodes()
    dirty = bool(graph.pop("_node_list_dirty", False))

    invalid = nodes is None or stored_len != current_n or dirty
    new_checksum: str | None = None

    if not invalid and cache:
        new_checksum = node_set_checksum(G)
        invalid = cache.checksum != new_checksum

    sort_nodes = bool(graph.get("SORT_NODES", False))

    if invalid:
        nodes = _refresh_node_list_cache(
            G, graph, sort_nodes=sort_nodes, current_n=current_n
        )
    elif cache and "_node_list_checksum" not in graph:
        _reuse_node_list_cache(
            graph,
            cache,
            nodes,
            sorted_nodes,
            sort_nodes=sort_nodes,
            new_checksum=new_checksum,
        )
    else:
        if sort_nodes and sorted_nodes is None and cache is not None:
            cache.sorted_nodes = tuple(sorted(nodes, key=_node_repr))
    return nodes


def cached_node_list(G: nx.Graph) -> tuple[Any, ...]:
    """Public wrapper returning the cached node tuple for ``G``."""

    return _cache_node_list(G)
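
# Editor's usage sketch (not in the wheel), continuing the example above:
# while the node set is unchanged the same tuple object is returned, and a
# structural change is detected on the next call via length and checksum.
#
#     >>> nodes = cached_node_list(G)
#     >>> cached_node_list(G) is nodes
#     True
#     >>> G.add_node(4)
#     >>> len(cached_node_list(G))
#     4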


def _ensure_node_map(
    G: TNFRGraph,
    *,
    attrs: tuple[str, ...],
    sort: bool = False,
) -> dict[NodeId, int]:
    """Return cached node-to-index/offset mappings stored on ``NodeCache``."""

    graph = G.graph
    _cache_node_list(G)
    cache: NodeCache = graph["_node_list_cache"]

    missing = [attr for attr in attrs if getattr(cache, attr) is None]
    if missing:
        if sort:
            nodes_opt = cache.sorted_nodes
            if nodes_opt is None:
                nodes_opt = tuple(sorted(cache.nodes, key=_node_repr))
                cache.sorted_nodes = nodes_opt
            nodes_seq = nodes_opt
        else:
            nodes_seq = cache.nodes
        node_ids = cast(tuple[NodeId, ...], nodes_seq)
        mappings: dict[str, dict[NodeId, int]] = {attr: {} for attr in missing}
        for idx, node in enumerate(node_ids):
            for attr in missing:
                mappings[attr][node] = idx
        for attr in missing:
            setattr(cache, attr, mappings[attr])
    return cast(dict[NodeId, int], getattr(cache, attrs[0]))


def ensure_node_index_map(G: TNFRGraph) -> dict[NodeId, int]:
    """Return cached node-to-index mapping for ``G``."""

    return _ensure_node_map(G, attrs=("idx",), sort=False)


def ensure_node_offset_map(G: TNFRGraph) -> dict[NodeId, int]:
    """Return cached node-to-offset mapping for ``G``."""

    sort = bool(G.graph.get("SORT_NODES", False))
    return _ensure_node_map(G, attrs=("offset",), sort=sort)


@dataclass
class EdgeCacheState:
    cache: MutableMapping[Hashable, Any]
    locks: defaultdict[Hashable, threading.RLock]
    max_entries: int | None


_GRAPH_CACHE_MANAGER_KEY = "_tnfr_cache_manager"
_GRAPH_CACHE_CONFIG_KEY = "_tnfr_cache_config"
DNFR_PREP_STATE_KEY = "_dnfr_prep_state"


@dataclass(slots=True)
class DnfrPrepState:
    """State container coordinating ΔNFR preparation caches."""

    cache: "DnfrCache"
    cache_lock: threading.RLock
    vector_lock: threading.RLock


def _new_dnfr_cache() -> "DnfrCache":
    """Return an empty :class:`~tnfr.dynamics.dnfr.DnfrCache` instance."""

    from ..dynamics.dnfr import DnfrCache

    return DnfrCache(
        idx={},
        theta=[],
        epi=[],
        vf=[],
        cos_theta=[],
        sin_theta=[],
        neighbor_x=[],
        neighbor_y=[],
        neighbor_epi_sum=[],
        neighbor_vf_sum=[],
        neighbor_count=[],
        neighbor_deg_sum=[],
    )


def _build_dnfr_prep_state(
    graph: MutableMapping[str, Any],
    previous: DnfrPrepState | None = None,
) -> DnfrPrepState:
    """Construct a :class:`DnfrPrepState` and mirror it on ``graph``."""

    cache_lock: threading.RLock
    vector_lock: threading.RLock
    if isinstance(previous, DnfrPrepState):
        cache_lock = previous.cache_lock
        vector_lock = previous.vector_lock
    else:
        cache_lock = threading.RLock()
        vector_lock = threading.RLock()
    state = DnfrPrepState(
        cache=_new_dnfr_cache(),
        cache_lock=cache_lock,
        vector_lock=vector_lock,
    )
    graph["_dnfr_prep_cache"] = state.cache
    return state


def _coerce_dnfr_state(
    graph: MutableMapping[str, Any],
    current: Any,
) -> DnfrPrepState:
    """Return ``current`` normalised into :class:`DnfrPrepState`."""

    if isinstance(current, DnfrPrepState):
        graph["_dnfr_prep_cache"] = current.cache
        return current
    try:
        from ..dynamics.dnfr import DnfrCache
    except Exception:  # pragma: no cover - defensive import
        DnfrCache = None  # type: ignore[assignment]
    if DnfrCache is not None and isinstance(current, DnfrCache):
        state = DnfrPrepState(
            cache=current,
            cache_lock=threading.RLock(),
            vector_lock=threading.RLock(),
        )
        graph["_dnfr_prep_cache"] = current
        return state
    return _build_dnfr_prep_state(graph)


def _graph_cache_manager(graph: MutableMapping[str, Any]) -> CacheManager:
    manager = graph.get(_GRAPH_CACHE_MANAGER_KEY)
    if not isinstance(manager, CacheManager):
        manager = CacheManager(default_capacity=128)
        graph[_GRAPH_CACHE_MANAGER_KEY] = manager
        config = graph.get(_GRAPH_CACHE_CONFIG_KEY)
        if isinstance(config, dict):
            manager.configure_from_mapping(config)

        def _dnfr_factory() -> DnfrPrepState:
            return _build_dnfr_prep_state(graph)

        def _dnfr_reset(current: Any) -> DnfrPrepState:
            if isinstance(current, DnfrPrepState):
                return _build_dnfr_prep_state(graph, current)
            return _build_dnfr_prep_state(graph)

        manager.register(
            DNFR_PREP_STATE_KEY,
            _dnfr_factory,
            reset=_dnfr_reset,
        )
    manager.update(
        DNFR_PREP_STATE_KEY,
        lambda current: _coerce_dnfr_state(graph, current),
    )
    return manager


def configure_graph_cache_limits(
    G: GraphLike | TNFRGraph | MutableMapping[str, Any],
    *,
    default_capacity: int | None | object = CacheManager._MISSING,
    overrides: Mapping[str, int | None] | None = None,
    replace_overrides: bool = False,
) -> CacheCapacityConfig:
    """Update cache capacity policy stored on ``G.graph``."""

    graph = get_graph(G)
    manager = _graph_cache_manager(graph)
    manager.configure(
        default_capacity=default_capacity,
        overrides=overrides,
        replace_overrides=replace_overrides,
    )
    snapshot = manager.export_config()
    graph[_GRAPH_CACHE_CONFIG_KEY] = {
        "default_capacity": snapshot.default_capacity,
        "overrides": dict(snapshot.overrides),
    }
    return snapshot
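
# Editor's configuration sketch (not in the wheel). The override key below is
# an assumption based on ``EdgeCacheManager._STATE_KEY``; valid names depend
# on what has been registered with the graph's ``CacheManager``.
#
#     >>> cfg = configure_graph_cache_limits(
#     ...     G,
#     ...     default_capacity=64,
#     ...     overrides={"_edge_version_state": 32},
#     ... )
#     >>> cfg.default_capacity
#     64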


class EdgeCacheManager:
    """Coordinate cache storage and per-key locks for edge version caches."""

    _STATE_KEY = "_edge_version_state"

    def __init__(self, graph: MutableMapping[str, Any]) -> None:
        self.graph: MutableMapping[str, Any] = graph
        self._manager = _graph_cache_manager(graph)
        self._manager.register(
            self._STATE_KEY,
            self._default_state,
            reset=self._reset_state,
        )

    def record_hit(self) -> None:
        """Record a cache hit for telemetry."""

        self._manager.increment_hit(self._STATE_KEY)

    def record_miss(self, *, track_metrics: bool = True) -> None:
        """Record a cache miss for telemetry.

        When ``track_metrics`` is ``False`` the miss is acknowledged without
        mutating the aggregated metrics.
        """

        if track_metrics:
            self._manager.increment_miss(self._STATE_KEY)

    def record_eviction(self, *, track_metrics: bool = True) -> None:
        """Record cache eviction events for telemetry.

        When ``track_metrics`` is ``False`` the underlying metrics counter is
        left untouched while still signalling that an eviction occurred.
        """

        if track_metrics:
            self._manager.increment_eviction(self._STATE_KEY)

    def timer(self) -> TimingContext:
        """Return a timing context linked to this cache."""

        return self._manager.timer(self._STATE_KEY)

    def _default_state(self) -> EdgeCacheState:
        return self._build_state(None)

    def resolve_max_entries(self, max_entries: int | None | object) -> int | None:
        """Return effective capacity for the edge cache."""

        if max_entries is CacheManager._MISSING:
            return self._manager.get_capacity(self._STATE_KEY)
        return self._manager.get_capacity(
            self._STATE_KEY,
            requested=None if max_entries is None else int(max_entries),
            use_default=False,
        )

    def _build_state(self, max_entries: int | None) -> EdgeCacheState:
        locks: defaultdict[Hashable, threading.RLock] = defaultdict(threading.RLock)
        capacity = float("inf") if max_entries is None else int(max_entries)
        cache = InstrumentedLRUCache(
            capacity,
            manager=self._manager,
            metrics_key=self._STATE_KEY,
            locks=locks,
            count_overwrite_hit=False,
        )

        def _on_eviction(key: Hashable, _: Any) -> None:
            self.record_eviction(track_metrics=False)
            locks.pop(key, None)

        cache.set_eviction_callbacks(_on_eviction)
        return EdgeCacheState(cache=cache, locks=locks, max_entries=max_entries)

    def _ensure_state(
        self, state: EdgeCacheState | None, max_entries: int | None | object
    ) -> EdgeCacheState:
        target = self.resolve_max_entries(max_entries)
        if target is not None:
            target = int(target)
            if target < 0:
                raise ValueError("max_entries must be non-negative or None")
        if not isinstance(state, EdgeCacheState) or state.max_entries != target:
            return self._build_state(target)
        return state

    def _reset_state(self, state: EdgeCacheState | None) -> EdgeCacheState:
        if isinstance(state, EdgeCacheState):
            state.cache.clear()
            return state
        return self._build_state(None)

    def get_cache(
        self,
        max_entries: int | None | object,
        *,
        create: bool = True,
    ) -> tuple[
        MutableMapping[Hashable, Any] | None,
        dict[Hashable, threading.RLock]
        | defaultdict[Hashable, threading.RLock]
        | None,
    ]:
        """Return the cache and lock mapping for the manager's graph."""

        if not create:
            state = self._manager.peek(self._STATE_KEY)
            if isinstance(state, EdgeCacheState):
                return state.cache, state.locks
            return None, None

        state = self._manager.update(
            self._STATE_KEY,
            lambda current: self._ensure_state(current, max_entries),
        )
        return state.cache, state.locks

    def clear(self) -> None:
        """Reset cached data managed by this instance."""

        self._manager.clear(self._STATE_KEY)


def edge_version_cache(
    G: Any,
    key: Hashable,
    builder: Callable[[], T],
    *,
    max_entries: int | None | object = CacheManager._MISSING,
) -> T:
    """Return cached ``builder`` output tied to the edge version of ``G``."""

    graph = get_graph(G)
    manager = graph.get("_edge_cache_manager")  # type: ignore[assignment]
    if not isinstance(manager, EdgeCacheManager) or manager.graph is not graph:
        manager = EdgeCacheManager(graph)
        graph["_edge_cache_manager"] = manager

    resolved = manager.resolve_max_entries(max_entries)
    if resolved == 0:
        return builder()

    cache, locks = manager.get_cache(resolved)
    edge_version = get_graph_version(graph, "_edge_version")
    lock = locks[key]

    with lock:
        entry = cache.get(key)
        if entry is not None and entry[0] == edge_version:
            manager.record_hit()
            return entry[1]

    try:
        with manager.timer():
            value = builder()
    except (RuntimeError, ValueError) as exc:  # pragma: no cover - logging side effect
        logger.exception("edge_version_cache builder failed for %r: %s", key, exc)
        raise
    else:
        with lock:
            entry = cache.get(key)
            if entry is not None:
                cached_version, cached_value = entry
                manager.record_miss()
                if cached_version == edge_version:
                    manager.record_hit()
                    return cached_value
                manager.record_eviction()
            cache[key] = (edge_version, value)
            return value
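
# Editor's usage sketch (not in the wheel): ``builder`` runs once per edge
# version; bumping the version (see ``increment_edge_version`` below) clears
# the cached entry and forces a rebuild on the next lookup.
#
#     >>> def degree_table():
#     ...     return dict(G.degree())
#     >>> t1 = edge_version_cache(G, "degree-table", degree_table)
#     >>> edge_version_cache(G, "degree-table", degree_table) is t1
#     True
#     >>> increment_edge_version(G)
#     >>> edge_version_cache(G, "degree-table", degree_table) is t1
#     False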


def cached_nodes_and_A(
    G: nx.Graph,
    *,
    cache_size: int | None = 1,
    require_numpy: bool = False,
    prefer_sparse: bool = False,
    nodes: tuple[Any, ...] | None = None,
) -> tuple[tuple[Any, ...], Any]:
    """Return cached nodes tuple and adjacency matrix for ``G``.

    When ``prefer_sparse`` is true the adjacency matrix construction is skipped
    unless a caller later requests it explicitly. This lets ΔNFR reuse the
    edge-index buffers stored on :class:`~tnfr.dynamics.dnfr.DnfrCache` without
    paying for ``nx.to_numpy_array`` on sparse graphs while keeping the
    canonical cache interface unchanged.
    """

    if nodes is None:
        nodes = cached_node_list(G)
    graph = G.graph

    checksum = getattr(graph.get("_node_list_cache"), "checksum", None)
    if checksum is None:
        checksum = graph.get("_node_list_checksum")
    if checksum is None:
        node_set_cache = graph.get(NODE_SET_CHECKSUM_KEY)
        if isinstance(node_set_cache, tuple) and len(node_set_cache) >= 2:
            checksum = node_set_cache[1]
    if checksum is None:
        checksum = ""

    key = f"_dnfr_{len(nodes)}_{checksum}"
    graph["_dnfr_nodes_checksum"] = checksum

    def builder() -> tuple[tuple[Any, ...], Any]:
        np = get_numpy()
        if np is None or prefer_sparse:
            return nodes, None
        A = nx.to_numpy_array(G, nodelist=nodes, weight=None, dtype=float)
        return nodes, A

    nodes, A = edge_version_cache(G, key, builder, max_entries=cache_size)

    if require_numpy and A is None:
        raise RuntimeError("NumPy is required for adjacency caching")

    return nodes, A
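
# Editor's usage sketch (not in the wheel): with NumPy installed the helper
# returns a dense adjacency matrix aligned with the cached node tuple.
#
#     >>> nodes, A = cached_nodes_and_A(G, require_numpy=True)
#     >>> A.shape == (len(nodes), len(nodes))
#     True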


def _reset_edge_caches(graph: Any, G: Any) -> None:
    """Clear caches affected by edge updates."""

    EdgeCacheManager(graph).clear()
    _graph_cache_manager(graph).clear(DNFR_PREP_STATE_KEY)
    mark_dnfr_prep_dirty(G)
    clear_node_repr_cache()
    for key in EDGE_VERSION_CACHE_KEYS:
        graph.pop(key, None)


def increment_edge_version(G: Any) -> None:
    """Increment the edge version counter in ``G.graph``."""

    graph = get_graph(G)
    increment_graph_version(graph, "_edge_version")
    _reset_edge_caches(graph, G)


@contextmanager
def edge_version_update(G: TNFRGraph) -> Iterator[None]:
    """Scope a batch of edge mutations."""

    increment_edge_version(G)
    try:
        yield
    finally:
        increment_edge_version(G)
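
# Editor's usage sketch (not in the wheel): wrapping a batch of edge
# mutations bumps the version on entry and again on exit, so entries built
# before or during the batch are never served afterwards.
#
#     >>> with edge_version_update(G):
#     ...     G.add_edge(1, 2)
#     ...     G.add_edge(2, 3)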