tnfr-4.5.0-py3-none-any.whl → tnfr-4.5.2-py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
Potentially problematic release: this version of tnfr might be problematic.
- tnfr/__init__.py +91 -89
- tnfr/alias.py +546 -0
- tnfr/cache.py +578 -0
- tnfr/callback_utils.py +388 -0
- tnfr/cli/__init__.py +75 -0
- tnfr/cli/arguments.py +177 -0
- tnfr/cli/execution.py +288 -0
- tnfr/cli/utils.py +36 -0
- tnfr/collections_utils.py +300 -0
- tnfr/config.py +19 -28
- tnfr/constants/__init__.py +174 -0
- tnfr/constants/core.py +159 -0
- tnfr/constants/init.py +31 -0
- tnfr/constants/metric.py +110 -0
- tnfr/constants_glyphs.py +98 -0
- tnfr/dynamics/__init__.py +658 -0
- tnfr/dynamics/dnfr.py +733 -0
- tnfr/dynamics/integrators.py +267 -0
- tnfr/dynamics/sampling.py +31 -0
- tnfr/execution.py +201 -0
- tnfr/flatten.py +283 -0
- tnfr/gamma.py +302 -88
- tnfr/glyph_history.py +290 -0
- tnfr/grammar.py +285 -96
- tnfr/graph_utils.py +84 -0
- tnfr/helpers/__init__.py +71 -0
- tnfr/helpers/numeric.py +87 -0
- tnfr/immutable.py +178 -0
- tnfr/import_utils.py +228 -0
- tnfr/initialization.py +197 -0
- tnfr/io.py +246 -0
- tnfr/json_utils.py +162 -0
- tnfr/locking.py +37 -0
- tnfr/logging_utils.py +116 -0
- tnfr/metrics/__init__.py +41 -0
- tnfr/metrics/coherence.py +829 -0
- tnfr/metrics/common.py +151 -0
- tnfr/metrics/core.py +101 -0
- tnfr/metrics/diagnosis.py +234 -0
- tnfr/metrics/export.py +137 -0
- tnfr/metrics/glyph_timing.py +189 -0
- tnfr/metrics/reporting.py +148 -0
- tnfr/metrics/sense_index.py +120 -0
- tnfr/metrics/trig.py +181 -0
- tnfr/metrics/trig_cache.py +109 -0
- tnfr/node.py +214 -159
- tnfr/observers.py +126 -128
- tnfr/ontosim.py +134 -134
- tnfr/operators/__init__.py +420 -0
- tnfr/operators/jitter.py +203 -0
- tnfr/operators/remesh.py +485 -0
- tnfr/presets.py +46 -14
- tnfr/rng.py +254 -0
- tnfr/selector.py +210 -0
- tnfr/sense.py +284 -131
- tnfr/structural.py +207 -79
- tnfr/tokens.py +60 -0
- tnfr/trace.py +329 -94
- tnfr/types.py +43 -17
- tnfr/validators.py +70 -24
- tnfr/value_utils.py +59 -0
- tnfr-4.5.2.dist-info/METADATA +379 -0
- tnfr-4.5.2.dist-info/RECORD +67 -0
- tnfr/cli.py +0 -322
- tnfr/constants.py +0 -277
- tnfr/dynamics.py +0 -814
- tnfr/helpers.py +0 -264
- tnfr/main.py +0 -47
- tnfr/metrics.py +0 -597
- tnfr/operators.py +0 -525
- tnfr/program.py +0 -176
- tnfr/scenarios.py +0 -34
- tnfr-4.5.0.dist-info/METADATA +0 -109
- tnfr-4.5.0.dist-info/RECORD +0 -28
- {tnfr-4.5.0.dist-info → tnfr-4.5.2.dist-info}/WHEEL +0 -0
- {tnfr-4.5.0.dist-info → tnfr-4.5.2.dist-info}/entry_points.txt +0 -0
- {tnfr-4.5.0.dist-info → tnfr-4.5.2.dist-info}/licenses/LICENSE.md +0 -0
- {tnfr-4.5.0.dist-info → tnfr-4.5.2.dist-info}/top_level.txt +0 -0
tnfr/cache.py
ADDED
@@ -0,0 +1,578 @@

```python
"""Core caching utilities shared across TNFR helpers.

This module consolidates structural cache helpers that previously lived in
``tnfr.helpers.cache_utils`` and ``tnfr.helpers.edge_cache``. The functions
exposed here are responsible for maintaining deterministic node digests,
scoped graph caches guarded by locks, and version counters that keep edge
artifacts in sync with ΔNFR driven updates.
"""

from __future__ import annotations

import hashlib
import threading
from collections import defaultdict
from collections.abc import Callable, Hashable, Iterable
from contextlib import contextmanager
from functools import lru_cache
from dataclasses import dataclass
from typing import Any, TypeVar

from cachetools import LRUCache
import networkx as nx  # type: ignore[import-untyped]

from .graph_utils import get_graph, mark_dnfr_prep_dirty
from .import_utils import get_numpy
from .json_utils import json_dumps
from .logging_utils import get_logger

T = TypeVar("T")

__all__ = (
    "EdgeCacheManager",
    "LockAwareLRUCache",
    "NODE_SET_CHECKSUM_KEY",
    "cached_node_list",
    "cached_nodes_and_A",
    "clear_node_repr_cache",
    "edge_version_cache",
    "edge_version_update",
    "ensure_node_index_map",
    "ensure_node_offset_map",
    "get_graph_version",
    "increment_edge_version",
    "increment_graph_version",
    "node_set_checksum",
    "stable_json",
)

# Key used to store the node set checksum in a graph's ``graph`` attribute.
NODE_SET_CHECKSUM_KEY = "_node_set_checksum_cache"

logger = get_logger(__name__)

# Keys of cache entries dependent on the edge version. Any change to the edge
# set requires these to be dropped to avoid stale data.
EDGE_VERSION_CACHE_KEYS = ("_trig_version",)


class LockAwareLRUCache(LRUCache[Hashable, Any]):
    """``LRUCache`` that drops per-key locks when evicting items."""

    def __init__(self, maxsize: int, locks: dict[Hashable, threading.RLock]):
        super().__init__(maxsize)
        self._locks: dict[Hashable, threading.RLock] = locks

    def popitem(self) -> tuple[Hashable, Any]:  # type: ignore[override]
        key, value = super().popitem()
        self._locks.pop(key, None)
        return key, value
```
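The eviction hook is easiest to see in isolation. A minimal sketch (illustrative usage, not part of this release; it assumes only stock cachetools eviction semantics):

```python
import threading

from tnfr.cache import LockAwareLRUCache

locks = {"a": threading.RLock(), "b": threading.RLock()}
cache = LockAwareLRUCache(1, locks)  # room for a single entry
cache["a"] = 1
cache["b"] = 2  # evicts "a"; popitem() also drops locks["a"]
assert "a" not in locks and "b" in locks
```

Keeping the lock table in step with the cache prevents the per-key lock mapping from growing without bound as entries are evicted.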
```python
def _ensure_graph_entry(
    graph: Any,
    key: str,
    factory: Callable[[], T],
    validator: Callable[[Any], bool],
) -> T:
    """Return a validated entry from ``graph`` or create one when missing."""

    value = graph.get(key)
    if not validator(value):
        value = factory()
        graph[key] = value
    return value


def _ensure_lock_mapping(
    graph: Any,
    key: str,
    *,
    lock_factory: Callable[[], threading.RLock] = threading.RLock,
) -> defaultdict[Hashable, threading.RLock]:
    """Ensure ``graph`` holds a ``defaultdict`` of locks under ``key``."""

    return _ensure_graph_entry(
        graph,
        key,
        factory=lambda: defaultdict(lock_factory),
        validator=lambda value: isinstance(value, defaultdict)
        and value.default_factory is lock_factory,
    )


def _prune_locks(
    cache: dict[Hashable, Any] | LRUCache[Hashable, Any] | None,
    locks: dict[Hashable, threading.RLock]
    | defaultdict[Hashable, threading.RLock]
    | None,
) -> None:
    """Drop locks with no corresponding cache entry."""

    if not isinstance(locks, dict):
        return
    cache_keys = cache.keys() if isinstance(cache, dict) else ()
    for key in list(locks.keys()):
        if key not in cache_keys:
            locks.pop(key, None)


def get_graph_version(graph: Any, key: str, default: int = 0) -> int:
    """Return integer version stored in ``graph`` under ``key``."""

    return int(graph.get(key, default))


def increment_graph_version(graph: Any, key: str) -> int:
    """Increment and store a version counter in ``graph`` under ``key``."""

    version = get_graph_version(graph, key) + 1
    graph[key] = version
    return version


def stable_json(obj: Any) -> str:
    """Return a JSON string with deterministic ordering for ``obj``."""

    return json_dumps(
        obj,
        sort_keys=True,
        ensure_ascii=False,
        to_bytes=False,
    )
```
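For orientation, a hypothetical snippet (not from the package) showing how the version counters and `stable_json` behave on the plain mapping held at `G.graph`; the exact JSON spacing depends on `tnfr.json_utils.json_dumps`, so only key ordering is asserted:

```python
import networkx as nx

from tnfr.cache import get_graph_version, increment_graph_version, stable_json

G = nx.Graph()
assert get_graph_version(G.graph, "_edge_version") == 0  # default when unset
assert increment_graph_version(G.graph, "_edge_version") == 1

# Keys are normalised via sort_keys, so logically equal dicts serialise identically.
assert stable_json({"b": 1, "a": 2}) == stable_json({"a": 2, "b": 1})
```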
```python
@lru_cache(maxsize=1024)
def _node_repr_digest(obj: Any) -> tuple[str, bytes]:
    """Return cached stable representation and digest for ``obj``."""

    try:
        repr_ = stable_json(obj)
    except TypeError:
        repr_ = repr(obj)
    digest = hashlib.blake2b(repr_.encode("utf-8"), digest_size=16).digest()
    return repr_, digest


def clear_node_repr_cache() -> None:
    """Clear cached node representations used for checksums."""

    _node_repr_digest.cache_clear()


def _node_repr(n: Any) -> str:
    """Stable representation for node hashing and sorting."""

    return _node_repr_digest(n)[0]


def _iter_node_digests(
    nodes: Iterable[Any], *, presorted: bool
) -> Iterable[bytes]:
    """Yield node digests in a deterministic order."""

    if presorted:
        for node in nodes:
            yield _node_repr_digest(node)[1]
    else:
        for _, digest in sorted(
            (_node_repr_digest(n) for n in nodes), key=lambda x: x[0]
        ):
            yield digest


def _node_set_checksum_no_nodes(
    G: nx.Graph,
    graph: Any,
    *,
    presorted: bool,
    store: bool,
) -> str:
    """Checksum helper when no explicit node set is provided."""

    nodes_view = G.nodes()
    current_nodes = frozenset(nodes_view)
    cached = graph.get(NODE_SET_CHECKSUM_KEY)
    if cached and len(cached) == 3 and cached[2] == current_nodes:
        return cached[1]

    hasher = hashlib.blake2b(digest_size=16)
    for digest in _iter_node_digests(nodes_view, presorted=presorted):
        hasher.update(digest)

    checksum = hasher.hexdigest()
    if store:
        token = checksum[:16]
        if cached and cached[0] == token:
            return cached[1]
        graph[NODE_SET_CHECKSUM_KEY] = (token, checksum, current_nodes)
    else:
        graph.pop(NODE_SET_CHECKSUM_KEY, None)
    return checksum


def node_set_checksum(
    G: nx.Graph,
    nodes: Iterable[Any] | None = None,
    *,
    presorted: bool = False,
    store: bool = True,
) -> str:
    """Return a BLAKE2b checksum of ``G``'s node set."""

    graph = get_graph(G)
    if nodes is None:
        return _node_set_checksum_no_nodes(
            G, graph, presorted=presorted, store=store
        )

    hasher = hashlib.blake2b(digest_size=16)
    for digest in _iter_node_digests(nodes, presorted=presorted):
        hasher.update(digest)

    checksum = hasher.hexdigest()
    if store:
        token = checksum[:16]
        cached = graph.get(NODE_SET_CHECKSUM_KEY)
        if cached and cached[0] == token:
            return cached[1]
        graph[NODE_SET_CHECKSUM_KEY] = (token, checksum)
    else:
        graph.pop(NODE_SET_CHECKSUM_KEY, None)
    return checksum
```
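A usage sketch for the checksum API (illustrative, not from the diff): digests are sorted before hashing, so the result depends only on the node set, never on iteration order.

```python
import networkx as nx

from tnfr.cache import node_set_checksum

G = nx.Graph()
G.add_nodes_from([3, 1, 2])
a = node_set_checksum(G)                          # cached under NODE_SET_CHECKSUM_KEY
b = node_set_checksum(G, [2, 3, 1], store=False)  # explicit nodes, cache bypassed
assert a == b  # same node set, same digest, regardless of order
```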
```python
@dataclass(slots=True)
class NodeCache:
    """Container for cached node data."""

    checksum: str
    nodes: tuple[Any, ...]
    sorted_nodes: tuple[Any, ...] | None = None
    idx: dict[Any, int] | None = None
    offset: dict[Any, int] | None = None

    @property
    def n(self) -> int:
        return len(self.nodes)


def _update_node_cache(
    graph: Any,
    nodes: tuple[Any, ...],
    key: str,
    *,
    checksum: str,
    sorted_nodes: tuple[Any, ...] | None = None,
) -> None:
    """Store ``nodes`` and ``checksum`` in ``graph`` under ``key``."""

    graph[f"{key}_cache"] = NodeCache(
        checksum=checksum, nodes=nodes, sorted_nodes=sorted_nodes
    )
    graph[f"{key}_checksum"] = checksum


def _refresh_node_list_cache(
    G: nx.Graph,
    graph: Any,
    *,
    sort_nodes: bool,
    current_n: int,
) -> tuple[Any, ...]:
    """Refresh the cached node list and return the nodes."""

    nodes = tuple(G.nodes())
    checksum = node_set_checksum(G, nodes, store=True)
    sorted_nodes = tuple(sorted(nodes, key=_node_repr)) if sort_nodes else None
    _update_node_cache(
        graph,
        nodes,
        "_node_list",
        checksum=checksum,
        sorted_nodes=sorted_nodes,
    )
    graph["_node_list_len"] = current_n
    return nodes


def _reuse_node_list_cache(
    graph: Any,
    cache: NodeCache,
    nodes: tuple[Any, ...],
    sorted_nodes: tuple[Any, ...] | None,
    *,
    sort_nodes: bool,
    new_checksum: str | None,
) -> None:
    """Reuse existing node cache and record its checksum if missing."""

    checksum = cache.checksum if new_checksum is None else new_checksum
    if sort_nodes and sorted_nodes is None:
        sorted_nodes = tuple(sorted(nodes, key=_node_repr))
    _update_node_cache(
        graph,
        nodes,
        "_node_list",
        checksum=checksum,
        sorted_nodes=sorted_nodes,
    )


def _cache_node_list(G: nx.Graph) -> tuple[Any, ...]:
    """Cache and return the tuple of nodes for ``G``."""

    graph = get_graph(G)
    cache: NodeCache | None = graph.get("_node_list_cache")
    nodes = cache.nodes if cache else None
    sorted_nodes = cache.sorted_nodes if cache else None
    stored_len = graph.get("_node_list_len")
    current_n = G.number_of_nodes()
    dirty = bool(graph.pop("_node_list_dirty", False))

    invalid = nodes is None or stored_len != current_n or dirty
    new_checksum: str | None = None

    if not invalid and cache:
        new_checksum = node_set_checksum(G)
        invalid = cache.checksum != new_checksum

    sort_nodes = bool(graph.get("SORT_NODES", False))

    if invalid:
        nodes = _refresh_node_list_cache(
            G, graph, sort_nodes=sort_nodes, current_n=current_n
        )
    elif cache and "_node_list_checksum" not in graph:
        _reuse_node_list_cache(
            graph,
            cache,
            nodes,
            sorted_nodes,
            sort_nodes=sort_nodes,
            new_checksum=new_checksum,
        )
    else:
        if sort_nodes and sorted_nodes is None and cache is not None:
            cache.sorted_nodes = tuple(sorted(nodes, key=_node_repr))
    return nodes


def cached_node_list(G: nx.Graph) -> tuple[Any, ...]:
    """Public wrapper returning the cached node tuple for ``G``."""

    return _cache_node_list(G)


def _ensure_node_map(
    G,
    *,
    attrs: tuple[str, ...],
    sort: bool = False,
) -> dict[Any, int]:
    """Return cached node-to-index/offset mappings stored on ``NodeCache``."""

    graph = G.graph
    _cache_node_list(G)
    cache: NodeCache = graph["_node_list_cache"]

    missing = [attr for attr in attrs if getattr(cache, attr) is None]
    if missing:
        if sort:
            nodes = cache.sorted_nodes
            if nodes is None:
                nodes = cache.sorted_nodes = tuple(
                    sorted(cache.nodes, key=_node_repr)
                )
        else:
            nodes = cache.nodes
        mappings: dict[str, dict[Any, int]] = {attr: {} for attr in missing}
        for idx, node in enumerate(nodes):
            for attr in missing:
                mappings[attr][node] = idx
        for attr in missing:
            setattr(cache, attr, mappings[attr])
    return getattr(cache, attrs[0])


def ensure_node_index_map(G) -> dict[Any, int]:
    """Return cached node-to-index mapping for ``G``."""

    return _ensure_node_map(G, attrs=("idx",), sort=False)


def ensure_node_offset_map(G) -> dict[Any, int]:
    """Return cached node-to-offset mapping for ``G``."""

    sort = bool(G.graph.get("SORT_NODES", False))
    return _ensure_node_map(G, attrs=("offset",), sort=sort)
```
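The node-list cache and its derived index map might be exercised as follows (hypothetical usage; node labels follow networkx defaults):

```python
import networkx as nx

from tnfr.cache import cached_node_list, ensure_node_index_map

G = nx.path_graph(3)
nodes = cached_node_list(G)          # (0, 1, 2), cached alongside its checksum
assert ensure_node_index_map(G) == {0: 0, 1: 1, 2: 2}

G.add_node(3)                        # length change invalidates the cached list
assert cached_node_list(G) == (0, 1, 2, 3)
```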
```python
class EdgeCacheManager:
    """Coordinate cache storage and per-key locks for edge version caches."""

    _LOCK = threading.RLock()

    def __init__(self, graph: Any) -> None:
        self.graph = graph
        self.cache_key = "_edge_version_cache"
        self.locks_key = "_edge_version_cache_locks"

    def _validator(self, max_entries: int | None) -> Callable[[Any], bool]:
        if max_entries is None:
            return lambda value: value is not None and not isinstance(value, LRUCache)
        return lambda value: isinstance(value, LRUCache) and value.maxsize == max_entries

    def _factory(
        self,
        max_entries: int | None,
        locks: dict[Hashable, threading.RLock]
        | defaultdict[Hashable, threading.RLock],
    ) -> dict[Hashable, Any] | LRUCache[Hashable, Any]:
        if max_entries:
            return LockAwareLRUCache(max_entries, locks)  # type: ignore[arg-type]
        return {}

    def get_cache(
        self,
        max_entries: int | None,
        *,
        create: bool = True,
    ) -> tuple[
        dict[Hashable, Any] | LRUCache[Hashable, Any] | None,
        dict[Hashable, threading.RLock]
        | defaultdict[Hashable, threading.RLock]
        | None,
    ]:
        """Return the cache and lock mapping for the manager's graph."""

        with self._LOCK:
            if not create:
                cache = self.graph.get(self.cache_key)
                locks = self.graph.get(self.locks_key)
                return cache, locks

            locks = _ensure_lock_mapping(self.graph, self.locks_key)
            cache = _ensure_graph_entry(
                self.graph,
                self.cache_key,
                factory=lambda: self._factory(max_entries, locks),
                validator=self._validator(max_entries),
            )
            if max_entries is None:
                _prune_locks(cache, locks)
            return cache, locks


def edge_version_cache(
    G: Any,
    key: Hashable,
    builder: Callable[[], T],
    *,
    max_entries: int | None = 128,
) -> T:
    """Return cached ``builder`` output tied to the edge version of ``G``."""

    if max_entries is not None:
        max_entries = int(max_entries)
        if max_entries < 0:
            raise ValueError("max_entries must be non-negative or None")
    if max_entries is not None and max_entries == 0:
        return builder()

    graph = get_graph(G)
    manager = graph.get("_edge_cache_manager")  # type: ignore[assignment]
    if not isinstance(manager, EdgeCacheManager) or manager.graph is not graph:
        manager = EdgeCacheManager(graph)
        graph["_edge_cache_manager"] = manager

    cache, locks = manager.get_cache(max_entries)
    edge_version = get_graph_version(graph, "_edge_version")
    lock = locks[key]

    with lock:
        entry = cache.get(key)
        if entry is not None and entry[0] == edge_version:
            return entry[1]

    try:
        value = builder()
    except (RuntimeError, ValueError) as exc:  # pragma: no cover - logging side effect
        logger.exception("edge_version_cache builder failed for %r: %s", key, exc)
        raise
    else:
        with lock:
            entry = cache.get(key)
            if entry is not None and entry[0] == edge_version:
                return entry[1]
            cache[key] = (edge_version, value)
            return value
```
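The memoisation contract can be sketched with a stand-in builder (illustrative only, not part of the release): a result is reused per key until the edge version moves, after which it is rebuilt.

```python
import networkx as nx

from tnfr.cache import edge_version_cache, increment_edge_version

G = nx.Graph()
calls = []

def build() -> int:
    calls.append(None)       # count how often the builder actually runs
    return len(calls)

assert edge_version_cache(G, "demo", build) == 1  # built once
assert edge_version_cache(G, "demo", build) == 1  # served from cache
increment_edge_version(G)                          # bumps "_edge_version", drops caches
assert edge_version_cache(G, "demo", build) == 2  # rebuilt for the new version
```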
```python
def cached_nodes_and_A(
    G: nx.Graph, *, cache_size: int | None = 1, require_numpy: bool = False
) -> tuple[tuple[Any, ...], Any]:
    """Return cached nodes tuple and adjacency matrix for ``G``."""

    nodes = cached_node_list(G)
    graph = G.graph

    checksum = getattr(graph.get("_node_list_cache"), "checksum", None)
    if checksum is None:
        checksum = graph.get("_node_list_checksum")
    if checksum is None:
        node_set_cache = graph.get(NODE_SET_CHECKSUM_KEY)
        if isinstance(node_set_cache, tuple) and len(node_set_cache) >= 2:
            checksum = node_set_cache[1]
    if checksum is None:
        checksum = ""

    key = f"_dnfr_{len(nodes)}_{checksum}"
    graph["_dnfr_nodes_checksum"] = checksum

    def builder() -> tuple[tuple[Any, ...], Any]:
        np = get_numpy()
        if np is None:
            return nodes, None
        A = nx.to_numpy_array(G, nodelist=nodes, weight=None, dtype=float)
        return nodes, A

    nodes, A = edge_version_cache(G, key, builder, max_entries=cache_size)

    if require_numpy and A is None:
        raise RuntimeError("NumPy is required for adjacency caching")

    return nodes, A
```
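Illustrative usage, assuming NumPy is installed: the adjacency matrix is built lazily through the edge-version cache and falls back to `None` without NumPy unless `require_numpy` is set.

```python
import networkx as nx

from tnfr.cache import cached_nodes_and_A

G = nx.cycle_graph(4)
nodes, A = cached_nodes_and_A(G, require_numpy=True)  # raises RuntimeError without NumPy
assert A.shape == (len(nodes), len(nodes))
assert A[0, 1] == 1.0  # unweighted adjacency (weight=None)
```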
```python
def _reset_edge_caches(graph: Any, G: Any) -> None:
    """Clear caches affected by edge updates."""

    cache, locks = EdgeCacheManager(graph).get_cache(None, create=False)
    if isinstance(cache, (dict, LRUCache)):
        cache.clear()
    if isinstance(locks, dict):
        locks.clear()
    mark_dnfr_prep_dirty(G)
    clear_node_repr_cache()
    for key in EDGE_VERSION_CACHE_KEYS:
        graph.pop(key, None)


def increment_edge_version(G: Any) -> None:
    """Increment the edge version counter in ``G.graph``."""

    graph = get_graph(G)
    increment_graph_version(graph, "_edge_version")
    _reset_edge_caches(graph, G)


@contextmanager
def edge_version_update(G: Any):
    """Scope a batch of edge mutations."""

    increment_edge_version(G)
    try:
        yield
    finally:
        increment_edge_version(G)
```
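Finally, a sketch of the batching context manager (hypothetical usage, assuming `get_graph(G)` resolves to `G.graph` for a plain networkx graph, as the module's own calls suggest): the version is bumped on entry and again on exit, so readers never reuse artifacts computed against a half-mutated edge set.

```python
import networkx as nx

from tnfr.cache import edge_version_update, get_graph_version

G = nx.Graph()
with edge_version_update(G):
    G.add_edge("u", "v")
    G.add_edge("v", "w")

assert get_graph_version(G.graph, "_edge_version") == 2  # one bump on entry, one on exit
```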