tnfr 3.0.3__py3-none-any.whl → 8.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tnfr might be problematic. Click here for more details.

Files changed (360) hide show
  1. tnfr/__init__.py +375 -56
  2. tnfr/__init__.pyi +33 -0
  3. tnfr/_compat.py +10 -0
  4. tnfr/_generated_version.py +34 -0
  5. tnfr/_version.py +49 -0
  6. tnfr/_version.pyi +7 -0
  7. tnfr/alias.py +723 -0
  8. tnfr/alias.pyi +108 -0
  9. tnfr/backends/__init__.py +354 -0
  10. tnfr/backends/jax_backend.py +173 -0
  11. tnfr/backends/numpy_backend.py +238 -0
  12. tnfr/backends/optimized_numpy.py +420 -0
  13. tnfr/backends/torch_backend.py +408 -0
  14. tnfr/cache.py +171 -0
  15. tnfr/cache.pyi +13 -0
  16. tnfr/cli/__init__.py +110 -0
  17. tnfr/cli/__init__.pyi +26 -0
  18. tnfr/cli/arguments.py +489 -0
  19. tnfr/cli/arguments.pyi +29 -0
  20. tnfr/cli/execution.py +914 -0
  21. tnfr/cli/execution.pyi +70 -0
  22. tnfr/cli/interactive_validator.py +614 -0
  23. tnfr/cli/utils.py +51 -0
  24. tnfr/cli/utils.pyi +7 -0
  25. tnfr/cli/validate.py +236 -0
  26. tnfr/compat/__init__.py +85 -0
  27. tnfr/compat/dataclass.py +136 -0
  28. tnfr/compat/jsonschema_stub.py +61 -0
  29. tnfr/compat/matplotlib_stub.py +73 -0
  30. tnfr/compat/numpy_stub.py +155 -0
  31. tnfr/config/__init__.py +224 -0
  32. tnfr/config/__init__.pyi +10 -0
  33. tnfr/config/constants.py +104 -0
  34. tnfr/config/constants.pyi +12 -0
  35. tnfr/config/defaults.py +54 -0
  36. tnfr/config/defaults_core.py +212 -0
  37. tnfr/config/defaults_init.py +33 -0
  38. tnfr/config/defaults_metric.py +104 -0
  39. tnfr/config/feature_flags.py +81 -0
  40. tnfr/config/feature_flags.pyi +16 -0
  41. tnfr/config/glyph_constants.py +31 -0
  42. tnfr/config/init.py +77 -0
  43. tnfr/config/init.pyi +8 -0
  44. tnfr/config/operator_names.py +254 -0
  45. tnfr/config/operator_names.pyi +36 -0
  46. tnfr/config/physics_derivation.py +354 -0
  47. tnfr/config/presets.py +83 -0
  48. tnfr/config/presets.pyi +7 -0
  49. tnfr/config/security.py +927 -0
  50. tnfr/config/thresholds.py +114 -0
  51. tnfr/config/tnfr_config.py +498 -0
  52. tnfr/constants/__init__.py +92 -0
  53. tnfr/constants/__init__.pyi +92 -0
  54. tnfr/constants/aliases.py +33 -0
  55. tnfr/constants/aliases.pyi +27 -0
  56. tnfr/constants/init.py +33 -0
  57. tnfr/constants/init.pyi +12 -0
  58. tnfr/constants/metric.py +104 -0
  59. tnfr/constants/metric.pyi +19 -0
  60. tnfr/core/__init__.py +33 -0
  61. tnfr/core/container.py +226 -0
  62. tnfr/core/default_implementations.py +329 -0
  63. tnfr/core/interfaces.py +279 -0
  64. tnfr/dynamics/__init__.py +238 -0
  65. tnfr/dynamics/__init__.pyi +83 -0
  66. tnfr/dynamics/adaptation.py +267 -0
  67. tnfr/dynamics/adaptation.pyi +7 -0
  68. tnfr/dynamics/adaptive_sequences.py +189 -0
  69. tnfr/dynamics/adaptive_sequences.pyi +14 -0
  70. tnfr/dynamics/aliases.py +23 -0
  71. tnfr/dynamics/aliases.pyi +19 -0
  72. tnfr/dynamics/bifurcation.py +232 -0
  73. tnfr/dynamics/canonical.py +229 -0
  74. tnfr/dynamics/canonical.pyi +48 -0
  75. tnfr/dynamics/coordination.py +385 -0
  76. tnfr/dynamics/coordination.pyi +25 -0
  77. tnfr/dynamics/dnfr.py +3034 -0
  78. tnfr/dynamics/dnfr.pyi +26 -0
  79. tnfr/dynamics/dynamic_limits.py +225 -0
  80. tnfr/dynamics/feedback.py +252 -0
  81. tnfr/dynamics/feedback.pyi +24 -0
  82. tnfr/dynamics/fused_dnfr.py +454 -0
  83. tnfr/dynamics/homeostasis.py +157 -0
  84. tnfr/dynamics/homeostasis.pyi +14 -0
  85. tnfr/dynamics/integrators.py +661 -0
  86. tnfr/dynamics/integrators.pyi +36 -0
  87. tnfr/dynamics/learning.py +310 -0
  88. tnfr/dynamics/learning.pyi +33 -0
  89. tnfr/dynamics/metabolism.py +254 -0
  90. tnfr/dynamics/nbody.py +796 -0
  91. tnfr/dynamics/nbody_tnfr.py +783 -0
  92. tnfr/dynamics/propagation.py +326 -0
  93. tnfr/dynamics/runtime.py +908 -0
  94. tnfr/dynamics/runtime.pyi +77 -0
  95. tnfr/dynamics/sampling.py +36 -0
  96. tnfr/dynamics/sampling.pyi +7 -0
  97. tnfr/dynamics/selectors.py +711 -0
  98. tnfr/dynamics/selectors.pyi +85 -0
  99. tnfr/dynamics/structural_clip.py +207 -0
  100. tnfr/errors/__init__.py +37 -0
  101. tnfr/errors/contextual.py +492 -0
  102. tnfr/execution.py +223 -0
  103. tnfr/execution.pyi +45 -0
  104. tnfr/extensions/__init__.py +205 -0
  105. tnfr/extensions/__init__.pyi +18 -0
  106. tnfr/extensions/base.py +173 -0
  107. tnfr/extensions/base.pyi +35 -0
  108. tnfr/extensions/business/__init__.py +71 -0
  109. tnfr/extensions/business/__init__.pyi +11 -0
  110. tnfr/extensions/business/cookbook.py +88 -0
  111. tnfr/extensions/business/cookbook.pyi +8 -0
  112. tnfr/extensions/business/health_analyzers.py +202 -0
  113. tnfr/extensions/business/health_analyzers.pyi +9 -0
  114. tnfr/extensions/business/patterns.py +183 -0
  115. tnfr/extensions/business/patterns.pyi +8 -0
  116. tnfr/extensions/medical/__init__.py +73 -0
  117. tnfr/extensions/medical/__init__.pyi +11 -0
  118. tnfr/extensions/medical/cookbook.py +88 -0
  119. tnfr/extensions/medical/cookbook.pyi +8 -0
  120. tnfr/extensions/medical/health_analyzers.py +181 -0
  121. tnfr/extensions/medical/health_analyzers.pyi +9 -0
  122. tnfr/extensions/medical/patterns.py +163 -0
  123. tnfr/extensions/medical/patterns.pyi +8 -0
  124. tnfr/flatten.py +262 -0
  125. tnfr/flatten.pyi +21 -0
  126. tnfr/gamma.py +354 -0
  127. tnfr/gamma.pyi +36 -0
  128. tnfr/glyph_history.py +377 -0
  129. tnfr/glyph_history.pyi +35 -0
  130. tnfr/glyph_runtime.py +19 -0
  131. tnfr/glyph_runtime.pyi +8 -0
  132. tnfr/immutable.py +218 -0
  133. tnfr/immutable.pyi +36 -0
  134. tnfr/initialization.py +203 -0
  135. tnfr/initialization.pyi +65 -0
  136. tnfr/io.py +10 -0
  137. tnfr/io.pyi +13 -0
  138. tnfr/locking.py +37 -0
  139. tnfr/locking.pyi +7 -0
  140. tnfr/mathematics/__init__.py +79 -0
  141. tnfr/mathematics/backend.py +453 -0
  142. tnfr/mathematics/backend.pyi +99 -0
  143. tnfr/mathematics/dynamics.py +408 -0
  144. tnfr/mathematics/dynamics.pyi +90 -0
  145. tnfr/mathematics/epi.py +391 -0
  146. tnfr/mathematics/epi.pyi +65 -0
  147. tnfr/mathematics/generators.py +242 -0
  148. tnfr/mathematics/generators.pyi +29 -0
  149. tnfr/mathematics/metrics.py +119 -0
  150. tnfr/mathematics/metrics.pyi +16 -0
  151. tnfr/mathematics/operators.py +239 -0
  152. tnfr/mathematics/operators.pyi +59 -0
  153. tnfr/mathematics/operators_factory.py +124 -0
  154. tnfr/mathematics/operators_factory.pyi +11 -0
  155. tnfr/mathematics/projection.py +87 -0
  156. tnfr/mathematics/projection.pyi +33 -0
  157. tnfr/mathematics/runtime.py +182 -0
  158. tnfr/mathematics/runtime.pyi +64 -0
  159. tnfr/mathematics/spaces.py +256 -0
  160. tnfr/mathematics/spaces.pyi +83 -0
  161. tnfr/mathematics/transforms.py +305 -0
  162. tnfr/mathematics/transforms.pyi +62 -0
  163. tnfr/metrics/__init__.py +79 -0
  164. tnfr/metrics/__init__.pyi +20 -0
  165. tnfr/metrics/buffer_cache.py +163 -0
  166. tnfr/metrics/buffer_cache.pyi +24 -0
  167. tnfr/metrics/cache_utils.py +214 -0
  168. tnfr/metrics/coherence.py +2009 -0
  169. tnfr/metrics/coherence.pyi +129 -0
  170. tnfr/metrics/common.py +158 -0
  171. tnfr/metrics/common.pyi +35 -0
  172. tnfr/metrics/core.py +316 -0
  173. tnfr/metrics/core.pyi +13 -0
  174. tnfr/metrics/diagnosis.py +833 -0
  175. tnfr/metrics/diagnosis.pyi +86 -0
  176. tnfr/metrics/emergence.py +245 -0
  177. tnfr/metrics/export.py +179 -0
  178. tnfr/metrics/export.pyi +7 -0
  179. tnfr/metrics/glyph_timing.py +379 -0
  180. tnfr/metrics/glyph_timing.pyi +81 -0
  181. tnfr/metrics/learning_metrics.py +280 -0
  182. tnfr/metrics/learning_metrics.pyi +21 -0
  183. tnfr/metrics/phase_coherence.py +351 -0
  184. tnfr/metrics/phase_compatibility.py +349 -0
  185. tnfr/metrics/reporting.py +183 -0
  186. tnfr/metrics/reporting.pyi +25 -0
  187. tnfr/metrics/sense_index.py +1203 -0
  188. tnfr/metrics/sense_index.pyi +9 -0
  189. tnfr/metrics/trig.py +373 -0
  190. tnfr/metrics/trig.pyi +13 -0
  191. tnfr/metrics/trig_cache.py +233 -0
  192. tnfr/metrics/trig_cache.pyi +10 -0
  193. tnfr/multiscale/__init__.py +32 -0
  194. tnfr/multiscale/hierarchical.py +517 -0
  195. tnfr/node.py +763 -0
  196. tnfr/node.pyi +139 -0
  197. tnfr/observers.py +255 -130
  198. tnfr/observers.pyi +31 -0
  199. tnfr/ontosim.py +144 -137
  200. tnfr/ontosim.pyi +28 -0
  201. tnfr/operators/__init__.py +1672 -0
  202. tnfr/operators/__init__.pyi +31 -0
  203. tnfr/operators/algebra.py +277 -0
  204. tnfr/operators/canonical_patterns.py +420 -0
  205. tnfr/operators/cascade.py +267 -0
  206. tnfr/operators/cycle_detection.py +358 -0
  207. tnfr/operators/definitions.py +4108 -0
  208. tnfr/operators/definitions.pyi +78 -0
  209. tnfr/operators/grammar.py +1164 -0
  210. tnfr/operators/grammar.pyi +140 -0
  211. tnfr/operators/hamiltonian.py +710 -0
  212. tnfr/operators/health_analyzer.py +809 -0
  213. tnfr/operators/jitter.py +272 -0
  214. tnfr/operators/jitter.pyi +11 -0
  215. tnfr/operators/lifecycle.py +314 -0
  216. tnfr/operators/metabolism.py +618 -0
  217. tnfr/operators/metrics.py +2138 -0
  218. tnfr/operators/network_analysis/__init__.py +27 -0
  219. tnfr/operators/network_analysis/source_detection.py +186 -0
  220. tnfr/operators/nodal_equation.py +395 -0
  221. tnfr/operators/pattern_detection.py +660 -0
  222. tnfr/operators/patterns.py +669 -0
  223. tnfr/operators/postconditions/__init__.py +38 -0
  224. tnfr/operators/postconditions/mutation.py +236 -0
  225. tnfr/operators/preconditions/__init__.py +1226 -0
  226. tnfr/operators/preconditions/coherence.py +305 -0
  227. tnfr/operators/preconditions/dissonance.py +236 -0
  228. tnfr/operators/preconditions/emission.py +128 -0
  229. tnfr/operators/preconditions/mutation.py +580 -0
  230. tnfr/operators/preconditions/reception.py +125 -0
  231. tnfr/operators/preconditions/resonance.py +364 -0
  232. tnfr/operators/registry.py +74 -0
  233. tnfr/operators/registry.pyi +9 -0
  234. tnfr/operators/remesh.py +1809 -0
  235. tnfr/operators/remesh.pyi +26 -0
  236. tnfr/operators/structural_units.py +268 -0
  237. tnfr/operators/unified_grammar.py +105 -0
  238. tnfr/parallel/__init__.py +54 -0
  239. tnfr/parallel/auto_scaler.py +234 -0
  240. tnfr/parallel/distributed.py +384 -0
  241. tnfr/parallel/engine.py +238 -0
  242. tnfr/parallel/gpu_engine.py +420 -0
  243. tnfr/parallel/monitoring.py +248 -0
  244. tnfr/parallel/partitioner.py +459 -0
  245. tnfr/py.typed +0 -0
  246. tnfr/recipes/__init__.py +22 -0
  247. tnfr/recipes/cookbook.py +743 -0
  248. tnfr/rng.py +178 -0
  249. tnfr/rng.pyi +26 -0
  250. tnfr/schemas/__init__.py +8 -0
  251. tnfr/schemas/grammar.json +94 -0
  252. tnfr/sdk/__init__.py +107 -0
  253. tnfr/sdk/__init__.pyi +19 -0
  254. tnfr/sdk/adaptive_system.py +173 -0
  255. tnfr/sdk/adaptive_system.pyi +21 -0
  256. tnfr/sdk/builders.py +370 -0
  257. tnfr/sdk/builders.pyi +51 -0
  258. tnfr/sdk/fluent.py +1121 -0
  259. tnfr/sdk/fluent.pyi +74 -0
  260. tnfr/sdk/templates.py +342 -0
  261. tnfr/sdk/templates.pyi +41 -0
  262. tnfr/sdk/utils.py +341 -0
  263. tnfr/secure_config.py +46 -0
  264. tnfr/security/__init__.py +70 -0
  265. tnfr/security/database.py +514 -0
  266. tnfr/security/subprocess.py +503 -0
  267. tnfr/security/validation.py +290 -0
  268. tnfr/selector.py +247 -0
  269. tnfr/selector.pyi +19 -0
  270. tnfr/sense.py +378 -0
  271. tnfr/sense.pyi +23 -0
  272. tnfr/services/__init__.py +17 -0
  273. tnfr/services/orchestrator.py +325 -0
  274. tnfr/sparse/__init__.py +39 -0
  275. tnfr/sparse/representations.py +492 -0
  276. tnfr/structural.py +705 -0
  277. tnfr/structural.pyi +83 -0
  278. tnfr/telemetry/__init__.py +35 -0
  279. tnfr/telemetry/cache_metrics.py +226 -0
  280. tnfr/telemetry/cache_metrics.pyi +64 -0
  281. tnfr/telemetry/nu_f.py +422 -0
  282. tnfr/telemetry/nu_f.pyi +108 -0
  283. tnfr/telemetry/verbosity.py +36 -0
  284. tnfr/telemetry/verbosity.pyi +15 -0
  285. tnfr/tokens.py +58 -0
  286. tnfr/tokens.pyi +36 -0
  287. tnfr/tools/__init__.py +20 -0
  288. tnfr/tools/domain_templates.py +478 -0
  289. tnfr/tools/sequence_generator.py +846 -0
  290. tnfr/topology/__init__.py +13 -0
  291. tnfr/topology/asymmetry.py +151 -0
  292. tnfr/trace.py +543 -0
  293. tnfr/trace.pyi +42 -0
  294. tnfr/tutorials/__init__.py +38 -0
  295. tnfr/tutorials/autonomous_evolution.py +285 -0
  296. tnfr/tutorials/interactive.py +1576 -0
  297. tnfr/tutorials/structural_metabolism.py +238 -0
  298. tnfr/types.py +775 -0
  299. tnfr/types.pyi +357 -0
  300. tnfr/units.py +68 -0
  301. tnfr/units.pyi +13 -0
  302. tnfr/utils/__init__.py +282 -0
  303. tnfr/utils/__init__.pyi +215 -0
  304. tnfr/utils/cache.py +4223 -0
  305. tnfr/utils/cache.pyi +470 -0
  306. tnfr/utils/callbacks.py +375 -0
  307. tnfr/utils/callbacks.pyi +49 -0
  308. tnfr/utils/chunks.py +108 -0
  309. tnfr/utils/chunks.pyi +22 -0
  310. tnfr/utils/data.py +428 -0
  311. tnfr/utils/data.pyi +74 -0
  312. tnfr/utils/graph.py +85 -0
  313. tnfr/utils/graph.pyi +10 -0
  314. tnfr/utils/init.py +821 -0
  315. tnfr/utils/init.pyi +80 -0
  316. tnfr/utils/io.py +559 -0
  317. tnfr/utils/io.pyi +66 -0
  318. tnfr/utils/numeric.py +114 -0
  319. tnfr/utils/numeric.pyi +21 -0
  320. tnfr/validation/__init__.py +257 -0
  321. tnfr/validation/__init__.pyi +85 -0
  322. tnfr/validation/compatibility.py +460 -0
  323. tnfr/validation/compatibility.pyi +6 -0
  324. tnfr/validation/config.py +73 -0
  325. tnfr/validation/graph.py +139 -0
  326. tnfr/validation/graph.pyi +18 -0
  327. tnfr/validation/input_validation.py +755 -0
  328. tnfr/validation/invariants.py +712 -0
  329. tnfr/validation/rules.py +253 -0
  330. tnfr/validation/rules.pyi +44 -0
  331. tnfr/validation/runtime.py +279 -0
  332. tnfr/validation/runtime.pyi +28 -0
  333. tnfr/validation/sequence_validator.py +162 -0
  334. tnfr/validation/soft_filters.py +170 -0
  335. tnfr/validation/soft_filters.pyi +32 -0
  336. tnfr/validation/spectral.py +164 -0
  337. tnfr/validation/spectral.pyi +42 -0
  338. tnfr/validation/validator.py +1266 -0
  339. tnfr/validation/window.py +39 -0
  340. tnfr/validation/window.pyi +1 -0
  341. tnfr/visualization/__init__.py +98 -0
  342. tnfr/visualization/cascade_viz.py +256 -0
  343. tnfr/visualization/hierarchy.py +284 -0
  344. tnfr/visualization/sequence_plotter.py +784 -0
  345. tnfr/viz/__init__.py +60 -0
  346. tnfr/viz/matplotlib.py +278 -0
  347. tnfr/viz/matplotlib.pyi +35 -0
  348. tnfr-8.5.0.dist-info/METADATA +573 -0
  349. tnfr-8.5.0.dist-info/RECORD +353 -0
  350. tnfr-8.5.0.dist-info/entry_points.txt +3 -0
  351. tnfr-3.0.3.dist-info/licenses/LICENSE.txt → tnfr-8.5.0.dist-info/licenses/LICENSE.md +1 -1
  352. tnfr/constants.py +0 -183
  353. tnfr/dynamics.py +0 -543
  354. tnfr/helpers.py +0 -198
  355. tnfr/main.py +0 -37
  356. tnfr/operators.py +0 -296
  357. tnfr-3.0.3.dist-info/METADATA +0 -35
  358. tnfr-3.0.3.dist-info/RECORD +0 -13
  359. {tnfr-3.0.3.dist-info → tnfr-8.5.0.dist-info}/WHEEL +0 -0
  360. {tnfr-3.0.3.dist-info → tnfr-8.5.0.dist-info}/top_level.txt +0 -0
tnfr/dynamics/dnfr.py ADDED
@@ -0,0 +1,3034 @@
1
+ """ΔNFR (dynamic network field response) utilities and strategies.
2
+
3
+ This module provides helper functions to configure, cache and apply ΔNFR
4
+ components such as phase, epidemiological state and vortex fields during
5
+ simulations. The neighbour accumulation helpers reuse cached edge indices
6
+ and NumPy workspaces whenever available so cosine, sine, EPI, νf and topology
7
+ means remain faithful to the canonical ΔNFR reorganisation without redundant
8
+ allocations.
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ import math
14
+ import sys
15
+ from collections.abc import Callable, Iterator, Mapping, MutableMapping, Sequence
16
+ from concurrent.futures import ProcessPoolExecutor
17
+ from types import ModuleType
18
+ from typing import TYPE_CHECKING, Any, cast
19
+
20
+ from time import perf_counter
21
+
22
+ from ..alias import get_attr, get_theta_attr, set_dnfr
23
+ from ..constants import DEFAULTS, get_param
24
+ from ..constants.aliases import ALIAS_EPI, ALIAS_VF
25
+ from ..metrics.common import merge_and_normalize_weights
26
+ from ..metrics.trig import neighbor_phase_mean_list
27
+ from ..metrics.trig_cache import compute_theta_trig
28
+ from ..types import (
29
+ DeltaNFRHook,
30
+ DnfrCacheVectors,
31
+ DnfrVectorMap,
32
+ NeighborStats,
33
+ NodeId,
34
+ TNFRGraph,
35
+ )
36
+ from ..utils import (
37
+ DNFR_PREP_STATE_KEY,
38
+ DnfrPrepState,
39
+ DnfrCache,
40
+ CacheManager,
41
+ _graph_cache_manager,
42
+ angle_diff,
43
+ angle_diff_array,
44
+ cached_node_list,
45
+ cached_nodes_and_A,
46
+ get_numpy,
47
+ normalize_weights,
48
+ resolve_chunk_size,
49
+ new_dnfr_cache,
50
+ )
51
+
52
+ if TYPE_CHECKING: # pragma: no cover - import-time typing hook
53
+ import numpy as np
54
+
55
# Module-level tuning constants; their use sites are elsewhere in this module.
_MEAN_VECTOR_EPS = 1e-12  # epsilon guard for near-zero mean resultant vectors
_SPARSE_DENSITY_THRESHOLD = 0.25  # edge-density cutoff between sparse/dense paths — confirm at use sites
_DNFR_APPROX_BYTES_PER_EDGE = 48  # rough per-edge workspace footprint (bytes) for sizing heuristics
58
+
59
+
60
+ def _should_vectorize(G: TNFRGraph, np_module: ModuleType | None) -> bool:
61
+ """Return ``True`` when NumPy is available unless the graph disables it."""
62
+
63
+ if np_module is None:
64
+ return False
65
+ flag = G.graph.get("vectorized_dnfr")
66
+ if flag is None:
67
+ return True
68
+ return bool(flag)
69
+
70
+
71
# Names of the NumPy mirror/workspace attributes kept on the ΔNFR cache and
# prep data. They are cleared together when the cache is rebuilt (see
# _init_dnfr_cache) and probed by _has_cached_numpy_buffers.
_NUMPY_CACHE_ATTRS = (
    "theta_np",
    "epi_np",
    "vf_np",
    "cos_theta_np",
    "sin_theta_np",
    "deg_array",
    "neighbor_x_np",
    "neighbor_y_np",
    "neighbor_epi_sum_np",
    "neighbor_vf_sum_np",
    "neighbor_count_np",
    "neighbor_deg_sum_np",
    "neighbor_inv_count_np",
    "neighbor_cos_avg_np",
    "neighbor_sin_avg_np",
    "neighbor_mean_tmp_np",
    "neighbor_mean_length_np",
    "neighbor_accum_np",
    "neighbor_edge_values_np",
    "dense_components_np",
    "dense_accum_np",
    "dense_degree_np",
)
95
+
96
+
97
+ def _profile_start_stop(
98
+ profile: MutableMapping[str, float] | None,
99
+ *,
100
+ keys: Sequence[str] = (),
101
+ ) -> tuple[Callable[[], float], Callable[[str, float], None]]:
102
+ """Return helpers to measure wall-clock durations for ``profile`` keys."""
103
+
104
+ if profile is not None:
105
+ for key in keys:
106
+ profile.setdefault(key, 0.0)
107
+
108
+ def _start() -> float:
109
+ return perf_counter()
110
+
111
+ def _stop(metric: str, start: float) -> None:
112
+ profile[metric] = float(profile.get(metric, 0.0)) + (perf_counter() - start)
113
+
114
+ else:
115
+
116
+ def _start() -> float:
117
+ return 0.0
118
+
119
+ def _stop(
120
+ metric: str, start: float
121
+ ) -> None: # noqa: ARG001 - uniform signature
122
+ return None
123
+
124
+ return _start, _stop
125
+
126
+
127
+ def _iter_chunk_offsets(total: int, jobs: int) -> Iterator[tuple[int, int]]:
128
+ """Yield ``(start, end)`` offsets splitting ``total`` items across ``jobs``."""
129
+
130
+ if total <= 0 or jobs <= 1:
131
+ return
132
+
133
+ jobs = max(1, min(int(jobs), total))
134
+ base, extra = divmod(total, jobs)
135
+ start = 0
136
+ for i in range(jobs):
137
+ size = base + (1 if i < extra else 0)
138
+ if size <= 0:
139
+ continue
140
+ end = start + size
141
+ yield start, end
142
+ start = end
143
+
144
+
145
+ def _neighbor_sums_worker(
146
+ start: int,
147
+ end: int,
148
+ neighbor_indices: Sequence[Sequence[int]],
149
+ cos_th: Sequence[float],
150
+ sin_th: Sequence[float],
151
+ epi: Sequence[float],
152
+ vf: Sequence[float],
153
+ x_base: Sequence[float],
154
+ y_base: Sequence[float],
155
+ epi_base: Sequence[float],
156
+ vf_base: Sequence[float],
157
+ count_base: Sequence[float],
158
+ deg_base: Sequence[float] | None,
159
+ deg_list: Sequence[float] | None,
160
+ degs_list: Sequence[float] | None,
161
+ ) -> tuple[
162
+ int,
163
+ list[float],
164
+ list[float],
165
+ list[float],
166
+ list[float],
167
+ list[float],
168
+ list[float] | None,
169
+ ]:
170
+ """Return partial neighbour sums for the ``[start, end)`` range."""
171
+
172
+ chunk_x: list[float] = []
173
+ chunk_y: list[float] = []
174
+ chunk_epi: list[float] = []
175
+ chunk_vf: list[float] = []
176
+ chunk_count: list[float] = []
177
+ chunk_deg: list[float] | None = [] if deg_base is not None else None
178
+
179
+ for offset, idx in enumerate(range(start, end)):
180
+ neighbors = neighbor_indices[idx]
181
+ x_i = float(x_base[offset])
182
+ y_i = float(y_base[offset])
183
+ epi_i = float(epi_base[offset])
184
+ vf_i = float(vf_base[offset])
185
+ count_i = float(count_base[offset])
186
+ if deg_base is not None and chunk_deg is not None:
187
+ deg_i_acc = float(deg_base[offset])
188
+ else:
189
+ deg_i_acc = 0.0
190
+ deg_i = float(degs_list[idx]) if degs_list is not None else 0.0
191
+
192
+ for neighbor_idx in neighbors:
193
+ x_i += float(cos_th[neighbor_idx])
194
+ y_i += float(sin_th[neighbor_idx])
195
+ epi_i += float(epi[neighbor_idx])
196
+ vf_i += float(vf[neighbor_idx])
197
+ count_i += 1.0
198
+ if chunk_deg is not None:
199
+ if deg_list is not None:
200
+ deg_i_acc += float(deg_list[neighbor_idx])
201
+ else:
202
+ deg_i_acc += deg_i
203
+
204
+ chunk_x.append(x_i)
205
+ chunk_y.append(y_i)
206
+ chunk_epi.append(epi_i)
207
+ chunk_vf.append(vf_i)
208
+ chunk_count.append(count_i)
209
+ if chunk_deg is not None:
210
+ chunk_deg.append(deg_i_acc)
211
+
212
+ return (
213
+ start,
214
+ chunk_x,
215
+ chunk_y,
216
+ chunk_epi,
217
+ chunk_vf,
218
+ chunk_count,
219
+ chunk_deg,
220
+ )
221
+
222
+
223
def _dnfr_gradients_worker(
    start: int,
    end: int,
    nodes: Sequence[NodeId],
    theta: list[float],
    epi: list[float],
    vf: list[float],
    th_bar: list[float],
    epi_bar: list[float],
    vf_bar: list[float],
    deg_bar: list[float] | None,
    degs: Mapping[Any, float] | Sequence[float] | None,
    w_phase: float,
    w_epi: float,
    w_vf: float,
    w_topo: float,
) -> tuple[int, list[float]]:
    """Return partial ΔNFR gradients for the ``[start, end)`` range.

    Each gradient mixes four weighted components: phase alignment (signed
    angular distance to the neighbourhood mean, normalised by π), EPI and
    νf differences to their neighbourhood means, and — when ``w_topo`` is
    non-zero and degree data is available — a topology term comparing the
    node degree with its neighbourhood degree mean.
    """
    chunk: list[float] = []
    # Hoist loop invariants: whether the topology term applies and how
    # ``degs`` is indexed.
    use_topo = w_topo != 0.0 and deg_bar is not None and degs is not None
    # Fix: honour the declared ``Mapping`` contract instead of special-casing
    # only ``dict``; other Mapping implementations were previously indexed
    # positionally, which is wrong for keyed containers.
    degs_is_mapping = isinstance(degs, Mapping)
    for idx in range(start, end):
        g_phase = -angle_diff(theta[idx], th_bar[idx]) / math.pi
        g_epi = epi_bar[idx] - epi[idx]
        g_vf = vf_bar[idx] - vf[idx]
        if use_topo:
            if degs_is_mapping:
                deg_i = float(degs.get(nodes[idx], 0))
            else:
                deg_i = float(degs[idx])
            g_topo = deg_bar[idx] - deg_i
        else:
            g_topo = 0.0
        chunk.append(
            w_phase * g_phase + w_epi * g_epi + w_vf * g_vf + w_topo * g_topo
        )
    return start, chunk
258
+
259
+
260
+ def _resolve_parallel_jobs(n_jobs: int | None, total: int) -> int | None:
261
+ """Return an effective worker count for ``total`` items or ``None``."""
262
+
263
+ if n_jobs is None:
264
+ return None
265
+ try:
266
+ jobs = int(n_jobs)
267
+ except (TypeError, ValueError):
268
+ return None
269
+ if jobs <= 1 or total <= 1:
270
+ return None
271
+ return max(1, min(jobs, total))
272
+
273
+
274
+ def _is_numpy_like(obj) -> bool:
275
+ return (
276
+ getattr(obj, "dtype", None) is not None
277
+ and getattr(obj, "shape", None) is not None
278
+ )
279
+
280
+
281
def _has_cached_numpy_buffers(data: dict, cache: DnfrCache | None) -> bool:
    """Report whether ``data`` or ``cache`` already hold NumPy buffers.

    Probes every mirror attribute listed in ``_NUMPY_CACHE_ATTRS`` on both
    containers, plus the adjacency matrix stored under ``data["A"]``.
    """
    if any(_is_numpy_like(data.get(attr)) for attr in _NUMPY_CACHE_ATTRS):
        return True
    if cache is not None and any(
        _is_numpy_like(getattr(cache, attr, None)) for attr in _NUMPY_CACHE_ATTRS
    ):
        return True
    return _is_numpy_like(data.get("A"))
295
+
296
+
297
# Public API of this module: the default ΔNFR computation, hook registration,
# and the alternative ΔNFR strategies.
__all__ = (
    "default_compute_delta_nfr",
    "set_delta_nfr_hook",
    "dnfr_phase_only",
    "dnfr_epi_vf_mixed",
    "dnfr_laplacian",
    "compute_delta_nfr_hamiltonian",
)
305
+
306
+
307
def _write_dnfr_metadata(
    G, *, weights: dict, hook_name: str, note: str | None = None
) -> None:
    """Record the active ΔNFR hook and its weight mix on ``G.graph``.

    Stores a ``_DNFR_META`` mapping (raw and normalised weights, the
    non-zero components and the hook name) plus a string-friendly
    ``_dnfr_hook_name`` entry. ``weights`` may include arbitrary
    components (phase/epi/vf/topo/etc.).
    """
    normalised = normalize_weights(weights, weights.keys())
    metadata = {
        "hook": hook_name,
        "weights_raw": dict(weights),
        "weights_norm": normalised,
        "components": [name for name, value in normalised.items() if value != 0.0],
        "doc": "ΔNFR = Σ w_i·g_i",
    }
    if note:
        metadata["note"] = str(note)
    G.graph["_DNFR_META"] = metadata
    G.graph["_dnfr_hook_name"] = hook_name  # string friendly
326
+
327
+
328
def _configure_dnfr_weights(G) -> dict:
    """Normalise and persist the ΔNFR weight mix on the graph.

    Reads ``G.graph['DNFR_WEIGHTS']`` (falling back to defaults), normalises
    the phase/epi/vf/topo components and stores the result under
    ``G.graph['_dnfr_weights']`` so each simulation step can reuse it
    without recomputing the mix.
    """
    normalised = merge_and_normalize_weights(
        G, "DNFR_WEIGHTS", ("phase", "epi", "vf", "topo"), default=0.0
    )
    G.graph["_dnfr_weights"] = normalised
    return normalised
340
+
341
+
342
def _init_dnfr_cache(
    G: TNFRGraph,
    nodes: Sequence[NodeId],
    cache_or_manager: CacheManager | DnfrCache | None = None,
    checksum: Any | None = None,
    force_refresh: bool = False,
    *,
    manager: CacheManager | None = None,
) -> tuple[
    DnfrCache,
    dict[NodeId, int],
    list[float],
    list[float],
    list[float],
    list[float],
    list[float],
    bool,
]:
    """Initialise or reuse cached ΔNFR arrays.

    Returns ``(cache, idx, theta, epi, vf, cos_theta, sin_theta, rebuilt)``
    where ``rebuilt`` is ``True`` when the cache was (re)created rather than
    reused.

    ``manager`` telemetry became mandatory in TNFR 9.0 to expose cache hits,
    misses and timings. Older callers still pass a ``cache`` instance as the
    third positional argument; this helper supports both signatures by seeding
    the manager-backed state with the provided cache when necessary.
    """

    # Legacy-signature shim: a CacheManager passed positionally becomes the
    # keyword ``manager``.
    if manager is None and isinstance(cache_or_manager, CacheManager):
        manager = cache_or_manager
        cache_or_manager = None

    if manager is None:
        manager = _graph_cache_manager(G.graph)

    graph = G.graph
    state = manager.get(DNFR_PREP_STATE_KEY)
    if not isinstance(state, DnfrPrepState):
        # Foreign/corrupt entry under our key: drop it and re-fetch so the
        # manager materialises a fresh state object.
        manager.clear(DNFR_PREP_STATE_KEY)
        state = manager.get(DNFR_PREP_STATE_KEY)

    # Legacy-signature shim: an explicit DnfrCache seeds the managed state,
    # inheriting its checksum when the caller did not provide one.
    if isinstance(cache_or_manager, DnfrCache):
        state.cache = cache_or_manager
        if checksum is None:
            checksum = cache_or_manager.checksum

    cache = state.cache
    # Reuse only when no refresh is forced, the checksum matches and the
    # cached vectors still cover every node.
    reuse = (
        not force_refresh
        and isinstance(cache, DnfrCache)
        and cache.checksum == checksum
        and len(cache.theta) == len(nodes)
    )
    if reuse:
        manager.increment_hit(DNFR_PREP_STATE_KEY)
        graph["_dnfr_prep_cache"] = cache
        return (
            cache,
            cache.idx,
            cache.theta,
            cache.epi,
            cache.vf,
            cache.cos_theta,
            cache.sin_theta,
            False,  # not rebuilt
        )

    def _rebuild(current: DnfrPrepState | Any) -> DnfrPrepState:
        # Runs under the manager's update protocol; rebuilds every vector,
        # reusing the previous DnfrCache object when one exists so external
        # references to it stay valid.
        if not isinstance(current, DnfrPrepState):
            raise RuntimeError("ΔNFR prep state unavailable during rebuild")
        prev_cache = current.cache if isinstance(current.cache, DnfrCache) else None
        idx_local = {n: i for i, n in enumerate(nodes)}
        size = len(nodes)
        zeros = [0.0] * size
        cache_new = prev_cache if prev_cache is not None else new_dnfr_cache()
        cache_new.idx = idx_local
        cache_new.theta = zeros.copy()
        cache_new.epi = zeros.copy()
        cache_new.vf = zeros.copy()
        cache_new.cos_theta = [1.0] * size  # cos of the zeroed theta vector
        cache_new.sin_theta = [0.0] * size  # sin of the zeroed theta vector
        cache_new.neighbor_x = zeros.copy()
        cache_new.neighbor_y = zeros.copy()
        cache_new.neighbor_epi_sum = zeros.copy()
        cache_new.neighbor_vf_sum = zeros.copy()
        cache_new.neighbor_count = zeros.copy()
        cache_new.neighbor_deg_sum = zeros.copy() if size else []
        cache_new.degs = None
        cache_new.edge_src = None
        cache_new.edge_dst = None
        cache_new.checksum = checksum

        # Reset any numpy mirrors or aggregated buffers to avoid leaking
        # state across refresh cycles (e.g. switching between vectorised
        # and Python paths or reusing legacy caches).
        if prev_cache is not None:
            for attr in _NUMPY_CACHE_ATTRS:
                setattr(cache_new, attr, None)
            for attr in (
                "th_bar_np",
                "epi_bar_np",
                "vf_bar_np",
                "deg_bar_np",
                "grad_phase_np",
                "grad_epi_np",
                "grad_vf_np",
                "grad_topo_np",
                "grad_total_np",
            ):
                setattr(cache_new, attr, None)
            cache_new.edge_src = None
            cache_new.edge_dst = None
            cache_new.edge_signature = None
            cache_new.neighbor_accum_signature = None
            # Preserve the previous degree map when reusing a legacy cache.
            cache_new.degs = prev_cache.degs if prev_cache else None
            cache_new.checksum = checksum
        current.cache = cache_new
        graph["_dnfr_prep_cache"] = cache_new
        return current

    with manager.timer(DNFR_PREP_STATE_KEY):
        state = manager.update(DNFR_PREP_STATE_KEY, _rebuild)
    manager.increment_miss(DNFR_PREP_STATE_KEY)
    cache = state.cache
    if not isinstance(cache, DnfrCache):  # pragma: no cover - defensive guard
        raise RuntimeError("ΔNFR cache initialisation failed")
    return (
        cache,
        cache.idx,
        cache.theta,
        cache.epi,
        cache.vf,
        cache.cos_theta,
        cache.sin_theta,
        True,  # rebuilt
    )
476
+
477
+
478
+ def _ensure_numpy_vectors(cache: DnfrCache, np: ModuleType) -> DnfrCacheVectors:
479
+ """Ensure NumPy copies of cached vectors are initialised and up to date."""
480
+
481
+ if cache is None:
482
+ return (None, None, None, None, None)
483
+
484
+ arrays: list[Any | None] = []
485
+ size = len(cache.theta)
486
+ for attr_np, source_attr in (
487
+ ("theta_np", "theta"),
488
+ ("epi_np", "epi"),
489
+ ("vf_np", "vf"),
490
+ ("cos_theta_np", "cos_theta"),
491
+ ("sin_theta_np", "sin_theta"),
492
+ ):
493
+ arr = getattr(cache, attr_np)
494
+ if arr is not None and getattr(arr, "shape", None) == (size,):
495
+ arrays.append(arr)
496
+ continue
497
+ src = getattr(cache, source_attr)
498
+ if src is None:
499
+ setattr(cache, attr_np, None)
500
+ arrays.append(None)
501
+ continue
502
+ arr = np.asarray(src, dtype=float)
503
+ if getattr(arr, "shape", None) != (size,):
504
+ arr = np.array(src, dtype=float)
505
+ setattr(cache, attr_np, arr)
506
+ arrays.append(arr)
507
+ return tuple(arrays)
508
+
509
+
510
+ def _ensure_numpy_degrees(
511
+ cache: DnfrCache,
512
+ deg_list: Sequence[float] | None,
513
+ np: ModuleType,
514
+ ) -> np.ndarray | None:
515
+ """Initialise/update NumPy array mirroring ``deg_list``.
516
+
517
+ Deg_array reuse pattern:
518
+ -------------------------
519
+ The degree array (deg_array) is a cached NumPy buffer that stores node
520
+ degrees for topology-based ΔNFR computations. The reuse pattern follows:
521
+
522
+ 1. **Allocation**: Created once when topology weight (w_topo) > 0 or when
523
+ caching is enabled, sized to match the node count.
524
+
525
+ 2. **Reuse across steps**: When the graph topology is stable (no edge
526
+ additions/removals), the same deg_array buffer is reused across
527
+ multiple ΔNFR computation steps by updating in-place via np.copyto.
528
+
529
+ 3. **Count buffer optimization**: For undirected graphs where node degree
530
+ equals neighbor count, deg_array can serve double duty as the count
531
+ buffer (see _accumulate_neighbors_numpy lines 2185-2194), eliminating
532
+ the need for an extra accumulator row.
533
+
534
+ 4. **Invalidation**: Cache is cleared when graph.edges changes or when
535
+ _dnfr_prep_dirty flag is set, ensuring fresh allocation on next use.
536
+
537
+ This pattern maintains ΔNFR computational accuracy (Invariant #8) while
538
+ minimizing allocations for stable topologies.
539
+ """
540
+
541
+ if deg_list is None:
542
+ if cache is not None:
543
+ cache.deg_array = None
544
+ return None
545
+ if cache is None:
546
+ return np.array(deg_list, dtype=float)
547
+ arr = cache.deg_array
548
+ if arr is None or len(arr) != len(deg_list):
549
+ arr = np.array(deg_list, dtype=float)
550
+ else:
551
+ np.copyto(arr, deg_list, casting="unsafe")
552
+ cache.deg_array = arr
553
+ return arr
554
+
555
+
556
+ def _resolve_numpy_degree_array(
557
+ data: MutableMapping[str, Any],
558
+ count: np.ndarray | None,
559
+ *,
560
+ cache: DnfrCache | None,
561
+ np: ModuleType,
562
+ ) -> np.ndarray | None:
563
+ """Return the vector of node degrees required for topology gradients."""
564
+
565
+ if data["w_topo"] == 0.0:
566
+ return None
567
+ deg_array = data.get("deg_array")
568
+ if deg_array is not None:
569
+ return deg_array
570
+ deg_list = data.get("deg_list")
571
+ if deg_list is not None:
572
+ deg_array = np.array(deg_list, dtype=float)
573
+ data["deg_array"] = deg_array
574
+ if cache is not None:
575
+ cache.deg_array = deg_array
576
+ return deg_array
577
+ return count
578
+
579
+
580
+ def _ensure_cached_array(
581
+ cache: DnfrCache | None,
582
+ attr: str,
583
+ shape: tuple[int, ...],
584
+ np: ModuleType,
585
+ ) -> np.ndarray:
586
+ """Return a cached NumPy buffer with ``shape`` creating/reusing it."""
587
+
588
+ if np is None:
589
+ raise RuntimeError("NumPy is required to build cached arrays")
590
+ arr = getattr(cache, attr) if cache is not None else None
591
+ if arr is None or getattr(arr, "shape", None) != shape:
592
+ arr = np.empty(shape, dtype=float)
593
+ if cache is not None:
594
+ setattr(cache, attr, arr)
595
+ return arr
596
+
597
+
598
def _ensure_numpy_state_vectors(
    data: MutableMapping[str, Any], np: ModuleType
) -> DnfrVectorMap:
    """Synchronise list-based state vectors with their NumPy counterparts.

    For each of the five state fields (theta, epi, vf, cos_theta, sin_theta)
    the resolution order is: the ``*_np`` entry already present in ``data``,
    then the corresponding cache-held array, then a conversion of the plain
    (list) entry.  A usable array is written back under both the ``*_np`` and
    the plain key of ``data`` (and onto the cache), so downstream consumers
    see a single shared buffer; an unresolvable field yields ``None``.
    """

    nodes = data.get("nodes") or ()
    size = len(nodes)
    cache: DnfrCache | None = data.get("cache")

    # Cache-side arrays act as the second lookup tier after data["*_np"].
    cache_arrays: DnfrCacheVectors = (None, None, None, None, None)
    if cache is not None:
        cache_arrays = _ensure_numpy_vectors(cache, np)

    result: dict[str, Any | None] = {}
    for plain_key, np_key, cached_arr, result_key in (
        ("theta", "theta_np", cache_arrays[0], "theta"),
        ("epi", "epi_np", cache_arrays[1], "epi"),
        ("vf", "vf_np", cache_arrays[2], "vf"),
        ("cos_theta", "cos_theta_np", cache_arrays[3], "cos"),
        ("sin_theta", "sin_theta_np", cache_arrays[4], "sin"),
    ):
        arr = data.get(np_key)
        if arr is None:
            arr = cached_arr
        # Rebuild whenever the candidate is missing or sized for a different
        # node count (e.g. after topology changes).
        if arr is None or getattr(arr, "shape", None) != (size,):
            src = data.get(plain_key)
            if src is None and cache is not None:
                src = getattr(cache, plain_key)
            if src is None:
                arr = None
            else:
                # asarray avoids a copy when src is already a float ndarray;
                # np.array forces a fresh buffer when the shape is still wrong
                # (defensive — shouldn't normally trigger for 1-D sources).
                arr = np.asarray(src, dtype=float)
                if getattr(arr, "shape", None) != (size,):
                    arr = np.array(src, dtype=float)
        if arr is not None:
            # Alias the ndarray under both keys so the list and vector views
            # stay in lock-step for the rest of the step.
            data[np_key] = arr
            data[plain_key] = arr
            if cache is not None:
                setattr(cache, np_key, arr)
        else:
            data[np_key] = None
        result[result_key] = arr

    return result
642
+
643
+
644
+ def _build_edge_index_arrays(
645
+ G: TNFRGraph,
646
+ nodes: Sequence[NodeId],
647
+ idx: Mapping[NodeId, int],
648
+ np: ModuleType,
649
+ ) -> tuple[np.ndarray, np.ndarray]:
650
+ """Create (src, dst) index arrays for ``G`` respecting ``nodes`` order."""
651
+
652
+ if np is None:
653
+ return None, None
654
+ if not nodes:
655
+ empty = np.empty(0, dtype=np.intp)
656
+ return empty, empty
657
+
658
+ src = []
659
+ dst = []
660
+ append_src = src.append
661
+ append_dst = dst.append
662
+ for node in nodes:
663
+ i = idx.get(node)
664
+ if i is None:
665
+ continue
666
+ for neighbor in G.neighbors(node):
667
+ j = idx.get(neighbor)
668
+ if j is None:
669
+ continue
670
+ append_src(i)
671
+ append_dst(j)
672
+ if not src:
673
+ empty = np.empty(0, dtype=np.intp)
674
+ return empty, empty
675
+ edge_src = np.asarray(src, dtype=np.intp)
676
+ edge_dst = np.asarray(dst, dtype=np.intp)
677
+ return edge_src, edge_dst
678
+
679
+
680
def _refresh_dnfr_vectors(
    G: TNFRGraph, nodes: Sequence[NodeId], cache: DnfrCache
) -> None:
    """Update cached angle and state vectors for ΔNFR.

    Two regimes exist.  When vectorisation is enabled and
    ``compute_theta_trig`` returned NumPy arrays of the right shape
    (``np_ready``), the five ``*_np`` cache buffers are synchronised in place
    and the Python-list mirrors are merely resized with placeholder values.
    Otherwise the list mirrors are filled per node and the ``*_np`` buffers
    are either rebuilt from them or dropped entirely.
    """
    np_module = get_numpy()
    trig = compute_theta_trig(((n, G.nodes[n]) for n in nodes), np=np_module)
    use_numpy = _should_vectorize(G, np_module)
    node_count = len(nodes)
    trig_theta = getattr(trig, "theta_values", None)
    trig_cos = getattr(trig, "cos_values", None)
    trig_sin = getattr(trig, "sin_values", None)
    # The fast path requires all three trig buffers to be genuine ndarrays of
    # identical shape with one entry per node; any mismatch falls back to the
    # per-node loop below.
    np_ready = (
        use_numpy
        and np_module is not None
        and isinstance(trig_theta, getattr(np_module, "ndarray", tuple()))
        and isinstance(trig_cos, getattr(np_module, "ndarray", tuple()))
        and isinstance(trig_sin, getattr(np_module, "ndarray", tuple()))
        and getattr(trig_theta, "shape", None) == getattr(trig_cos, "shape", None)
        and getattr(trig_theta, "shape", None) == getattr(trig_sin, "shape", None)
        and (trig_theta.shape[0] if getattr(trig_theta, "ndim", 0) else 0) == node_count
    )

    if np_ready:
        if node_count:
            # fromiter with an explicit count pre-sizes the output, avoiding
            # the growth reallocations of np.array(list(...)).
            epi_arr = np_module.fromiter(
                (get_attr(G.nodes[node], ALIAS_EPI, 0.0) for node in nodes),
                dtype=float,
                count=node_count,
            )
            vf_arr = np_module.fromiter(
                (get_attr(G.nodes[node], ALIAS_VF, 0.0) for node in nodes),
                dtype=float,
                count=node_count,
            )
        else:
            epi_arr = np_module.empty(0, dtype=float)
            vf_arr = np_module.empty(0, dtype=float)

        theta_arr = np_module.asarray(trig_theta, dtype=float)
        cos_arr = np_module.asarray(trig_cos, dtype=float)
        sin_arr = np_module.asarray(trig_sin, dtype=float)

        def _sync_numpy(attr: str, source: Any) -> Any:
            # Copy into the existing cache buffer when shapes match so other
            # holders of the buffer observe the refreshed values.
            dest = getattr(cache, attr)
            if dest is None or getattr(dest, "shape", None) != source.shape:
                dest = np_module.array(source, dtype=float)
            else:
                np_module.copyto(dest, source, casting="unsafe")
            setattr(cache, attr, dest)
            return dest

        _sync_numpy("theta_np", theta_arr)
        _sync_numpy("epi_np", epi_arr)
        _sync_numpy("vf_np", vf_arr)
        _sync_numpy("cos_theta_np", cos_arr)
        _sync_numpy("sin_theta_np", sin_arr)

        # Python mirrors remain untouched while the vectorised path is active.
        # They will be rebuilt the next time the runtime falls back to lists.
        # Only their lengths are kept consistent (with neutral placeholder
        # values: 0.0, or 1.0 for cos so the implied angle is 0).
        if cache.theta is not None and len(cache.theta) != node_count:
            cache.theta = [0.0] * node_count
        if cache.epi is not None and len(cache.epi) != node_count:
            cache.epi = [0.0] * node_count
        if cache.vf is not None and len(cache.vf) != node_count:
            cache.vf = [0.0] * node_count
        if cache.cos_theta is not None and len(cache.cos_theta) != node_count:
            cache.cos_theta = [1.0] * node_count
        if cache.sin_theta is not None and len(cache.sin_theta) != node_count:
            cache.sin_theta = [0.0] * node_count
    else:
        # List fallback: refresh the per-node mirrors directly from the graph.
        # NOTE(review): this assumes the cache lists were already sized to
        # len(nodes) by _init_dnfr_cache — confirm against that helper.
        for index, node in enumerate(nodes):
            i: int = int(index)
            node_id: NodeId = node
            nd = G.nodes[node_id]
            cache.theta[i] = trig.theta[node_id]
            cache.epi[i] = get_attr(nd, ALIAS_EPI, 0.0)
            cache.vf[i] = get_attr(nd, ALIAS_VF, 0.0)
            cache.cos_theta[i] = trig.cos[node_id]
            cache.sin_theta[i] = trig.sin[node_id]
        if use_numpy and np_module is not None:
            # Rebuild the ndarray mirrors from the refreshed lists.
            _ensure_numpy_vectors(cache, np_module)
        else:
            # Vectorisation disabled: drop stale ndarray mirrors outright.
            cache.theta_np = None
            cache.epi_np = None
            cache.vf_np = None
            cache.cos_theta_np = None
            cache.sin_theta_np = None
767
+
768
+
769
def _prepare_dnfr_data(
    G: TNFRGraph,
    *,
    cache_size: int | None = 128,
    profile: MutableMapping[str, float] | None = None,
) -> dict[str, Any]:
    """Precompute common data for ΔNFR strategies.

    The helper decides between edge-wise and dense adjacency accumulation
    heuristically. Graphs whose edge density exceeds
    ``_SPARSE_DENSITY_THRESHOLD`` receive a cached adjacency matrix so the
    dense path can be exercised; callers may also force the dense mode by
    setting ``G.graph['dnfr_force_dense']`` to a truthy value.

    Parameters
    ----------
    cache_size : int or None, optional
        Budget forwarded to ``cached_nodes_and_A``; ``None`` or a positive
        value enables per-graph caching of the prepared buffers.
    profile : MutableMapping[str, float] or None, optional
        Mutable mapping that accumulates wall-clock timings for ΔNFR
        preparation. When provided the helper increases the
        ``"dnfr_cache_rebuild"`` bucket with the time spent refreshing cached
        node vectors and associated NumPy workspaces.

    Returns
    -------
    dict
        Bundle of nodes, weights, state vectors (list and ``*_np``), degree
        data, edge index arrays and path-decision telemetry consumed by the
        ΔNFR accumulation strategies.
    """
    start_timer, stop_timer = _profile_start_stop(
        profile,
        keys=("dnfr_cache_rebuild",),
    )

    graph = G.graph
    weights = graph.get("_dnfr_weights")
    if weights is None:
        weights = _configure_dnfr_weights(G)

    result: dict[str, Any] = {
        "weights": weights,
        "cache_size": cache_size,
    }

    np_module = get_numpy()
    use_numpy = _should_vectorize(G, np_module)

    nodes = cast(tuple[NodeId, ...], cached_node_list(G))
    edge_count = G.number_of_edges()

    # Centralized decision logic for sparse vs dense accumulation path.
    # This decision affects which accumulation strategy will be used:
    # - "sparse": edge-based accumulation (_accumulate_neighbors_broadcasted)
    # - "dense": matrix multiplication with adjacency matrix (_accumulate_neighbors_dense)
    # The decision is stored in dnfr_path_decision for telemetry and debugging.
    prefer_sparse = False
    dense_override = bool(G.graph.get("dnfr_force_dense"))
    dnfr_path_decision = "fallback"  # Default when numpy unavailable

    if use_numpy:
        # Heuristic: use sparse path when density <= _SPARSE_DENSITY_THRESHOLD (0.25)
        prefer_sparse = _prefer_sparse_accumulation(len(nodes), edge_count)

        if dense_override:
            # User explicitly requested dense mode
            prefer_sparse = False
            dnfr_path_decision = "dense_forced"
        elif not prefer_sparse:
            # Heuristic chose dense path (high density graph)
            dnfr_path_decision = "dense_auto"
        else:
            # Heuristic chose sparse path (low density graph)
            dnfr_path_decision = "sparse"

    nodes_cached, A_untyped = cached_nodes_and_A(
        G,
        cache_size=cache_size,
        require_numpy=False,
        prefer_sparse=prefer_sparse,
        nodes=nodes,
    )
    nodes = cast(tuple[NodeId, ...], nodes_cached)
    A: np.ndarray | None = A_untyped
    result["nodes"] = nodes
    result["A"] = A
    manager = _graph_cache_manager(G.graph)
    checksum = G.graph.get("_dnfr_nodes_checksum")
    # pop() consumes the dirty flag so a single invalidation triggers exactly
    # one forced refresh.
    dirty_flag = bool(G.graph.pop("_dnfr_prep_dirty", False))
    existing_cache = cast(DnfrCache | None, graph.get("_dnfr_prep_cache"))
    cache_timer = start_timer()
    cache, idx, theta, epi, vf, cos_theta, sin_theta, refreshed = _init_dnfr_cache(
        G,
        nodes,
        existing_cache,
        checksum,
        force_refresh=dirty_flag,
        manager=manager,
    )
    stop_timer("dnfr_cache_rebuild", cache_timer)
    dirty = dirty_flag or refreshed
    caching_enabled = cache is not None and (cache_size is None or cache_size > 0)
    result["cache"] = cache
    result["idx"] = idx
    result["theta"] = theta
    result["epi"] = epi
    result["vf"] = vf
    result["cos_theta"] = cos_theta
    result["sin_theta"] = sin_theta
    if cache is not None:
        _refresh_dnfr_vectors(G, nodes, cache)
        if np_module is None and not caching_enabled:
            # No NumPy and no caching: release every scratch ndarray so the
            # cache does not pin dead buffers.
            for attr in (
                "neighbor_x_np",
                "neighbor_y_np",
                "neighbor_epi_sum_np",
                "neighbor_vf_sum_np",
                "neighbor_count_np",
                "neighbor_deg_sum_np",
                "neighbor_inv_count_np",
                "neighbor_cos_avg_np",
                "neighbor_sin_avg_np",
                "neighbor_mean_tmp_np",
                "neighbor_mean_length_np",
                "neighbor_accum_np",
                "neighbor_edge_values_np",
            ):
                setattr(cache, attr, None)
            cache.neighbor_accum_signature = None
            for attr in (
                "th_bar_np",
                "epi_bar_np",
                "vf_bar_np",
                "deg_bar_np",
                "grad_phase_np",
                "grad_epi_np",
                "grad_vf_np",
                "grad_topo_np",
                "grad_total_np",
            ):
                setattr(cache, attr, None)

    w_phase = float(weights.get("phase", 0.0))
    w_epi = float(weights.get("epi", 0.0))
    w_vf = float(weights.get("vf", 0.0))
    w_topo = float(weights.get("topo", 0.0))
    result["w_phase"] = w_phase
    result["w_epi"] = w_epi
    result["w_vf"] = w_vf
    result["w_topo"] = w_topo
    degree_map = cast(dict[NodeId, float] | None, cache.degs if cache else None)
    if cache is not None and dirty:
        # Topology changed: all degree- and edge-derived buffers are stale.
        cache.degs = None
        cache.deg_list = None
        cache.deg_array = None
        cache.edge_src = None
        cache.edge_dst = None
        cache.edge_signature = None
        cache.neighbor_accum_signature = None
        cache.neighbor_accum_np = None
        cache.neighbor_edge_values_np = None
        degree_map = None

    deg_list: list[float] | None = None
    degs: dict[NodeId, float] | None = None
    deg_array: np.ndarray | None = None

    # Degrees are needed either for the topology gradient term or to keep the
    # cache warm for later calls.
    if w_topo != 0.0 or caching_enabled:
        if degree_map is None or len(degree_map) != len(G):
            degree_map = {cast(NodeId, node): float(deg) for node, deg in G.degree()}
            if cache is not None:
                cache.degs = degree_map

        if (
            cache is not None
            and cache.deg_list is not None
            and not dirty
            and len(cache.deg_list) == len(nodes)
        ):
            deg_list = cache.deg_list
        else:
            deg_list = [float(degree_map.get(node, 0.0)) for node in nodes]
            if cache is not None:
                cache.deg_list = deg_list

        degs = degree_map

        if np_module is not None and deg_list is not None:
            if cache is not None:
                deg_array = _ensure_numpy_degrees(cache, deg_list, np_module)
            else:
                deg_array = np_module.array(deg_list, dtype=float)
        elif cache is not None:
            cache.deg_array = None
    elif cache is not None and dirty:
        cache.deg_list = None
        cache.deg_array = None

    G.graph["_dnfr_prep_dirty"] = False

    result["degs"] = degs
    result["deg_list"] = deg_list

    theta_np: np.ndarray | None
    epi_np: np.ndarray | None
    vf_np: np.ndarray | None
    cos_theta_np: np.ndarray | None
    sin_theta_np: np.ndarray | None
    edge_src: np.ndarray | None
    edge_dst: np.ndarray | None
    if use_numpy:
        theta_np, epi_np, vf_np, cos_theta_np, sin_theta_np = _ensure_numpy_vectors(
            cache, np_module
        )
        edge_src = None
        edge_dst = None
        if cache is not None:
            edge_src = cache.edge_src
            edge_dst = cache.edge_dst
            if edge_src is None or edge_dst is None or dirty:
                edge_src, edge_dst = _build_edge_index_arrays(G, nodes, idx, np_module)
                cache.edge_src = edge_src
                cache.edge_dst = edge_dst
        else:
            edge_src, edge_dst = _build_edge_index_arrays(G, nodes, idx, np_module)

        if cache is not None:
            # Surface reusable accumulators so strategies can skip realloc.
            for attr in ("neighbor_accum_np", "neighbor_edge_values_np"):
                arr = getattr(cache, attr, None)
                if arr is not None:
                    result[attr] = arr
        if edge_src is not None and edge_dst is not None:
            # Identity-based signature: cheap staleness check for downstream
            # consumers holding onto the same edge arrays.
            signature = (id(edge_src), id(edge_dst), len(nodes))
            result["edge_signature"] = signature
            if cache is not None:
                cache.edge_signature = signature
    else:
        theta_np = None
        epi_np = None
        vf_np = None
        cos_theta_np = None
        sin_theta_np = None
        edge_src = None
        edge_dst = None
        if cache is not None:
            cache.edge_src = None
            cache.edge_dst = None

    result.setdefault("neighbor_edge_values_np", None)
    if cache is not None and "edge_signature" not in result:
        result["edge_signature"] = cache.edge_signature

    result["theta_np"] = theta_np
    result["epi_np"] = epi_np
    result["vf_np"] = vf_np
    result["cos_theta_np"] = cos_theta_np
    result["sin_theta_np"] = sin_theta_np
    # Prefer the ndarray views for the plain keys whenever their shapes match
    # the node count, so list and vector consumers share one buffer.
    if theta_np is not None and getattr(theta_np, "shape", None) == (len(nodes),):
        result["theta"] = theta_np
    if epi_np is not None and getattr(epi_np, "shape", None) == (len(nodes),):
        result["epi"] = epi_np
    if vf_np is not None and getattr(vf_np, "shape", None) == (len(nodes),):
        result["vf"] = vf_np
    if cos_theta_np is not None and getattr(cos_theta_np, "shape", None) == (
        len(nodes),
    ):
        result["cos_theta"] = cos_theta_np
    if sin_theta_np is not None and getattr(sin_theta_np, "shape", None) == (
        len(nodes),
    ):
        result["sin_theta"] = sin_theta_np
    result["deg_array"] = deg_array
    result["edge_src"] = edge_src
    result["edge_dst"] = edge_dst
    result["edge_count"] = edge_count
    result["prefer_sparse"] = prefer_sparse
    result["dense_override"] = dense_override
    result["dnfr_path_decision"] = dnfr_path_decision
    result.setdefault("neighbor_accum_np", None)
    result.setdefault("neighbor_accum_signature", None)

    return result
1043
+
1044
+
1045
def _apply_dnfr_gradients(
    G: TNFRGraph,
    data: MutableMapping[str, Any],
    th_bar: Sequence[float] | np.ndarray,
    epi_bar: Sequence[float] | np.ndarray,
    vf_bar: Sequence[float] | np.ndarray,
    deg_bar: Sequence[float] | np.ndarray | None = None,
    degs: Mapping[Any, float] | Sequence[float] | np.ndarray | None = None,
    *,
    n_jobs: int | None = None,
    profile: MutableMapping[str, float] | None = None,
) -> None:
    """Combine precomputed gradients and write ΔNFR to each node.

    Three execution paths exist: a fully vectorised NumPy path (all state
    vectors and neighbour means are ndarrays), a process-pool path chunked
    over nodes, and a serial per-node loop.  Each combines the phase, EPI,
    vf and (optionally) topology gradients with the weights stored in
    ``data`` and writes the weighted sum via ``set_dnfr``.

    Parameters
    ----------
    profile : MutableMapping[str, float] or None, optional
        Mutable mapping receiving aggregated timings for the gradient assembly
        (``"dnfr_gradient_assembly"``) and in-place writes
        (``"dnfr_inplace_write"``).
    """
    start_timer, stop_timer = _profile_start_stop(
        profile,
        keys=("dnfr_gradient_assembly", "dnfr_inplace_write"),
    )

    np = get_numpy()
    nodes = data["nodes"]
    theta = data["theta"]
    epi = data["epi"]
    vf = data["vf"]
    w_phase = data["w_phase"]
    w_epi = data["w_epi"]
    w_vf = data["w_vf"]
    w_topo = data["w_topo"]
    if degs is None:
        degs = data.get("degs")

    cache: DnfrCache | None = data.get("cache")

    theta_np = data.get("theta_np")
    epi_np = data.get("epi_np")
    vf_np = data.get("vf_np")
    deg_array = data.get("deg_array") if w_topo != 0.0 else None

    # Vectorise only when every operand for the active terms is an ndarray.
    use_vector = (
        np is not None
        and theta_np is not None
        and epi_np is not None
        and vf_np is not None
        and isinstance(th_bar, np.ndarray)
        and isinstance(epi_bar, np.ndarray)
        and isinstance(vf_bar, np.ndarray)
    )
    if use_vector and w_topo != 0.0:
        use_vector = (
            deg_bar is not None
            and isinstance(deg_bar, np.ndarray)
            and isinstance(deg_array, np.ndarray)
        )

    grad_timer = start_timer()

    if use_vector:
        # Reuse cached scratch buffers; contents are fully overwritten below.
        grad_phase = _ensure_cached_array(cache, "grad_phase_np", theta_np.shape, np)
        grad_epi = _ensure_cached_array(cache, "grad_epi_np", epi_np.shape, np)
        grad_vf = _ensure_cached_array(cache, "grad_vf_np", vf_np.shape, np)
        grad_total = _ensure_cached_array(cache, "grad_total_np", theta_np.shape, np)
        grad_topo = None
        if w_topo != 0.0:
            grad_topo = _ensure_cached_array(cache, "grad_topo_np", deg_array.shape, np)

        # Phase gradient: wrapped angular difference scaled to [-1, 1] via pi.
        angle_diff_array(theta_np, th_bar, np=np, out=grad_phase)
        np.multiply(grad_phase, -1.0 / math.pi, out=grad_phase)

        np.copyto(grad_epi, epi_bar, casting="unsafe")
        grad_epi -= epi_np

        np.copyto(grad_vf, vf_bar, casting="unsafe")
        grad_vf -= vf_np

        if grad_topo is not None and deg_bar is not None:
            np.copyto(grad_topo, deg_bar, casting="unsafe")
            grad_topo -= deg_array

        # Weighted accumulation into grad_total; the w != 1.0 guards skip a
        # no-op multiply, and zero weights skip the term entirely.
        if w_phase != 0.0:
            np.multiply(grad_phase, w_phase, out=grad_total)
        else:
            grad_total.fill(0.0)
        if w_epi != 0.0:
            if w_epi != 1.0:
                np.multiply(grad_epi, w_epi, out=grad_epi)
            np.add(grad_total, grad_epi, out=grad_total)
        if w_vf != 0.0:
            if w_vf != 1.0:
                np.multiply(grad_vf, w_vf, out=grad_vf)
            np.add(grad_total, grad_vf, out=grad_total)
        if w_topo != 0.0 and grad_topo is not None:
            if w_topo != 1.0:
                np.multiply(grad_topo, w_topo, out=grad_topo)
            np.add(grad_total, grad_topo, out=grad_total)

        dnfr_values = grad_total
    else:
        effective_jobs = _resolve_parallel_jobs(n_jobs, len(nodes))
        if effective_jobs:
            # Fan the per-node combination out to worker processes in
            # contiguous chunks, then stitch the results back in order.
            chunk_results = []
            with ProcessPoolExecutor(max_workers=effective_jobs) as executor:
                futures = []
                for start, end in _iter_chunk_offsets(len(nodes), effective_jobs):
                    if start == end:
                        continue
                    futures.append(
                        executor.submit(
                            _dnfr_gradients_worker,
                            start,
                            end,
                            nodes,
                            theta,
                            epi,
                            vf,
                            th_bar,
                            epi_bar,
                            vf_bar,
                            deg_bar,
                            degs,
                            w_phase,
                            w_epi,
                            w_vf,
                            w_topo,
                        )
                    )
                for future in futures:
                    chunk_results.append(future.result())

            dnfr_values = [0.0] * len(nodes)
            for start, chunk in sorted(chunk_results, key=lambda item: item[0]):
                end = start + len(chunk)
                dnfr_values[start:end] = chunk
        else:
            # Serial fallback mirroring the vectorised arithmetic per node.
            dnfr_values = []
            for i, n in enumerate(nodes):
                g_phase = -angle_diff(theta[i], th_bar[i]) / math.pi
                g_epi = epi_bar[i] - epi[i]
                g_vf = vf_bar[i] - vf[i]
                if w_topo != 0.0 and deg_bar is not None and degs is not None:
                    if isinstance(degs, dict):
                        deg_i = float(degs.get(n, 0))
                    else:
                        deg_i = float(degs[i])
                    g_topo = deg_bar[i] - deg_i
                else:
                    g_topo = 0.0
                dnfr_values.append(
                    w_phase * g_phase + w_epi * g_epi + w_vf * g_vf + w_topo * g_topo
                )

        # The non-vector paths never refreshed the ndarray scratch buffers,
        # so drop them to avoid reusing stale contents later.
        if cache is not None:
            cache.grad_phase_np = None
            cache.grad_epi_np = None
            cache.grad_vf_np = None
            cache.grad_topo_np = None
            cache.grad_total_np = None

    stop_timer("dnfr_gradient_assembly", grad_timer)

    write_timer = start_timer()
    for i, n in enumerate(nodes):
        set_dnfr(G, n, float(dnfr_values[i]))
    stop_timer("dnfr_inplace_write", write_timer)
1215
+
1216
+
1217
def _init_bar_arrays(
    data: MutableMapping[str, Any],
    *,
    degs: Mapping[Any, float] | Sequence[float] | None = None,
    np: ModuleType | None = None,
) -> tuple[Sequence[float], Sequence[float], Sequence[float], Sequence[float] | None]:
    """Prepare containers for neighbour means.

    If ``np`` is provided, NumPy arrays are created; otherwise lists are used.
    ``degs`` is optional and only initialised when the topological term is
    active.

    Each container is seeded with the node's own state (theta/epi/vf/degree)
    so that nodes without neighbours keep their current values as the "mean".
    When a cache is present, its ``*_bar``/``*_bar_np`` buffers are reused in
    place whenever their size matches, avoiding reallocation per step.
    """

    nodes = data["nodes"]
    theta = data["theta"]
    epi = data["epi"]
    vf = data["vf"]
    w_topo = data["w_topo"]
    cache: DnfrCache | None = data.get("cache")
    if np is None:
        np = get_numpy()
    if np is not None:
        size = len(theta)
        if cache is not None:
            # NumPy + cache: copy state into the cached buffers in place,
            # reallocating only on shape mismatch.
            th_bar = cache.th_bar_np
            if th_bar is None or getattr(th_bar, "shape", None) != (size,):
                th_bar = np.array(theta, dtype=float)
            else:
                np.copyto(th_bar, theta, casting="unsafe")
            cache.th_bar_np = th_bar

            epi_bar = cache.epi_bar_np
            if epi_bar is None or getattr(epi_bar, "shape", None) != (size,):
                epi_bar = np.array(epi, dtype=float)
            else:
                np.copyto(epi_bar, epi, casting="unsafe")
            cache.epi_bar_np = epi_bar

            vf_bar = cache.vf_bar_np
            if vf_bar is None or getattr(vf_bar, "shape", None) != (size,):
                vf_bar = np.array(vf, dtype=float)
            else:
                np.copyto(vf_bar, vf, casting="unsafe")
            cache.vf_bar_np = vf_bar

            if w_topo != 0.0 and degs is not None:
                # A dict of degrees is keyed by node; sequences are positional.
                if isinstance(degs, dict):
                    deg_size = len(nodes)
                else:
                    deg_size = len(degs)
                deg_bar = cache.deg_bar_np
                if deg_bar is None or getattr(deg_bar, "shape", None) != (deg_size,):
                    if isinstance(degs, dict):
                        deg_bar = np.array(
                            [float(degs.get(node, 0.0)) for node in nodes],
                            dtype=float,
                        )
                    else:
                        deg_bar = np.array(degs, dtype=float)
                else:
                    if isinstance(degs, dict):
                        for i, node in enumerate(nodes):
                            deg_bar[i] = float(degs.get(node, 0.0))
                    else:
                        np.copyto(deg_bar, degs, casting="unsafe")
                cache.deg_bar_np = deg_bar
            else:
                deg_bar = None
                if cache is not None:
                    cache.deg_bar_np = None
        else:
            # NumPy without cache: fresh arrays every call.
            th_bar = np.array(theta, dtype=float)
            epi_bar = np.array(epi, dtype=float)
            vf_bar = np.array(vf, dtype=float)
            deg_bar = (
                np.array(degs, dtype=float)
                if w_topo != 0.0 and degs is not None
                else None
            )
    else:
        size = len(theta)
        if cache is not None:
            # Pure-Python + cache: reuse list buffers, refreshed by slicing.
            th_bar = cache.th_bar
            if th_bar is None or len(th_bar) != size:
                th_bar = [0.0] * size
            th_bar[:] = theta
            cache.th_bar = th_bar

            epi_bar = cache.epi_bar
            if epi_bar is None or len(epi_bar) != size:
                epi_bar = [0.0] * size
            epi_bar[:] = epi
            cache.epi_bar = epi_bar

            vf_bar = cache.vf_bar
            if vf_bar is None or len(vf_bar) != size:
                vf_bar = [0.0] * size
            vf_bar[:] = vf
            cache.vf_bar = vf_bar

            if w_topo != 0.0 and degs is not None:
                if isinstance(degs, dict):
                    deg_size = len(nodes)
                else:
                    deg_size = len(degs)
                deg_bar = cache.deg_bar
                if deg_bar is None or len(deg_bar) != deg_size:
                    deg_bar = [0.0] * deg_size
                if isinstance(degs, dict):
                    for i, node in enumerate(nodes):
                        deg_bar[i] = float(degs.get(node, 0.0))
                else:
                    for i, value in enumerate(degs):
                        deg_bar[i] = float(value)
                cache.deg_bar = deg_bar
            else:
                deg_bar = None
                cache.deg_bar = None
        else:
            # Pure-Python without cache: plain list copies.
            th_bar = list(theta)
            epi_bar = list(epi)
            vf_bar = list(vf)
            deg_bar = list(degs) if w_topo != 0.0 and degs is not None else None
    return th_bar, epi_bar, vf_bar, deg_bar
1341
+
1342
+
1343
def _compute_neighbor_means(
    G: TNFRGraph,
    data: MutableMapping[str, Any],
    *,
    x: Sequence[float],
    y: Sequence[float],
    epi_sum: Sequence[float],
    vf_sum: Sequence[float],
    count: Sequence[float] | np.ndarray,
    deg_sum: Sequence[float] | None = None,
    degs: Mapping[Any, float] | Sequence[float] | None = None,
    np: ModuleType | None = None,
) -> tuple[Sequence[float], Sequence[float], Sequence[float], Sequence[float] | None]:
    """Return neighbour mean arrays for ΔNFR.

    ``x``/``y`` carry per-node sums of neighbour cos/sin components; the mean
    phase is recovered via ``atan2`` of the averaged components.  ``epi_sum``,
    ``vf_sum`` and ``deg_sum`` are divided by ``count``.  Nodes with no
    neighbours (``count == 0``) keep the seed values written by
    ``_init_bar_arrays`` (their own state).  When the mean phase vector is
    shorter than ``_MEAN_VECTOR_EPS`` the node's own theta is used instead,
    since the mean direction is numerically undefined.
    """
    w_topo = data["w_topo"]
    theta = data["theta"]
    cache: DnfrCache | None = data.get("cache")
    is_numpy = np is not None and isinstance(count, np.ndarray)
    th_bar, epi_bar, vf_bar, deg_bar = _init_bar_arrays(
        data, degs=degs, np=np if is_numpy else None
    )

    if is_numpy:
        n = count.shape[0]
        mask = count > 0
        if not np.any(mask):
            # No node has neighbours: seeds are already the right answer.
            return th_bar, epi_bar, vf_bar, deg_bar

        # All scratch buffers below come from the cache and are overwritten.
        inv = _ensure_cached_array(cache, "neighbor_inv_count_np", (n,), np)
        inv.fill(0.0)
        np.divide(1.0, count, out=inv, where=mask)

        cos_avg = _ensure_cached_array(cache, "neighbor_cos_avg_np", (n,), np)
        cos_avg.fill(0.0)
        np.multiply(x, inv, out=cos_avg, where=mask)

        sin_avg = _ensure_cached_array(cache, "neighbor_sin_avg_np", (n,), np)
        sin_avg.fill(0.0)
        np.multiply(y, inv, out=sin_avg, where=mask)

        lengths = _ensure_cached_array(cache, "neighbor_mean_length_np", (n,), np)
        np.hypot(cos_avg, sin_avg, out=lengths)

        temp = _ensure_cached_array(cache, "neighbor_mean_tmp_np", (n,), np)
        np.arctan2(sin_avg, cos_avg, out=temp)

        theta_src = data.get("theta_np")
        if theta_src is None:
            theta_src = np.asarray(theta, dtype=float)
        # Degenerate mean vectors fall back to the node's own phase before
        # the masked write into th_bar.
        zero_mask = lengths <= _MEAN_VECTOR_EPS
        np.copyto(temp, theta_src, where=zero_mask)
        np.copyto(th_bar, temp, where=mask, casting="unsafe")

        np.divide(epi_sum, count, out=epi_bar, where=mask)
        np.divide(vf_sum, count, out=vf_bar, where=mask)
        if w_topo != 0.0 and deg_bar is not None and deg_sum is not None:
            np.divide(deg_sum, count, out=deg_bar, where=mask)
        return th_bar, epi_bar, vf_bar, deg_bar

    # Pure-Python path: same arithmetic, one node at a time.
    n = len(theta)
    for i in range(n):
        c = count[i]
        if not c:
            continue
        inv = 1.0 / float(c)
        cos_avg = x[i] * inv
        sin_avg = y[i] * inv
        if math.hypot(cos_avg, sin_avg) <= _MEAN_VECTOR_EPS:
            th_bar[i] = theta[i]
        else:
            th_bar[i] = math.atan2(sin_avg, cos_avg)
        epi_bar[i] = epi_sum[i] * inv
        vf_bar[i] = vf_sum[i] * inv
        if w_topo != 0.0 and deg_bar is not None and deg_sum is not None:
            deg_bar[i] = deg_sum[i] * inv
    return th_bar, epi_bar, vf_bar, deg_bar
1419
+
1420
+
1421
def _compute_dnfr_common(
    G: TNFRGraph,
    data: MutableMapping[str, Any],
    *,
    x: Sequence[float],
    y: Sequence[float],
    epi_sum: Sequence[float],
    vf_sum: Sequence[float],
    count: Sequence[float] | None,
    deg_sum: Sequence[float] | None = None,
    degs: Sequence[float] | None = None,
    n_jobs: int | None = None,
    profile: MutableMapping[str, float] | None = None,
) -> None:
    """Compute neighbour means and apply ΔNFR gradients.

    Thin orchestrator: converts the accumulated neighbour sums into means via
    ``_compute_neighbor_means`` (vectorised only when ``count`` is an
    ndarray), then delegates the weighted combination and node writes to
    ``_apply_dnfr_gradients``.

    Parameters
    ----------
    profile : MutableMapping[str, float] or None, optional
        Mutable mapping that records wall-clock durations for the neighbour
        mean computation (``"dnfr_neighbor_means"``), the gradient assembly
        (``"dnfr_gradient_assembly"``) and the final in-place writes to the
        graph (``"dnfr_inplace_write"``).
    """
    start_timer, stop_timer = _profile_start_stop(
        profile,
        keys=("dnfr_neighbor_means", "dnfr_gradient_assembly", "dnfr_inplace_write"),
    )

    np_module = get_numpy()
    # Pass the numpy module down only when count is actually an ndarray, so
    # _compute_neighbor_means picks the matching (vector vs. list) path.
    if np_module is not None and isinstance(
        count, getattr(np_module, "ndarray", tuple)
    ):
        np_arg = np_module
    else:
        np_arg = None
    neighbor_timer = start_timer()
    th_bar, epi_bar, vf_bar, deg_bar = _compute_neighbor_means(
        G,
        data,
        x=x,
        y=y,
        epi_sum=epi_sum,
        vf_sum=vf_sum,
        count=count,
        deg_sum=deg_sum,
        degs=degs,
        np=np_arg,
    )
    stop_timer("dnfr_neighbor_means", neighbor_timer)
    _apply_dnfr_gradients(
        G,
        data,
        th_bar,
        epi_bar,
        vf_bar,
        deg_bar,
        degs,
        n_jobs=n_jobs,
        profile=profile,
    )
1482
+
1483
+
1484
+ def _reset_numpy_buffer(
1485
+ buffer: np.ndarray | None,
1486
+ size: int,
1487
+ np: ModuleType,
1488
+ ) -> np.ndarray:
1489
+ if (
1490
+ buffer is None
1491
+ or getattr(buffer, "shape", None) is None
1492
+ or buffer.shape[0] != size
1493
+ ):
1494
+ return np.zeros(size, dtype=float)
1495
+ buffer.fill(0.0)
1496
+ return buffer
1497
+
1498
+
1499
def _init_neighbor_sums(
    data: MutableMapping[str, Any],
    *,
    np: ModuleType | None = None,
) -> NeighborStats:
    """Initialise containers for neighbour sums.

    Parameters
    ----------
    data:
        Prepared ΔNFR mapping; reads ``"nodes"`` and ``"w_topo"`` and may
        carry a ``"cache"`` (``DnfrCache``) whose buffers are reused, plus an
        optional ``"deg_list"`` of per-node degrees.
    np:
        NumPy module when the vectorised path is active; ``None`` selects the
        pure-Python list path.

    Returns
    -------
    NeighborStats
        Tuple ``(x, y, epi_sum, vf_sum, count, deg_sum, degs)`` of zeroed
        accumulators.  ``deg_sum`` is ``None`` when the topological weight is
        zero; ``degs`` is only populated on the list path (from ``deg_list``).
    """
    nodes = data["nodes"]
    n = len(nodes)
    w_topo = data["w_topo"]
    cache: DnfrCache | None = data.get("cache")

    def _reset_list(buffer: list[float] | None, value: float = 0.0) -> list[float]:
        # Reuse ``buffer`` in place when it already has length ``n``;
        # otherwise allocate a fresh list filled with ``value``.
        if buffer is None or len(buffer) != n:
            return [value] * n
        for i in range(n):
            buffer[i] = value
        return buffer

    if np is not None:
        if cache is not None:
            # NumPy path with cache: recycle the cached ndarray buffers and
            # keep the parallel Python-list mirrors zeroed as well, so both
            # representations stay consistent.
            x = cache.neighbor_x_np
            y = cache.neighbor_y_np
            epi_sum = cache.neighbor_epi_sum_np
            vf_sum = cache.neighbor_vf_sum_np
            count = cache.neighbor_count_np
            x = _reset_numpy_buffer(x, n, np)
            y = _reset_numpy_buffer(y, n, np)
            epi_sum = _reset_numpy_buffer(epi_sum, n, np)
            vf_sum = _reset_numpy_buffer(vf_sum, n, np)
            count = _reset_numpy_buffer(count, n, np)
            # Write the (possibly re-allocated) buffers back into the cache.
            cache.neighbor_x_np = x
            cache.neighbor_y_np = y
            cache.neighbor_epi_sum_np = epi_sum
            cache.neighbor_vf_sum_np = vf_sum
            cache.neighbor_count_np = count
            cache.neighbor_x = _reset_list(cache.neighbor_x)
            cache.neighbor_y = _reset_list(cache.neighbor_y)
            cache.neighbor_epi_sum = _reset_list(cache.neighbor_epi_sum)
            cache.neighbor_vf_sum = _reset_list(cache.neighbor_vf_sum)
            cache.neighbor_count = _reset_list(cache.neighbor_count)
            if w_topo != 0.0:
                deg_sum = _reset_numpy_buffer(cache.neighbor_deg_sum_np, n, np)
                cache.neighbor_deg_sum_np = deg_sum
                cache.neighbor_deg_sum = _reset_list(cache.neighbor_deg_sum)
            else:
                # Topological term inactive: drop the cached degree buffers.
                cache.neighbor_deg_sum_np = None
                cache.neighbor_deg_sum = None
                deg_sum = None
        else:
            # NumPy path without cache: allocate fresh zeroed arrays.
            x = np.zeros(n, dtype=float)
            y = np.zeros(n, dtype=float)
            epi_sum = np.zeros(n, dtype=float)
            vf_sum = np.zeros(n, dtype=float)
            count = np.zeros(n, dtype=float)
            deg_sum = np.zeros(n, dtype=float) if w_topo != 0.0 else None
        # Vectorised callers resolve degrees separately; no list is exposed.
        degs = None
    else:
        if cache is not None:
            # List path with cache: zero the cached Python lists in place.
            x = _reset_list(cache.neighbor_x)
            y = _reset_list(cache.neighbor_y)
            epi_sum = _reset_list(cache.neighbor_epi_sum)
            vf_sum = _reset_list(cache.neighbor_vf_sum)
            count = _reset_list(cache.neighbor_count)
            cache.neighbor_x = x
            cache.neighbor_y = y
            cache.neighbor_epi_sum = epi_sum
            cache.neighbor_vf_sum = vf_sum
            cache.neighbor_count = count
            if w_topo != 0.0:
                deg_sum = _reset_list(cache.neighbor_deg_sum)
                cache.neighbor_deg_sum = deg_sum
            else:
                cache.neighbor_deg_sum = None
                deg_sum = None
        else:
            # List path without cache: plain zero-filled lists.
            x = [0.0] * n
            y = [0.0] * n
            epi_sum = [0.0] * n
            vf_sum = [0.0] * n
            count = [0.0] * n
            deg_sum = [0.0] * n if w_topo != 0.0 else None
        # The list path reads degrees directly from ``deg_list`` when the
        # topological weight is active.
        deg_list = data.get("deg_list")
        if w_topo != 0.0 and deg_list is not None:
            degs = deg_list
        else:
            degs = None
    return x, y, epi_sum, vf_sum, count, deg_sum, degs
1586
+
1587
+
1588
def _prefer_sparse_accumulation(n: int, edge_count: int | None) -> bool:
    """Decide whether edge-wise (sparse) accumulation should be used.

    Returns ``True`` only when the graph is non-trivial, has edges, and its
    directed edge density does not exceed ``_SPARSE_DENSITY_THRESHOLD``.
    """
    if n <= 1:
        return False
    if not edge_count:
        return False
    directed_pairs = n * (n - 1)
    if directed_pairs <= 0:
        return False
    return (edge_count / directed_pairs) <= _SPARSE_DENSITY_THRESHOLD
1598
+
1599
+
1600
def _accumulate_neighbors_dense(
    G: TNFRGraph,
    data: MutableMapping[str, Any],
    *,
    x: np.ndarray,
    y: np.ndarray,
    epi_sum: np.ndarray,
    vf_sum: np.ndarray,
    count: np.ndarray,
    deg_sum: np.ndarray | None,
    np: ModuleType,
) -> NeighborStats:
    """Vectorised neighbour accumulation using a dense adjacency matrix.

    Multiplies the adjacency matrix ``data["A"]`` against the stacked
    per-node state columns (cos θ, sin θ, EPI, νf) so each output row holds
    the neighbour sums for one node.  Falls back to the edge-indexed path
    when no adjacency matrix is available.

    Returns the same ``(x, y, epi_sum, vf_sum, count, deg_sum, degs)`` tuple
    contract as the other accumulators; results are written in place into the
    provided arrays.
    """
    nodes = data["nodes"]
    if not nodes:
        return x, y, epi_sum, vf_sum, count, deg_sum, None

    A = data.get("A")
    if A is None:
        # No dense matrix cached: delegate to the sparse edge-index kernel.
        return _accumulate_neighbors_numpy(
            G,
            data,
            x=x,
            y=y,
            epi_sum=epi_sum,
            vf_sum=vf_sum,
            count=count,
            deg_sum=deg_sum,
            np=np,
        )

    cache: DnfrCache | None = data.get("cache")
    n = len(nodes)

    state = _ensure_numpy_state_vectors(data, np)
    vectors = [state["cos"], state["sin"], state["epi"], state["vf"]]

    # Scratch matrices of shape (n, 4), reused across calls via the cache.
    components = _ensure_cached_array(cache, "dense_components_np", (n, 4), np)
    accum = _ensure_cached_array(cache, "dense_accum_np", (n, 4), np)

    # ``components`` retains the last source copies so callers relying on
    # cached buffers (e.g. diagnostics) still observe meaningful values.
    np.copyto(components, np.column_stack(vectors), casting="unsafe")

    # One matmul accumulates all four components simultaneously.
    np.matmul(A, components, out=accum)

    np.copyto(x, accum[:, 0], casting="unsafe")
    np.copyto(y, accum[:, 1], casting="unsafe")
    np.copyto(epi_sum, accum[:, 2], casting="unsafe")
    np.copyto(vf_sum, accum[:, 3], casting="unsafe")

    # Neighbour counts equal the adjacency row sums; check ``data`` first,
    # then the cache, recomputing (and re-caching) only when stale.
    degree_counts = data.get("dense_degree_np")
    if degree_counts is None or getattr(degree_counts, "shape", (0,))[0] != n:
        degree_counts = None
    if degree_counts is None and cache is not None:
        cached_counts = cache.dense_degree_np
        if cached_counts is not None and getattr(cached_counts, "shape", (0,))[0] == n:
            degree_counts = cached_counts
    if degree_counts is None:
        degree_counts = A.sum(axis=1)
        if cache is not None:
            cache.dense_degree_np = degree_counts
    data["dense_degree_np"] = degree_counts
    np.copyto(count, degree_counts, casting="unsafe")

    degs = None
    if deg_sum is not None:
        # Topological term: neighbour degree sums via a second matmul.
        deg_array = data.get("deg_array")
        if deg_array is None:
            deg_array = _resolve_numpy_degree_array(
                data,
                count,
                cache=cache,
                np=np,
            )
        if deg_array is None:
            # Degrees unavailable: leave the topological sums at zero.
            deg_sum.fill(0.0)
        else:
            np.matmul(A, deg_array, out=deg_sum)
            degs = deg_array

    return x, y, epi_sum, vf_sum, count, deg_sum, degs
1683
+
1684
+
1685
+ def _accumulate_neighbors_broadcasted(
1686
+ *,
1687
+ edge_src: np.ndarray,
1688
+ edge_dst: np.ndarray,
1689
+ cos: np.ndarray,
1690
+ sin: np.ndarray,
1691
+ epi: np.ndarray,
1692
+ vf: np.ndarray,
1693
+ x: np.ndarray,
1694
+ y: np.ndarray,
1695
+ epi_sum: np.ndarray,
1696
+ vf_sum: np.ndarray,
1697
+ count: np.ndarray | None,
1698
+ deg_sum: np.ndarray | None,
1699
+ deg_array: np.ndarray | None,
1700
+ cache: DnfrCache | None,
1701
+ np: ModuleType,
1702
+ chunk_size: int | None = None,
1703
+ ) -> dict[str, np.ndarray]:
1704
+ """Accumulate neighbour contributions using direct indexed reductions.
1705
+
1706
+ Array reuse strategy for non-chunked blocks:
1707
+ --------------------------------------------
1708
+ This function optimizes memory usage by reusing cached destination arrays:
1709
+
1710
+ 1. **Accumulator reuse**: The `accum` matrix (component_rows × n) is cached
1711
+ across invocations when signature remains stable. For non-chunked paths,
1712
+ it's zero-filled (accum.fill(0.0)) rather than reallocated.
1713
+
1714
+ 2. **Workspace reuse**: The `workspace` buffer (component_rows × edge_count)
1715
+ stores intermediate edge values. In non-chunked mode with sufficient
1716
+ workspace size, edge values are extracted once into workspace rows
1717
+ via np.take(..., out=workspace[row, :]) to avoid repeated allocations.
1718
+
1719
+ 3. **Destination array writes**: np.bincount results are written to accum
1720
+ rows via np.copyto(..., casting="unsafe"), reusing the same memory
1721
+ across all components (cos, sin, epi, vf, count, deg).
1722
+
1723
+ 4. **Deg_array optimization**: When deg_array is provided and topology
1724
+ weight is active, degree values are extracted into workspace and
1725
+ accumulated via bincount, maintaining the reuse pattern.
1726
+
1727
+ The non-chunked path achieves minimal temporary allocations by:
1728
+ - Reusing cached accum and workspace buffers
1729
+ - Extracting all edge values into workspace in a single pass
1730
+ - Writing bincount results directly to destination rows
1731
+
1732
+ Note: np.bincount does not support an `out` parameter, so its return
1733
+ value must be copied to the destination. The workspace pattern minimizes
1734
+ the number of temporary arrays created during edge value extraction.
1735
+
1736
+ This approach maintains ΔNFR computational accuracy (Invariant #8) while
1737
+ reducing memory footprint for repeated accumulations with stable topology.
1738
+ """
1739
+
1740
+ n = x.shape[0]
1741
+ edge_count = int(edge_src.size)
1742
+
1743
+ include_count = count is not None
1744
+ use_topology = deg_sum is not None and deg_array is not None
1745
+
1746
+ component_rows = 4 + (1 if include_count else 0) + (1 if use_topology else 0)
1747
+
1748
+ if edge_count:
1749
+ if chunk_size is None:
1750
+ resolved_chunk = edge_count
1751
+ else:
1752
+ try:
1753
+ resolved_chunk = int(chunk_size)
1754
+ except (TypeError, ValueError):
1755
+ resolved_chunk = edge_count
1756
+ else:
1757
+ if resolved_chunk <= 0:
1758
+ resolved_chunk = edge_count
1759
+ resolved_chunk = max(1, min(edge_count, resolved_chunk))
1760
+ else:
1761
+ resolved_chunk = 0
1762
+
1763
+ use_chunks = bool(edge_count and resolved_chunk < edge_count)
1764
+
1765
+ if cache is not None:
1766
+ base_signature = (id(edge_src), id(edge_dst), n, edge_count)
1767
+ cache.edge_signature = base_signature
1768
+ signature = (base_signature, component_rows)
1769
+ previous_signature = cache.neighbor_accum_signature
1770
+
1771
+ accum = cache.neighbor_accum_np
1772
+ if (
1773
+ accum is None
1774
+ or getattr(accum, "shape", None) != (component_rows, n)
1775
+ or previous_signature != signature
1776
+ ):
1777
+ accum = np.zeros((component_rows, n), dtype=float)
1778
+ cache.neighbor_accum_np = accum
1779
+ else:
1780
+ accum.fill(0.0)
1781
+
1782
+ workspace = cache.neighbor_edge_values_np
1783
+ if use_chunks:
1784
+ workspace_length = resolved_chunk
1785
+ else:
1786
+ # For non-chunked path, allocate workspace to hold edge_count values
1787
+ # so we can extract edge values without temporary allocations
1788
+ workspace_length = edge_count if edge_count else component_rows
1789
+ if workspace_length:
1790
+ expected_shape = (component_rows, workspace_length)
1791
+ if workspace is None or getattr(workspace, "shape", None) != expected_shape:
1792
+ workspace = np.empty(expected_shape, dtype=float)
1793
+ else:
1794
+ workspace = None
1795
+ cache.neighbor_edge_values_np = workspace
1796
+
1797
+ cache.neighbor_accum_signature = signature
1798
+ else:
1799
+ accum = np.zeros((component_rows, n), dtype=float)
1800
+ # For non-chunked path without cache, allocate workspace for edge values
1801
+ workspace_length = (
1802
+ edge_count
1803
+ if (not use_chunks and edge_count)
1804
+ else (resolved_chunk if use_chunks else component_rows)
1805
+ )
1806
+ workspace = (
1807
+ np.empty((component_rows, workspace_length), dtype=float)
1808
+ if workspace_length
1809
+ else None
1810
+ )
1811
+
1812
+ if edge_count:
1813
+ row = 0
1814
+ cos_row = row
1815
+ row += 1
1816
+ sin_row = row
1817
+ row += 1
1818
+ epi_row = row
1819
+ row += 1
1820
+ vf_row = row
1821
+ row += 1
1822
+ count_row = row if include_count and count is not None else None
1823
+ if count_row is not None:
1824
+ row += 1
1825
+ deg_row = row if use_topology and deg_array is not None else None
1826
+
1827
+ edge_src_int = edge_src.astype(np.intp, copy=False)
1828
+ edge_dst_int = edge_dst.astype(np.intp, copy=False)
1829
+
1830
+ if use_chunks:
1831
+ chunk_step = resolved_chunk if resolved_chunk else edge_count
1832
+ chunk_indices = range(0, edge_count, chunk_step)
1833
+
1834
+ for start in chunk_indices:
1835
+ end = min(start + chunk_step, edge_count)
1836
+ if start >= end:
1837
+ continue
1838
+ src_slice = edge_src_int[start:end]
1839
+ dst_slice = edge_dst_int[start:end]
1840
+ slice_len = end - start
1841
+ if slice_len <= 0:
1842
+ continue
1843
+
1844
+ if workspace is not None:
1845
+ chunk_matrix = workspace[:, :slice_len]
1846
+ else:
1847
+ chunk_matrix = np.empty((component_rows, slice_len), dtype=float)
1848
+
1849
+ np.take(cos, dst_slice, out=chunk_matrix[cos_row, :slice_len])
1850
+ np.take(sin, dst_slice, out=chunk_matrix[sin_row, :slice_len])
1851
+ np.take(epi, dst_slice, out=chunk_matrix[epi_row, :slice_len])
1852
+ np.take(vf, dst_slice, out=chunk_matrix[vf_row, :slice_len])
1853
+
1854
+ if count_row is not None:
1855
+ chunk_matrix[count_row, :slice_len].fill(1.0)
1856
+ if deg_row is not None and deg_array is not None:
1857
+ np.take(deg_array, dst_slice, out=chunk_matrix[deg_row, :slice_len])
1858
+
1859
+ def _accumulate_into(
1860
+ target_row: int | None,
1861
+ values: np.ndarray | None = None,
1862
+ *,
1863
+ unit_weight: bool = False,
1864
+ ) -> None:
1865
+ if target_row is None:
1866
+ return
1867
+ row_view = accum[target_row]
1868
+ if unit_weight:
1869
+ np.add.at(row_view, src_slice, 1.0)
1870
+ else:
1871
+ if values is None:
1872
+ return
1873
+ np.add.at(row_view, src_slice, values)
1874
+
1875
+ _accumulate_into(cos_row, chunk_matrix[cos_row, :slice_len])
1876
+ _accumulate_into(sin_row, chunk_matrix[sin_row, :slice_len])
1877
+ _accumulate_into(epi_row, chunk_matrix[epi_row, :slice_len])
1878
+ _accumulate_into(vf_row, chunk_matrix[vf_row, :slice_len])
1879
+
1880
+ if count_row is not None:
1881
+ _accumulate_into(count_row, unit_weight=True)
1882
+
1883
+ if deg_row is not None and deg_array is not None:
1884
+ _accumulate_into(deg_row, chunk_matrix[deg_row, :slice_len])
1885
+ else:
1886
+ # Non-chunked path: reuse workspace to minimize temporary allocations.
1887
+ # When workspace is available with sufficient size, extract edge values
1888
+ # into workspace rows before passing to bincount.
1889
+ if workspace is not None and workspace.shape[1] >= edge_count:
1890
+ # Verify workspace has enough rows for all components
1891
+ # workspace has shape (component_rows, edge_count)
1892
+ required_rows = max(
1893
+ cos_row + 1,
1894
+ sin_row + 1,
1895
+ epi_row + 1,
1896
+ vf_row + 1,
1897
+ (count_row + 1) if count_row is not None else 0,
1898
+ (deg_row + 1) if deg_row is not None else 0,
1899
+ )
1900
+ if workspace.shape[0] >= required_rows:
1901
+ # Reuse workspace rows for edge value extraction
1902
+ np.take(cos, edge_dst_int, out=workspace[cos_row, :edge_count])
1903
+ np.take(sin, edge_dst_int, out=workspace[sin_row, :edge_count])
1904
+ np.take(epi, edge_dst_int, out=workspace[epi_row, :edge_count])
1905
+ np.take(vf, edge_dst_int, out=workspace[vf_row, :edge_count])
1906
+
1907
+ def _apply_full_bincount(
1908
+ target_row: int | None,
1909
+ values: np.ndarray | None = None,
1910
+ *,
1911
+ unit_weight: bool = False,
1912
+ ) -> None:
1913
+ if target_row is None:
1914
+ return
1915
+ if values is None and not unit_weight:
1916
+ return
1917
+ if unit_weight:
1918
+ component_accum = np.bincount(
1919
+ edge_src_int,
1920
+ minlength=n,
1921
+ )
1922
+ else:
1923
+ component_accum = np.bincount(
1924
+ edge_src_int,
1925
+ weights=values,
1926
+ minlength=n,
1927
+ )
1928
+ np.copyto(
1929
+ accum[target_row, :n],
1930
+ component_accum[:n],
1931
+ casting="unsafe",
1932
+ )
1933
+
1934
+ _apply_full_bincount(cos_row, workspace[cos_row, :edge_count])
1935
+ _apply_full_bincount(sin_row, workspace[sin_row, :edge_count])
1936
+ _apply_full_bincount(epi_row, workspace[epi_row, :edge_count])
1937
+ _apply_full_bincount(vf_row, workspace[vf_row, :edge_count])
1938
+
1939
+ if count_row is not None:
1940
+ _apply_full_bincount(count_row, unit_weight=True)
1941
+
1942
+ if deg_row is not None and deg_array is not None:
1943
+ np.take(
1944
+ deg_array, edge_dst_int, out=workspace[deg_row, :edge_count]
1945
+ )
1946
+ _apply_full_bincount(deg_row, workspace[deg_row, :edge_count])
1947
+ else:
1948
+ # Workspace doesn't have enough rows, fall back to temporary arrays
1949
+ def _apply_full_bincount(
1950
+ target_row: int | None,
1951
+ values: np.ndarray | None = None,
1952
+ *,
1953
+ unit_weight: bool = False,
1954
+ ) -> None:
1955
+ if target_row is None:
1956
+ return
1957
+ if values is None and not unit_weight:
1958
+ return
1959
+ if unit_weight:
1960
+ component_accum = np.bincount(
1961
+ edge_src_int,
1962
+ minlength=n,
1963
+ )
1964
+ else:
1965
+ component_accum = np.bincount(
1966
+ edge_src_int,
1967
+ weights=values,
1968
+ minlength=n,
1969
+ )
1970
+ np.copyto(
1971
+ accum[target_row, :n],
1972
+ component_accum[:n],
1973
+ casting="unsafe",
1974
+ )
1975
+
1976
+ _apply_full_bincount(cos_row, np.take(cos, edge_dst_int))
1977
+ _apply_full_bincount(sin_row, np.take(sin, edge_dst_int))
1978
+ _apply_full_bincount(epi_row, np.take(epi, edge_dst_int))
1979
+ _apply_full_bincount(vf_row, np.take(vf, edge_dst_int))
1980
+
1981
+ if count_row is not None:
1982
+ _apply_full_bincount(count_row, unit_weight=True)
1983
+
1984
+ if deg_row is not None and deg_array is not None:
1985
+ _apply_full_bincount(deg_row, np.take(deg_array, edge_dst_int))
1986
+ else:
1987
+ # Fallback: no workspace or insufficient width, use temporary arrays
1988
+ def _apply_full_bincount(
1989
+ target_row: int | None,
1990
+ values: np.ndarray | None = None,
1991
+ *,
1992
+ unit_weight: bool = False,
1993
+ ) -> None:
1994
+ if target_row is None:
1995
+ return
1996
+ if values is None and not unit_weight:
1997
+ return
1998
+ if unit_weight:
1999
+ component_accum = np.bincount(
2000
+ edge_src_int,
2001
+ minlength=n,
2002
+ )
2003
+ else:
2004
+ component_accum = np.bincount(
2005
+ edge_src_int,
2006
+ weights=values,
2007
+ minlength=n,
2008
+ )
2009
+ np.copyto(
2010
+ accum[target_row, :n],
2011
+ component_accum[:n],
2012
+ casting="unsafe",
2013
+ )
2014
+
2015
+ _apply_full_bincount(cos_row, np.take(cos, edge_dst_int))
2016
+ _apply_full_bincount(sin_row, np.take(sin, edge_dst_int))
2017
+ _apply_full_bincount(epi_row, np.take(epi, edge_dst_int))
2018
+ _apply_full_bincount(vf_row, np.take(vf, edge_dst_int))
2019
+
2020
+ if count_row is not None:
2021
+ _apply_full_bincount(count_row, unit_weight=True)
2022
+
2023
+ if deg_row is not None and deg_array is not None:
2024
+ _apply_full_bincount(deg_row, np.take(deg_array, edge_dst_int))
2025
+ else:
2026
+ accum.fill(0.0)
2027
+ if workspace is not None:
2028
+ workspace.fill(0.0)
2029
+
2030
+ row = 0
2031
+ np.copyto(x, accum[row], casting="unsafe")
2032
+ row += 1
2033
+ np.copyto(y, accum[row], casting="unsafe")
2034
+ row += 1
2035
+ np.copyto(epi_sum, accum[row], casting="unsafe")
2036
+ row += 1
2037
+ np.copyto(vf_sum, accum[row], casting="unsafe")
2038
+ row += 1
2039
+
2040
+ if include_count and count is not None:
2041
+ np.copyto(count, accum[row], casting="unsafe")
2042
+ row += 1
2043
+
2044
+ if use_topology and deg_sum is not None:
2045
+ np.copyto(deg_sum, accum[row], casting="unsafe")
2046
+
2047
+ return {
2048
+ "accumulator": accum,
2049
+ "edge_values": workspace,
2050
+ }
2051
+
2052
+
2053
def _build_neighbor_sums_common(
    G: TNFRGraph,
    data: MutableMapping[str, Any],
    *,
    use_numpy: bool,
    n_jobs: int | None = None,
) -> NeighborStats:
    """Build neighbour accumulators honouring cached NumPy buffers when possible.

    Dispatches between three implementations: a dense matrix product, a
    sparse edge-indexed NumPy reduction, and a pure-Python loop (optionally
    parallelised with a process pool when ``n_jobs`` resolves to more than
    one worker).

    Returns the ``(x, y, epi_sum, vf_sum, count, deg_sum, degs)`` tuple
    produced by the selected accumulator.
    """
    nodes = data["nodes"]
    cache: DnfrCache | None = data.get("cache")
    np_module = get_numpy()
    has_numpy_buffers = _has_cached_numpy_buffers(data, cache)

    # Fallback: when get_numpy() returns None but we have cached NumPy buffers,
    # attempt to retrieve NumPy from sys.modules to avoid losing vectorization.
    # This preserves ΔNFR semantics (Invariant #3) and maintains performance.
    if use_numpy and np_module is None and has_numpy_buffers:
        candidate = sys.modules.get("numpy")
        # Validate the candidate module has required NumPy attributes.
        if (
            candidate is not None
            and hasattr(candidate, "ndarray")
            and hasattr(candidate, "empty")
        ):
            np_module = candidate

    if np_module is not None:
        if not nodes:
            # Empty graph: zeroed containers are the final answer.
            return _init_neighbor_sums(data, np=np_module)

        x, y, epi_sum, vf_sum, count, deg_sum, degs = _init_neighbor_sums(
            data, np=np_module
        )

        # Reuse the centralized sparse/dense decision from _prepare_dnfr_data
        # when available; it already weighed graph density and user flags.
        prefer_sparse = data.get("prefer_sparse")
        if prefer_sparse is None:
            # Defensive fallback: recompute if the flag was not set upstream.
            prefer_sparse = _prefer_sparse_accumulation(
                len(nodes), data.get("edge_count")
            )
            data["prefer_sparse"] = prefer_sparse

        use_dense = False
        A = data.get("A")
        dense_override = data.get("dense_override", False)

        # The dense path requires a correctly shaped adjacency matrix and
        # either high graph density or an explicit dense_override flag.
        if use_numpy and A is not None:
            shape = getattr(A, "shape", (0, 0))
            matrix_valid = shape[0] == len(nodes) and shape[1] == len(nodes)
            if matrix_valid and (dense_override or not prefer_sparse):
                use_dense = True

        if use_dense:
            accumulator = _accumulate_neighbors_dense
        else:
            _ensure_numpy_state_vectors(data, np_module)
            accumulator = _accumulate_neighbors_numpy
        return accumulator(
            G,
            data,
            x=x,
            y=y,
            epi_sum=epi_sum,
            vf_sum=vf_sum,
            count=count,
            deg_sum=deg_sum,
            np=np_module,
        )

    # ---- pure-Python path (no NumPy module available/allowed) ----
    if not nodes:
        return _init_neighbor_sums(data)

    x, y, epi_sum, vf_sum, count, deg_sum, degs_list = _init_neighbor_sums(data)
    idx = data["idx"]
    epi = data["epi"]
    vf = data["vf"]
    cos_th = data["cos_theta"]
    sin_th = data["sin_theta"]
    deg_list = data.get("deg_list")

    effective_jobs = _resolve_parallel_jobs(n_jobs, len(nodes))
    if effective_jobs:
        # Precompute each node's neighbour index list so workers receive
        # picklable plain lists instead of graph objects.
        neighbor_indices: list[list[int]] = []
        for node in nodes:
            indices: list[int] = []
            for v in G.neighbors(node):
                indices.append(idx[v])
            neighbor_indices.append(indices)

        chunk_results = []
        with ProcessPoolExecutor(max_workers=effective_jobs) as executor:
            futures = []
            for start, end in _iter_chunk_offsets(len(nodes), effective_jobs):
                if start == end:
                    continue
                futures.append(
                    executor.submit(
                        _neighbor_sums_worker,
                        start,
                        end,
                        neighbor_indices,
                        cos_th,
                        sin_th,
                        epi,
                        vf,
                        x[start:end],
                        y[start:end],
                        epi_sum[start:end],
                        vf_sum[start:end],
                        count[start:end],
                        deg_sum[start:end] if deg_sum is not None else None,
                        deg_list,
                        degs_list,
                    )
                )
            for future in futures:
                chunk_results.append(future.result())

        # Stitch worker results back in node order; each chunk carries its
        # starting offset as the first tuple element.
        for (
            start,
            chunk_x,
            chunk_y,
            chunk_epi,
            chunk_vf,
            chunk_count,
            chunk_deg,
        ) in sorted(chunk_results, key=lambda item: item[0]):
            end = start + len(chunk_x)
            x[start:end] = chunk_x
            y[start:end] = chunk_y
            epi_sum[start:end] = chunk_epi
            vf_sum[start:end] = chunk_vf
            count[start:end] = chunk_count
            if deg_sum is not None and chunk_deg is not None:
                deg_sum[start:end] = chunk_deg
        return x, y, epi_sum, vf_sum, count, deg_sum, degs_list

    # Serial fallback: accumulate per node with local scalars to avoid
    # repeated list indexing inside the inner loop.
    for i, node in enumerate(nodes):
        deg_i = degs_list[i] if degs_list is not None else 0.0
        x_i = x[i]
        y_i = y[i]
        epi_i = epi_sum[i]
        vf_i = vf_sum[i]
        count_i = count[i]
        deg_acc = deg_sum[i] if deg_sum is not None else 0.0
        for v in G.neighbors(node):
            j = idx[v]
            cos_j = cos_th[j]
            sin_j = sin_th[j]
            epi_j = epi[j]
            vf_j = vf[j]
            x_i += cos_j
            y_i += sin_j
            epi_i += epi_j
            vf_i += vf_j
            count_i += 1
            if deg_sum is not None:
                # Falls back to the node's own degree when no deg_list exists.
                deg_acc += deg_list[j] if deg_list is not None else deg_i
        x[i] = x_i
        y[i] = y_i
        epi_sum[i] = epi_i
        vf_sum[i] = vf_i
        count[i] = count_i
        if deg_sum is not None:
            deg_sum[i] = deg_acc
    return x, y, epi_sum, vf_sum, count, deg_sum, degs_list
2225
+
2226
+
2227
def _accumulate_neighbors_numpy(
    G: TNFRGraph,
    data: MutableMapping[str, Any],
    *,
    x: np.ndarray,
    y: np.ndarray,
    epi_sum: np.ndarray,
    vf_sum: np.ndarray,
    count: np.ndarray | None,
    deg_sum: np.ndarray | None,
    np: ModuleType,
) -> NeighborStats:
    """Vectorised neighbour accumulation reusing cached NumPy buffers.

    Resolves the edge index arrays (building and caching them on first use),
    decides the chunk size, then delegates the reduction to
    :func:`_accumulate_neighbors_broadcasted`, publishing the scratch buffers
    back into ``data`` for diagnostics and reuse.
    """
    nodes = data["nodes"]
    if not nodes:
        return x, y, epi_sum, vf_sum, count, deg_sum, None

    cache: DnfrCache | None = data.get("cache")

    state = _ensure_numpy_state_vectors(data, np)
    cos_th = state["cos"]
    sin_th = state["sin"]
    epi = state["epi"]
    vf = state["vf"]

    # Build (and cache) the flattened edge index arrays on first use.
    edge_src = data.get("edge_src")
    edge_dst = data.get("edge_dst")
    if edge_src is None or edge_dst is None:
        edge_src, edge_dst = _build_edge_index_arrays(G, nodes, data["idx"], np)
        data["edge_src"] = edge_src
        data["edge_dst"] = edge_dst
        if cache is not None:
            cache.edge_src = edge_src
            cache.edge_dst = edge_dst
        if edge_src is not None:
            data["edge_count"] = int(edge_src.size)

    cached_deg_array = data.get("deg_array")

    # Memory optimization: When we have a cached degree array and need a count
    # buffer, we can reuse the degree array buffer as the destination for counts.
    # This works because:
    # 1. For undirected graphs, node degree equals neighbor count
    # 2. The degree array is already allocated and the right size
    # 3. We avoid allocating an extra row in the accumulator matrix
    # When reuse_count_from_deg is True:
    # - We copy cached_deg_array into the count buffer before accumulation
    # - We pass count_for_accum=None to _accumulate_neighbors_broadcasted
    # - After accumulation, count is rebound to cached_deg_array (see the
    #   ``reuse_count_from_deg`` check near the end of this function)
    reuse_count_from_deg = bool(count is not None and cached_deg_array is not None)
    count_for_accum = count
    if count is not None:
        if reuse_count_from_deg:
            # Pre-fill count with degrees (will be returned as-is since the
            # accumulator skips the count row when count_for_accum=None).
            np.copyto(count, cached_deg_array, casting="unsafe")
            count_for_accum = None
        else:
            count.fill(0.0)

    deg_array = None
    if deg_sum is not None:
        # Topology term active: zero the sums and resolve per-node degrees.
        deg_sum.fill(0.0)
        deg_array = _resolve_numpy_degree_array(
            data, count if count is not None else None, cache=cache, np=np
        )
    elif cached_deg_array is not None:
        deg_array = cached_deg_array

    # Resolve the edge chunk size from an explicit hint or the graph config.
    edge_count = int(edge_src.size) if edge_src is not None else 0
    chunk_hint = data.get("neighbor_chunk_hint")
    if chunk_hint is None:
        chunk_hint = G.graph.get("DNFR_CHUNK_SIZE")
    resolved_neighbor_chunk = (
        resolve_chunk_size(
            chunk_hint,
            edge_count,
            minimum=1,
            approx_bytes_per_item=_DNFR_APPROX_BYTES_PER_EDGE,
            clamp_to=None,
        )
        if edge_count
        else 0
    )
    data["neighbor_chunk_hint"] = chunk_hint
    data["neighbor_chunk_size"] = resolved_neighbor_chunk

    accum = _accumulate_neighbors_broadcasted(
        edge_src=edge_src,
        edge_dst=edge_dst,
        cos=cos_th,
        sin=sin_th,
        epi=epi,
        vf=vf,
        x=x,
        y=y,
        epi_sum=epi_sum,
        vf_sum=vf_sum,
        count=count_for_accum,
        deg_sum=deg_sum,
        deg_array=deg_array,
        cache=cache,
        np=np,
        chunk_size=resolved_neighbor_chunk,
    )

    # Expose the scratch buffers used by the reduction for reuse/diagnostics.
    data["neighbor_accum_np"] = accum.get("accumulator")
    edge_values = accum.get("edge_values")
    data["neighbor_edge_values_np"] = edge_values
    if edge_values is not None:
        # The workspace width reflects the chunk size actually used.
        width = getattr(edge_values, "shape", (0, 0))[1]
        data["neighbor_chunk_size"] = int(width)
    else:
        data["neighbor_chunk_size"] = resolved_neighbor_chunk
    if cache is not None:
        data["neighbor_accum_signature"] = cache.neighbor_accum_signature
    if reuse_count_from_deg and cached_deg_array is not None:
        # Restore the degree array as the count result (see comment above).
        count = cached_deg_array
    degs = deg_array if deg_sum is not None and deg_array is not None else None
    return x, y, epi_sum, vf_sum, count, deg_sum, degs
2348
+
2349
+
2350
def _compute_dnfr(
    G: TNFRGraph,
    data: MutableMapping[str, Any],
    *,
    use_numpy: bool | None = None,
    n_jobs: int | None = None,
    profile: MutableMapping[str, float] | None = None,
) -> None:
    """Compute ΔNFR using neighbour sums.

    Parameters
    ----------
    G : nx.Graph
        Graph on which the computation is performed.
    data : dict
        Precomputed ΔNFR data as returned by :func:`_prepare_dnfr_data`.
    use_numpy : bool | None, optional
        Backwards compatibility flag. When ``True`` the function eagerly
        prepares NumPy buffers (if available). When ``False`` the engine still
        prefers the vectorised path whenever :func:`get_numpy` returns a module
        and the graph does not set ``vectorized_dnfr`` to ``False``.
    profile : MutableMapping[str, float] or None, optional
        Mutable mapping that aggregates wall-clock durations for neighbour
        accumulation and records which execution branch was used. The
        ``"dnfr_neighbor_accumulation"`` bucket gathers the time spent inside
        :func:`_build_neighbor_sums_common`, while ``"dnfr_path"`` stores the
        string ``"vectorized"`` or ``"fallback"`` describing the active
        implementation.
    """
    start_timer, stop_timer = _profile_start_stop(
        profile,
        keys=("dnfr_neighbor_accumulation",),
    )

    numpy_mod = get_numpy()
    data["dnfr_numpy_available"] = bool(numpy_mod)
    vector_disabled = G.graph.get("vectorized_dnfr") is False
    # The vectorised path is preferred whenever NumPy is importable, unless
    # explicitly vetoed by ``use_numpy=False`` or the graph flag.
    prefer_dense = (
        numpy_mod is not None
        and not vector_disabled
        and use_numpy is not False
    )
    data["dnfr_used_numpy"] = bool(prefer_dense and numpy_mod is not None)
    if profile is not None:
        profile["dnfr_path"] = "vectorized" if data["dnfr_used_numpy"] else "fallback"

    data["n_jobs"] = n_jobs
    try:
        timer = start_timer()
        sums = _build_neighbor_sums_common(
            G,
            data,
            use_numpy=prefer_dense,
            n_jobs=n_jobs,
        )
        stop_timer("dnfr_neighbor_accumulation", timer)
    except TypeError as exc:
        # Older overrides of _build_neighbor_sums_common may not accept
        # ``n_jobs``; retry without it, re-raising unrelated TypeErrors.
        if "n_jobs" not in str(exc):
            raise
        timer = start_timer()
        sums = _build_neighbor_sums_common(
            G,
            data,
            use_numpy=prefer_dense,
        )
        stop_timer("dnfr_neighbor_accumulation", timer)
    if sums is None:
        return
    x, y, epi_sum, vf_sum, count, deg_sum, degs = sums
    _compute_dnfr_common(
        G,
        data,
        x=x,
        y=y,
        epi_sum=epi_sum,
        vf_sum=vf_sum,
        count=count,
        deg_sum=deg_sum,
        degs=degs,
        n_jobs=n_jobs,
        profile=profile,
    )
2432
+
2433
+
2434
def default_compute_delta_nfr(
    G: TNFRGraph,
    *,
    cache_size: int | None = 1,
    n_jobs: int | None = None,
    profile: MutableMapping[str, float] | None = None,
) -> None:
    """Compute ΔNFR by mixing phase, EPI, νf and a topological term.

    Parameters
    ----------
    G : nx.Graph
        Graph on which the computation is performed.
    cache_size : int | None, optional
        Maximum number of edge configurations cached in ``G.graph``.
        ``None`` or values <= 0 mean an unlimited cache; the default of
        ``1`` preserves the previous behaviour.
    n_jobs : int | None, optional
        Worker count for the pure-Python accumulation path. ``None`` or
        values <= 1 keep the computation serial. The vectorised NumPy
        branch ignores this parameter as it already operates in bulk.
    profile : MutableMapping[str, float] or None, optional
        Mutable mapping that accumulates wall-clock timings captured
        during the ΔNFR computation. It receives the buckets documented
        in :func:`_prepare_dnfr_data` and :func:`_compute_dnfr`, plus
        ``"dnfr_neighbor_means"``, ``"dnfr_gradient_assembly"`` and
        ``"dnfr_inplace_write"`` covering the internal stages of
        :func:`_compute_dnfr_common`. ``"dnfr_path"`` records whether the
        vectorised or fallback implementation executed the call.
    """
    if profile is not None:
        # Pre-seed every timing bucket so downstream helpers can simply +=.
        for bucket in (
            "dnfr_cache_rebuild",
            "dnfr_neighbor_accumulation",
            "dnfr_neighbor_means",
            "dnfr_gradient_assembly",
            "dnfr_inplace_write",
        ):
            profile.setdefault(bucket, 0.0)

    data = _prepare_dnfr_data(G, cache_size=cache_size, profile=profile)
    _write_dnfr_metadata(
        G,
        weights=data["weights"],
        hook_name="default_compute_delta_nfr",
    )
    _compute_dnfr(G, data, n_jobs=n_jobs, profile=profile)

    # When NumPy was available the scratch buffers are worth keeping around;
    # otherwise decide whether caching is still enabled before dropping them.
    if data.get("dnfr_numpy_available"):
        return

    cache_obj = data.get("cache")
    if not isinstance(cache_obj, DnfrCache):
        return
    effective_limit = data.get("cache_size")
    if effective_limit is None or int(effective_limit) > 0:
        # Caching remains enabled; retain the buffers for the next call.
        return

    # Caching disabled: release the NumPy-backed scratch arrays so they do
    # not linger on the cache object.
    for buffer_attr in (
        "neighbor_x_np",
        "neighbor_y_np",
        "neighbor_epi_sum_np",
        "neighbor_vf_sum_np",
        "neighbor_count_np",
        "neighbor_deg_sum_np",
        "neighbor_inv_count_np",
        "neighbor_cos_avg_np",
        "neighbor_sin_avg_np",
        "neighbor_mean_tmp_np",
        "neighbor_mean_length_np",
        "neighbor_accum_np",
        "neighbor_edge_values_np",
    ):
        setattr(cache_obj, buffer_attr, None)
    cache_obj.neighbor_accum_signature = None
2505
+
2506
+
2507
def set_delta_nfr_hook(
    G: TNFRGraph,
    func: DeltaNFRHook,
    *,
    name: str | None = None,
    note: str | None = None,
) -> None:
    """Set a stable hook to compute ΔNFR.

    The callable should accept ``(G, *[, n_jobs])`` and is responsible for
    writing ``ALIAS_DNFR`` in each node. ``n_jobs`` is optional and ignored by
    hooks that do not support parallel execution. Basic metadata in
    ``G.graph`` is updated accordingly.
    """

    def _wrapped(graph: TNFRGraph, *args: Any, **kwargs: Any) -> None:
        # Hooks that do not accept ``n_jobs`` raise TypeError mentioning the
        # keyword; retry without it in that case, re-raise otherwise.
        if "n_jobs" not in kwargs:
            func(graph, *args, **kwargs)
            return
        try:
            func(graph, *args, **kwargs)
        except TypeError as exc:
            if "n_jobs" not in str(exc):
                raise
            trimmed = {k: v for k, v in kwargs.items() if k != "n_jobs"}
            func(graph, *args, **trimmed)

    fallback_name = getattr(func, "__name__", "custom_dnfr")
    _wrapped.__name__ = fallback_name
    _wrapped.__doc__ = getattr(func, "__doc__", _wrapped.__doc__)

    G.graph["compute_delta_nfr"] = _wrapped
    G.graph["_dnfr_hook_name"] = str(name or fallback_name)
    if "_dnfr_weights" not in G.graph:
        _configure_dnfr_weights(G)
    if note:
        meta = G.graph.get("_DNFR_META", {})
        meta["note"] = str(note)
        G.graph["_DNFR_META"] = meta
2547
+
2548
+
2549
def _dnfr_hook_chunk_worker(
    G: TNFRGraph,
    node_ids: Sequence[NodeId],
    grad_items: tuple[
        tuple[str, Callable[[TNFRGraph, NodeId, Mapping[str, Any]], float]],
        ...,
    ],
    weights: Mapping[str, float],
) -> list[tuple[NodeId, float]]:
    """Evaluate the weighted gradient total for every node in ``node_ids``.

    Defined at module level so :class:`concurrent.futures.ProcessPoolExecutor`
    can pickle it.
    """

    out: list[tuple[NodeId, float]] = []
    for node in node_ids:
        nd = G.nodes[node]
        # Zero-weight components are skipped entirely (truthiness test).
        total = sum(
            (
                w * float(grad_fn(G, node, nd))
                for grad_name, grad_fn in grad_items
                if (w := weights.get(grad_name, 0.0))
            ),
            0.0,
        )
        out.append((node, total))
    return out
2574
+
2575
+
2576
def _apply_dnfr_hook(
    G: TNFRGraph,
    grads: Mapping[str, Callable[[TNFRGraph, NodeId, Mapping[str, Any]], float]],
    *,
    weights: Mapping[str, float],
    hook_name: str,
    note: str | None = None,
    n_jobs: int | None = None,
) -> None:
    """Compute and store ΔNFR using ``grads``.

    Parameters
    ----------
    G : nx.Graph
        Graph whose nodes will receive the ΔNFR update.
    grads : dict
        Mapping from component names to callables with signature
        ``(G, node, data) -> float`` returning the gradient contribution.
    weights : dict
        Weight per component; missing entries default to ``0``.
    hook_name : str
        Friendly identifier stored in ``G.graph`` metadata.
    note : str | None, optional
        Additional documentation recorded next to the hook metadata.
    n_jobs : int | None, optional
        Optional worker count for the pure-Python execution path. When NumPy
        is available the helper always prefers the vectorised implementation
        and ignores ``n_jobs`` because the computation already happens in
        bulk.
    """

    nodes_data: list[tuple[NodeId, Mapping[str, Any]]] = list(G.nodes(data=True))
    if not nodes_data:
        # Empty graph: still record hook metadata so introspection works.
        _write_dnfr_metadata(G, weights=weights, hook_name=hook_name, note=note)
        return

    np_module = cast(ModuleType | None, get_numpy())
    if np_module is not None:
        # Vectorised branch: accumulate each weighted gradient component into
        # a single totals array, one pass per component.
        totals = np_module.zeros(len(nodes_data), dtype=float)
        for name, func in grads.items():
            w = float(weights.get(name, 0.0))
            if w == 0.0:
                # Component is switched off; skip evaluating its gradient.
                continue
            values = np_module.fromiter(
                (float(func(G, n, nd)) for n, nd in nodes_data),
                dtype=float,
                count=len(nodes_data),
            )
            if w == 1.0:
                # Avoid the scalar multiply when the weight is exactly one.
                np_module.add(totals, values, out=totals)
            else:
                np_module.add(totals, values * w, out=totals)
        for idx, (n, _) in enumerate(nodes_data):
            set_dnfr(G, n, float(totals[idx]))
        _write_dnfr_metadata(G, weights=weights, hook_name=hook_name, note=note)
        return

    effective_jobs = _resolve_parallel_jobs(n_jobs, len(nodes_data))
    results: list[tuple[NodeId, float]] | None = None
    if effective_jobs:
        grad_items = tuple(grads.items())
        # ProcessPoolExecutor requires picklable arguments. Instead of explicitly
        # testing with pickle.dumps (which poses security risks), we attempt
        # parallelization and gracefully fall back to serial on any failure.
        try:
            chunk_results: list[tuple[NodeId, float]] = []
            with ProcessPoolExecutor(max_workers=effective_jobs) as executor:
                futures = []
                node_ids: list[NodeId] = [n for n, _ in nodes_data]
                for start, end in _iter_chunk_offsets(len(node_ids), effective_jobs):
                    if start == end:
                        # Empty chunk produced by the offset iterator.
                        continue
                    futures.append(
                        executor.submit(
                            _dnfr_hook_chunk_worker,
                            G,
                            node_ids[start:end],
                            grad_items,
                            weights,
                        )
                    )
                for future in futures:
                    chunk_results.extend(future.result())
            results = chunk_results
        except Exception:
            # Parallel execution failed (pickle, executor, or worker error)
            # Fall back to serial processing
            results = None

    if results is None:
        # Serial fallback: identical arithmetic to _dnfr_hook_chunk_worker.
        results = []
        for n, nd in nodes_data:
            total = 0.0
            for name, func in grads.items():
                w = weights.get(name, 0.0)
                if w:
                    total += w * float(func(G, n, nd))
            results.append((n, total))

    for node, value in results:
        set_dnfr(G, node, float(value))

    _write_dnfr_metadata(G, weights=weights, hook_name=hook_name, note=note)
2679
+
2680
+
2681
+ # --- Example hooks (optional) ---
2682
+
2683
+
2684
class _PhaseGradient:
    """Phase-gradient callable backed by precomputed cos/sin lookup tables."""

    __slots__ = ("cos", "sin")

    def __init__(
        self,
        cos_map: Mapping[NodeId, float],
        sin_map: Mapping[NodeId, float],
    ) -> None:
        self.cos: Mapping[NodeId, float] = cos_map
        self.sin: Mapping[NodeId, float] = sin_map

    def __call__(
        self,
        G: TNFRGraph,
        n: NodeId,
        nd: Mapping[str, Any],
    ) -> float:
        raw_theta = get_theta_attr(nd, 0.0)
        theta = 0.0 if raw_theta is None else float(raw_theta)
        neighbor_ids = list(G.neighbors(n))
        # An isolated node has no neighbourhood mean; use its own phase.
        mean_theta = theta
        if neighbor_ids:
            mean_theta = neighbor_phase_mean_list(
                neighbor_ids,
                cos_th=self.cos,
                sin_th=self.sin,
                fallback=theta,
            )
        return -angle_diff(theta, mean_theta) / math.pi
2716
+
2717
+
2718
class _NeighborAverageGradient:
    """Gradient comparing a node's scalar value with its neighbourhood mean."""

    __slots__ = ("alias", "values")

    def __init__(
        self,
        alias: tuple[str, ...],
        values: MutableMapping[NodeId, float],
    ) -> None:
        self.alias: tuple[str, ...] = alias
        self.values: MutableMapping[NodeId, float] = values

    def __call__(
        self,
        G: TNFRGraph,
        n: NodeId,
        nd: Mapping[str, Any],
    ) -> float:
        # Lazily resolve and memoise the node's own value.
        own = self.values.get(n)
        if own is None:
            own = float(get_attr(nd, self.alias, 0.0))
            self.values[n] = own
        neighbor_ids = list(G.neighbors(n))
        if not neighbor_ids:
            return 0.0
        acc = 0.0
        for neighbor in neighbor_ids:
            cached = self.values.get(neighbor)
            if cached is None:
                # Missing neighbours default to the current node's value.
                cached = float(get_attr(G.nodes[neighbor], self.alias, own))
                self.values[neighbor] = cached
            acc += cached
        return acc / len(neighbor_ids) - own
2752
+
2753
+
2754
def dnfr_phase_only(G: TNFRGraph, *, n_jobs: int | None = None) -> None:
    """Compute ΔNFR from phase only (Kuramoto-like).

    Parameters
    ----------
    G : nx.Graph
        Graph whose nodes receive the ΔNFR assignment.
    n_jobs : int | None, optional
        Parallel worker hint used when NumPy is unavailable; defaults to
        serial execution.
    """

    trig_tables = compute_theta_trig(G.nodes(data=True))
    phase_gradient = _PhaseGradient(trig_tables.cos, trig_tables.sin)
    _apply_dnfr_hook(
        G,
        {"phase": phase_gradient},
        weights={"phase": 1.0},
        hook_name="dnfr_phase_only",
        note="Example hook.",
        n_jobs=n_jobs,
    )
2776
+
2777
+
2778
def dnfr_epi_vf_mixed(G: TNFRGraph, *, n_jobs: int | None = None) -> None:
    """Compute ΔNFR without phase, mixing EPI and νf.

    Parameters
    ----------
    G : nx.Graph
        Graph whose nodes receive the ΔNFR assignment.
    n_jobs : int | None, optional
        Parallel worker hint used when NumPy is unavailable; defaults to
        serial execution.
    """

    # Seed both value caches in a single pass over the node data.
    epi_seed: dict[NodeId, float] = {}
    vf_seed: dict[NodeId, float] = {}
    for node, nd in G.nodes(data=True):
        epi_seed[node] = float(get_attr(nd, ALIAS_EPI, 0.0))
        vf_seed[node] = float(get_attr(nd, ALIAS_VF, 0.0))
    _apply_dnfr_hook(
        G,
        {
            "epi": _NeighborAverageGradient(ALIAS_EPI, epi_seed),
            "vf": _NeighborAverageGradient(ALIAS_VF, vf_seed),
        },
        weights={"phase": 0.0, "epi": 0.5, "vf": 0.5},
        hook_name="dnfr_epi_vf_mixed",
        note="Example hook.",
        n_jobs=n_jobs,
    )
2806
+
2807
+
2808
def dnfr_laplacian(G: TNFRGraph, *, n_jobs: int | None = None) -> None:
    """Explicit topological gradient using Laplacian over EPI and νf.

    Parameters
    ----------
    G : nx.Graph
        Graph whose nodes receive the ΔNFR assignment.
    n_jobs : int | None, optional
        Parallel worker hint used when NumPy is unavailable; defaults to
        serial execution.
    """

    # Component weights come from graph parameters, with package defaults.
    weights_cfg = get_param(G, "DNFR_WEIGHTS")
    default_weights = DEFAULTS["DNFR_WEIGHTS"]
    epi_weight = float(weights_cfg.get("epi", default_weights["epi"]))
    vf_weight = float(weights_cfg.get("vf", default_weights["vf"]))

    # Seed both value caches in a single pass over the node data.
    epi_seed: dict[NodeId, float] = {}
    vf_seed: dict[NodeId, float] = {}
    for node, nd in G.nodes(data=True):
        epi_seed[node] = float(get_attr(nd, ALIAS_EPI, 0.0))
        vf_seed[node] = float(get_attr(nd, ALIAS_VF, 0.0))
    _apply_dnfr_hook(
        G,
        {
            "epi": _NeighborAverageGradient(ALIAS_EPI, epi_seed),
            "vf": _NeighborAverageGradient(ALIAS_VF, vf_seed),
        },
        weights={"epi": epi_weight, "vf": vf_weight},
        hook_name="dnfr_laplacian",
        note="Topological gradient",
        n_jobs=n_jobs,
    )
2840
+
2841
+
2842
def compute_delta_nfr_hamiltonian(
    G: TNFRGraph,
    *,
    hbar_str: float | None = None,
    cache_hamiltonian: bool = True,
    profile: MutableMapping[str, float] | None = None,
) -> None:
    r"""Compute ΔNFR using rigorous Hamiltonian commutator formulation.

    This is the **canonical** TNFR method that constructs the internal
    Hamiltonian H_int = H_coh + H_freq + H_coupling explicitly and computes
    ΔNFR from the quantum commutator:

    .. math::
        \Delta\text{NFR}_n = \frac{i}{\hbar_{str}}
        \langle n | [\hat{H}_{int}, \rho_n] | n \rangle

    where :math:`\rho_n = |n\rangle\langle n|` is the density matrix for
    node n.

    Theory
    ------

    The internal Hamiltonian governs structural evolution through:

    .. math::
        \frac{\partial \text{EPI}}{\partial t} = \nu_f \cdot \Delta\text{NFR}(t)

    with the reorganization operator defined as:

    .. math::
        \Delta\text{NFR} = \frac{d}{dt} + \frac{i[\hat{H}_{int}, \cdot]}{\hbar_{str}}

    **Components**:

    1. **H_coh**: Coherence potential from structural similarity
    2. **H_freq**: Diagonal frequency operator (νf per node)
    3. **H_coupling**: Network topology-induced interactions

    Parameters
    ----------
    G : TNFRGraph
        Graph with nodes containing 'nu_f', 'phase', 'epi', 'si' attributes
    hbar_str : float, optional
        Structural Planck constant (ℏ_str). If None, uses
        ``G.graph.get('HBAR_STR', 1.0)``. Natural units (1.0) make the
        Hamiltonian directly represent structural energy scales.
    cache_hamiltonian : bool, default=True
        If True, caches the Hamiltonian in ``G.graph['_hamiltonian_cache']``
        for reuse in subsequent calls. Set to False for dynamic networks
        where topology changes frequently.
    profile : MutableMapping[str, float] or None, optional
        Mutable mapping that accumulates wall-clock timings:

        - ``"hamiltonian_construction"``: Time to build H_int
        - ``"hamiltonian_computation"``: Time to compute all ΔNFR values
        - ``"hamiltonian_write"``: Time to write results to nodes

    Notes
    -----

    **Advantages over heuristic methods**:

    - **Rigorous**: Directly implements TNFR mathematical formalization
    - **Hermitian**: Guarantees real eigenvalues and unitary evolution
    - **Verifiable**: Can compute energy spectrum and eigenstates
    - **Complete**: Accounts for all structural correlations via coherence matrix

    **Performance considerations**:

    - Complexity: O(N²) for matrix construction, O(N³) for eigendecomposition
    - Recommended for networks with N < 1000 nodes
    - For larger networks, use default_compute_delta_nfr (heuristic, O(E))

    **Cache behavior**:

    - Hamiltonian is cached if ``cache_hamiltonian=True``
    - Cache is invalidated when node attributes or topology change
    - Uses ``CacheManager`` for consistency with other TNFR computations

    Examples
    --------

    **Basic usage**:

    >>> import networkx as nx
    >>> from tnfr.dynamics.dnfr import compute_delta_nfr_hamiltonian
    >>> G = nx.cycle_graph(10)
    >>> for node in G.nodes:
    ...     G.nodes[node].update({
    ...         'nu_f': 1.0, 'phase': 0.0, 'epi': 1.0, 'si': 0.8
    ...     })
    >>> compute_delta_nfr_hamiltonian(G)
    >>> # ΔNFR values now stored in G.nodes[n]['delta_nfr']

    **With profiling**:

    >>> profile = {}
    >>> compute_delta_nfr_hamiltonian(G, profile=profile)
    >>> print(f"Construction: {profile['hamiltonian_construction']:.3f}s")
    >>> print(f"Computation: {profile['hamiltonian_computation']:.3f}s")

    **Integration with dynamics**:

    >>> from tnfr.dynamics import set_delta_nfr_hook
    >>> set_delta_nfr_hook(G, compute_delta_nfr_hamiltonian, name="hamiltonian")
    >>> # Now simulate() will use Hamiltonian-based ΔNFR

    See Also
    --------
    tnfr.operators.hamiltonian.InternalHamiltonian : Core Hamiltonian class
    default_compute_delta_nfr : Heuristic O(E) method for large networks
    set_delta_nfr_hook : Register custom ΔNFR computation

    References
    ----------

    - Mathematical formalization: ``Formalizacion-Matematica-TNFR-Unificada.pdf`` §2.4
    - ΔNFR development: ``Desarrollo-Exhaustivo_-Formalizacion-Matematica-Ri-3.pdf``
    """
    # Imported lazily to avoid a circular import at module load time.
    from ..operators.hamiltonian import InternalHamiltonian

    # Initialize profiling for the three stages below.
    start_timer, stop_timer = _profile_start_stop(
        profile,
        keys=(
            "hamiltonian_construction",
            "hamiltonian_computation",
            "hamiltonian_write",
        ),
    )

    # Structural Planck constant: per-call override, else graph-level default.
    if hbar_str is None:
        hbar_str = G.graph.get("HBAR_STR", 1.0)

    # Check cache for an existing, still-valid Hamiltonian.
    cache_key = "_hamiltonian_cache"
    ham = None

    if cache_hamiltonian:
        cached_ham = G.graph.get(cache_key)
        # Reuse only when type, node count and checksum all still match.
        if cached_ham is not None:
            current_checksum = G.graph.get("_dnfr_nodes_checksum")
            cached_checksum = getattr(cached_ham, "_cache_checksum", None)
            if (
                isinstance(cached_ham, InternalHamiltonian)
                and cached_ham.N == G.number_of_nodes()
                and current_checksum == cached_checksum
            ):
                ham = cached_ham

    # Construct Hamiltonian if not cached or invalid.
    if ham is None:
        timer = start_timer()

        # Cache manager keeps this consistent with other TNFR computations.
        manager = _graph_cache_manager(G.graph)

        ham = InternalHamiltonian(G, hbar_str=float(hbar_str), cache_manager=manager)

        if cache_hamiltonian:
            # Stamp the checksum so staleness can be detected next call.
            ham._cache_checksum = G.graph.get("_dnfr_nodes_checksum")
            G.graph[cache_key] = ham

        stop_timer("hamiltonian_construction", timer)

    # Compute ΔNFR for all nodes.
    timer = start_timer()
    delta_nfr_values = {}
    for node in ham.nodes:
        delta_nfr_values[node] = ham.compute_node_delta_nfr(node)
    stop_timer("hamiltonian_computation", timer)

    # Write results to graph nodes.
    timer = start_timer()
    for node, delta_val in delta_nfr_values.items():
        set_dnfr(G, node, delta_val)
    stop_timer("hamiltonian_write", timer)

    # Record hook metadata for introspection.
    _write_dnfr_metadata(
        G,
        weights={"hamiltonian": 1.0},
        hook_name="compute_delta_nfr_hamiltonian",
        note="Canonical Hamiltonian commutator formulation",
    )