tnfr 6.0.0__py3-none-any.whl → 7.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tnfr might be problematic.

Files changed (176)
  1. tnfr/__init__.py +50 -5
  2. tnfr/__init__.pyi +0 -7
  3. tnfr/_compat.py +0 -1
  4. tnfr/_generated_version.py +34 -0
  5. tnfr/_version.py +44 -2
  6. tnfr/alias.py +14 -13
  7. tnfr/alias.pyi +5 -37
  8. tnfr/cache.py +9 -729
  9. tnfr/cache.pyi +8 -224
  10. tnfr/callback_utils.py +16 -31
  11. tnfr/callback_utils.pyi +3 -29
  12. tnfr/cli/__init__.py +17 -11
  13. tnfr/cli/__init__.pyi +0 -21
  14. tnfr/cli/arguments.py +175 -14
  15. tnfr/cli/arguments.pyi +5 -11
  16. tnfr/cli/execution.py +434 -48
  17. tnfr/cli/execution.pyi +14 -24
  18. tnfr/cli/utils.py +20 -3
  19. tnfr/cli/utils.pyi +5 -5
  20. tnfr/config/__init__.py +2 -1
  21. tnfr/config/__init__.pyi +2 -0
  22. tnfr/config/feature_flags.py +83 -0
  23. tnfr/config/init.py +1 -1
  24. tnfr/config/operator_names.py +1 -14
  25. tnfr/config/presets.py +6 -26
  26. tnfr/constants/__init__.py +10 -13
  27. tnfr/constants/__init__.pyi +10 -22
  28. tnfr/constants/aliases.py +31 -0
  29. tnfr/constants/core.py +4 -3
  30. tnfr/constants/init.py +1 -1
  31. tnfr/constants/metric.py +3 -3
  32. tnfr/dynamics/__init__.py +64 -10
  33. tnfr/dynamics/__init__.pyi +3 -4
  34. tnfr/dynamics/adaptation.py +79 -13
  35. tnfr/dynamics/aliases.py +10 -9
  36. tnfr/dynamics/coordination.py +77 -35
  37. tnfr/dynamics/dnfr.py +575 -274
  38. tnfr/dynamics/dnfr.pyi +1 -10
  39. tnfr/dynamics/integrators.py +47 -33
  40. tnfr/dynamics/integrators.pyi +0 -1
  41. tnfr/dynamics/runtime.py +489 -129
  42. tnfr/dynamics/sampling.py +2 -0
  43. tnfr/dynamics/selectors.py +101 -62
  44. tnfr/execution.py +15 -8
  45. tnfr/execution.pyi +5 -25
  46. tnfr/flatten.py +7 -3
  47. tnfr/flatten.pyi +1 -8
  48. tnfr/gamma.py +22 -26
  49. tnfr/gamma.pyi +0 -6
  50. tnfr/glyph_history.py +37 -26
  51. tnfr/glyph_history.pyi +1 -19
  52. tnfr/glyph_runtime.py +16 -0
  53. tnfr/glyph_runtime.pyi +9 -0
  54. tnfr/immutable.py +20 -15
  55. tnfr/immutable.pyi +4 -7
  56. tnfr/initialization.py +5 -7
  57. tnfr/initialization.pyi +1 -9
  58. tnfr/io.py +6 -305
  59. tnfr/io.pyi +13 -8
  60. tnfr/mathematics/__init__.py +81 -0
  61. tnfr/mathematics/backend.py +426 -0
  62. tnfr/mathematics/dynamics.py +398 -0
  63. tnfr/mathematics/epi.py +254 -0
  64. tnfr/mathematics/generators.py +222 -0
  65. tnfr/mathematics/metrics.py +119 -0
  66. tnfr/mathematics/operators.py +233 -0
  67. tnfr/mathematics/operators_factory.py +71 -0
  68. tnfr/mathematics/projection.py +78 -0
  69. tnfr/mathematics/runtime.py +173 -0
  70. tnfr/mathematics/spaces.py +247 -0
  71. tnfr/mathematics/transforms.py +292 -0
  72. tnfr/metrics/__init__.py +10 -10
  73. tnfr/metrics/coherence.py +123 -94
  74. tnfr/metrics/common.py +22 -13
  75. tnfr/metrics/common.pyi +42 -11
  76. tnfr/metrics/core.py +72 -14
  77. tnfr/metrics/diagnosis.py +48 -57
  78. tnfr/metrics/diagnosis.pyi +3 -7
  79. tnfr/metrics/export.py +3 -5
  80. tnfr/metrics/glyph_timing.py +41 -31
  81. tnfr/metrics/reporting.py +13 -6
  82. tnfr/metrics/sense_index.py +884 -114
  83. tnfr/metrics/trig.py +167 -11
  84. tnfr/metrics/trig.pyi +1 -0
  85. tnfr/metrics/trig_cache.py +112 -15
  86. tnfr/node.py +400 -17
  87. tnfr/node.pyi +55 -38
  88. tnfr/observers.py +111 -8
  89. tnfr/observers.pyi +0 -15
  90. tnfr/ontosim.py +9 -6
  91. tnfr/ontosim.pyi +0 -5
  92. tnfr/operators/__init__.py +529 -42
  93. tnfr/operators/__init__.pyi +14 -0
  94. tnfr/operators/definitions.py +350 -18
  95. tnfr/operators/definitions.pyi +0 -14
  96. tnfr/operators/grammar.py +760 -0
  97. tnfr/operators/jitter.py +28 -22
  98. tnfr/operators/registry.py +7 -12
  99. tnfr/operators/registry.pyi +0 -2
  100. tnfr/operators/remesh.py +38 -61
  101. tnfr/rng.py +17 -300
  102. tnfr/schemas/__init__.py +8 -0
  103. tnfr/schemas/grammar.json +94 -0
  104. tnfr/selector.py +3 -4
  105. tnfr/selector.pyi +1 -1
  106. tnfr/sense.py +22 -24
  107. tnfr/sense.pyi +0 -7
  108. tnfr/structural.py +504 -21
  109. tnfr/structural.pyi +41 -18
  110. tnfr/telemetry/__init__.py +23 -1
  111. tnfr/telemetry/cache_metrics.py +226 -0
  112. tnfr/telemetry/nu_f.py +423 -0
  113. tnfr/telemetry/nu_f.pyi +123 -0
  114. tnfr/tokens.py +1 -4
  115. tnfr/tokens.pyi +1 -6
  116. tnfr/trace.py +20 -53
  117. tnfr/trace.pyi +9 -37
  118. tnfr/types.py +244 -15
  119. tnfr/types.pyi +200 -14
  120. tnfr/units.py +69 -0
  121. tnfr/units.pyi +16 -0
  122. tnfr/utils/__init__.py +107 -48
  123. tnfr/utils/__init__.pyi +80 -11
  124. tnfr/utils/cache.py +1705 -65
  125. tnfr/utils/cache.pyi +370 -58
  126. tnfr/utils/chunks.py +104 -0
  127. tnfr/utils/chunks.pyi +21 -0
  128. tnfr/utils/data.py +95 -5
  129. tnfr/utils/data.pyi +8 -17
  130. tnfr/utils/graph.py +2 -4
  131. tnfr/utils/init.py +31 -7
  132. tnfr/utils/init.pyi +4 -11
  133. tnfr/utils/io.py +313 -14
  134. tnfr/{helpers → utils}/numeric.py +50 -24
  135. tnfr/utils/numeric.pyi +21 -0
  136. tnfr/validation/__init__.py +92 -4
  137. tnfr/validation/__init__.pyi +77 -17
  138. tnfr/validation/compatibility.py +79 -43
  139. tnfr/validation/compatibility.pyi +4 -6
  140. tnfr/validation/grammar.py +55 -133
  141. tnfr/validation/grammar.pyi +37 -8
  142. tnfr/validation/graph.py +138 -0
  143. tnfr/validation/graph.pyi +17 -0
  144. tnfr/validation/rules.py +161 -74
  145. tnfr/validation/rules.pyi +55 -18
  146. tnfr/validation/runtime.py +263 -0
  147. tnfr/validation/runtime.pyi +31 -0
  148. tnfr/validation/soft_filters.py +170 -0
  149. tnfr/validation/soft_filters.pyi +37 -0
  150. tnfr/validation/spectral.py +159 -0
  151. tnfr/validation/spectral.pyi +46 -0
  152. tnfr/validation/syntax.py +28 -139
  153. tnfr/validation/syntax.pyi +7 -4
  154. tnfr/validation/window.py +39 -0
  155. tnfr/validation/window.pyi +1 -0
  156. tnfr/viz/__init__.py +9 -0
  157. tnfr/viz/matplotlib.py +246 -0
  158. {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/METADATA +63 -19
  159. tnfr-7.0.0.dist-info/RECORD +185 -0
  160. {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/licenses/LICENSE.md +1 -1
  161. tnfr/constants_glyphs.py +0 -16
  162. tnfr/constants_glyphs.pyi +0 -12
  163. tnfr/grammar.py +0 -25
  164. tnfr/grammar.pyi +0 -13
  165. tnfr/helpers/__init__.py +0 -151
  166. tnfr/helpers/__init__.pyi +0 -66
  167. tnfr/helpers/numeric.pyi +0 -12
  168. tnfr/presets.py +0 -15
  169. tnfr/presets.pyi +0 -7
  170. tnfr/utils/io.pyi +0 -10
  171. tnfr/utils/validators.py +0 -130
  172. tnfr/utils/validators.pyi +0 -19
  173. tnfr-6.0.0.dist-info/RECORD +0 -157
  174. {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/WHEEL +0 -0
  175. {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/entry_points.txt +0 -0
  176. {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/top_level.txt +0 -0
tnfr/dynamics/dnfr.py CHANGED
@@ -12,21 +12,19 @@ from __future__ import annotations
 
 import math
 import sys
+from collections.abc import Callable, Iterator, Mapping, MutableMapping, Sequence
 from concurrent.futures import ProcessPoolExecutor
-from dataclasses import dataclass
 from types import ModuleType
-from collections.abc import Callable, Iterator, Mapping, MutableMapping, Sequence
 from typing import TYPE_CHECKING, Any, cast
 
+from time import perf_counter
+
 from ..alias import get_attr, get_theta_attr, set_dnfr
-from ..constants import DEFAULTS, get_aliases, get_param
-from ..helpers.numeric import angle_diff
-from ..cache import CacheManager
+from ..constants import DEFAULTS, get_param
+from ..constants.aliases import ALIAS_EPI, ALIAS_VF
 from ..metrics.common import merge_and_normalize_weights
 from ..metrics.trig import neighbor_phase_mean_list
 from ..metrics.trig_cache import compute_theta_trig
-from ..utils import cached_node_list, cached_nodes_and_A, get_numpy, normalize_weights
-from ..utils.cache import DNFR_PREP_STATE_KEY, DnfrPrepState, _graph_cache_manager
 from ..types import (
     DeltaNFRHook,
     DnfrCacheVectors,
@@ -35,15 +33,28 @@ from ..types import (
     NodeId,
     TNFRGraph,
 )
+from ..utils import (
+    DNFR_PREP_STATE_KEY,
+    DnfrPrepState,
+    DnfrCache,
+    CacheManager,
+    _graph_cache_manager,
+    angle_diff,
+    angle_diff_array,
+    cached_node_list,
+    cached_nodes_and_A,
+    get_numpy,
+    normalize_weights,
+    resolve_chunk_size,
+    new_dnfr_cache,
+)
 
 if TYPE_CHECKING:  # pragma: no cover - import-time typing hook
     import numpy as np
-ALIAS_EPI = get_aliases("EPI")
-ALIAS_VF = get_aliases("VF")
-
 
 _MEAN_VECTOR_EPS = 1e-12
 _SPARSE_DENSITY_THRESHOLD = 0.25
+_DNFR_APPROX_BYTES_PER_EDGE = 48
 
 
 def _should_vectorize(G: TNFRGraph, np_module: ModuleType | None) -> bool:
@@ -57,64 +68,6 @@ def _should_vectorize(G: TNFRGraph, np_module: ModuleType | None) -> bool:
     return bool(flag)
 
 
-@dataclass
-class DnfrCache:
-    idx: dict[Any, int]
-    theta: list[float]
-    epi: list[float]
-    vf: list[float]
-    cos_theta: list[float]
-    sin_theta: list[float]
-    neighbor_x: list[float]
-    neighbor_y: list[float]
-    neighbor_epi_sum: list[float]
-    neighbor_vf_sum: list[float]
-    neighbor_count: list[float]
-    neighbor_deg_sum: list[float] | None
-    th_bar: list[float] | None = None
-    epi_bar: list[float] | None = None
-    vf_bar: list[float] | None = None
-    deg_bar: list[float] | None = None
-    degs: dict[Any, float] | None = None
-    deg_list: list[float] | None = None
-    theta_np: Any | None = None
-    epi_np: Any | None = None
-    vf_np: Any | None = None
-    cos_theta_np: Any | None = None
-    sin_theta_np: Any | None = None
-    deg_array: Any | None = None
-    edge_src: Any | None = None
-    edge_dst: Any | None = None
-    checksum: Any | None = None
-    neighbor_x_np: Any | None = None
-    neighbor_y_np: Any | None = None
-    neighbor_epi_sum_np: Any | None = None
-    neighbor_vf_sum_np: Any | None = None
-    neighbor_count_np: Any | None = None
-    neighbor_deg_sum_np: Any | None = None
-    th_bar_np: Any | None = None
-    epi_bar_np: Any | None = None
-    vf_bar_np: Any | None = None
-    deg_bar_np: Any | None = None
-    grad_phase_np: Any | None = None
-    grad_epi_np: Any | None = None
-    grad_vf_np: Any | None = None
-    grad_topo_np: Any | None = None
-    grad_total_np: Any | None = None
-    dense_components_np: Any | None = None
-    dense_accum_np: Any | None = None
-    dense_degree_np: Any | None = None
-    neighbor_accum_np: Any | None = None
-    neighbor_inv_count_np: Any | None = None
-    neighbor_cos_avg_np: Any | None = None
-    neighbor_sin_avg_np: Any | None = None
-    neighbor_mean_tmp_np: Any | None = None
-    neighbor_mean_length_np: Any | None = None
-    edge_signature: Any | None = None
-    neighbor_accum_signature: Any | None = None
-    neighbor_edge_values_np: Any | None = None
-
-
 _NUMPY_CACHE_ATTRS = (
     "theta_np",
     "epi_np",
@@ -141,6 +94,34 @@ _NUMPY_CACHE_ATTRS = (
 )
 
 
+def _profile_start_stop(
+    profile: MutableMapping[str, float] | None,
+    *,
+    keys: Sequence[str] = (),
+) -> tuple[Callable[[], float], Callable[[str, float], None]]:
+    """Return helpers to measure wall-clock durations for ``profile`` keys."""
+
+    if profile is not None:
+        for key in keys:
+            profile.setdefault(key, 0.0)
+
+        def _start() -> float:
+            return perf_counter()
+
+        def _stop(metric: str, start: float) -> None:
+            profile[metric] = float(profile.get(metric, 0.0)) + (perf_counter() - start)
+
+    else:
+
+        def _start() -> float:
+            return 0.0
+
+        def _stop(metric: str, start: float) -> None:  # noqa: ARG001 - uniform signature
+            return None
+
+    return _start, _stop
+
+
 def _iter_chunk_offsets(total: int, jobs: int) -> Iterator[tuple[int, int]]:
     """Yield ``(start, end)`` offsets splitting ``total`` items across ``jobs``."""
 
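
Note: the ``_profile_start_stop`` helper added above returns a matched pair of closures so call sites never have to branch on whether profiling is enabled. A minimal sketch of driving it directly (not part of the diff; the ``timings`` mapping and the ``"example_stage"`` key are illustrative only):

    timings: dict[str, float] = {}

    # With a mapping supplied, elapsed wall-clock seconds accumulate under each key;
    # with profile=None both closures become no-ops and the call sites stay unchanged.
    start, stop = _profile_start_stop(timings, keys=("example_stage",))

    t0 = start()
    sum(range(10_000))            # stand-in for the work being measured
    stop("example_stage", t0)

    print(timings["example_stage"])   # total seconds across all start/stop pairs
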
@@ -175,7 +156,15 @@ def _neighbor_sums_worker(
     deg_base: Sequence[float] | None,
     deg_list: Sequence[float] | None,
     degs_list: Sequence[float] | None,
-) -> tuple[int, list[float], list[float], list[float], list[float], list[float], list[float] | None]:
+) -> tuple[
+    int,
+    list[float],
+    list[float],
+    list[float],
+    list[float],
+    list[float],
+    list[float] | None,
+]:
     """Return partial neighbour sums for the ``[start, end)`` range."""
 
     chunk_x: list[float] = []
@@ -281,7 +270,10 @@ def _resolve_parallel_jobs(n_jobs: int | None, total: int) -> int | None:
 
 
 def _is_numpy_like(obj) -> bool:
-    return getattr(obj, "dtype", None) is not None and getattr(obj, "shape", None) is not None
+    return (
+        getattr(obj, "dtype", None) is not None
+        and getattr(obj, "shape", None) is not None
+    )
 
 
 def _has_cached_numpy_buffers(data: dict, cache: DnfrCache | None) -> bool:
@@ -416,43 +408,28 @@ def _init_dnfr_cache(
     idx_local = {n: i for i, n in enumerate(nodes)}
     size = len(nodes)
     zeros = [0.0] * size
-    if prev_cache is None:
-        cache_new = DnfrCache(
-            idx=idx_local,
-            theta=zeros.copy(),
-            epi=zeros.copy(),
-            vf=zeros.copy(),
-            cos_theta=[1.0] * size,
-            sin_theta=[0.0] * size,
-            neighbor_x=zeros.copy(),
-            neighbor_y=zeros.copy(),
-            neighbor_epi_sum=zeros.copy(),
-            neighbor_vf_sum=zeros.copy(),
-            neighbor_count=zeros.copy(),
-            neighbor_deg_sum=zeros.copy() if size else [],
-            degs=None,
-            edge_src=None,
-            edge_dst=None,
-            checksum=checksum,
-        )
-    else:
-        cache_new = prev_cache
-        cache_new.idx = idx_local
-        cache_new.theta = zeros.copy()
-        cache_new.epi = zeros.copy()
-        cache_new.vf = zeros.copy()
-        cache_new.cos_theta = [1.0] * size
-        cache_new.sin_theta = [0.0] * size
-        cache_new.neighbor_x = zeros.copy()
-        cache_new.neighbor_y = zeros.copy()
-        cache_new.neighbor_epi_sum = zeros.copy()
-        cache_new.neighbor_vf_sum = zeros.copy()
-        cache_new.neighbor_count = zeros.copy()
-        cache_new.neighbor_deg_sum = zeros.copy() if size else []
-
-        # Reset any numpy mirrors or aggregated buffers to avoid leaking
-        # state across refresh cycles (e.g. switching between vectorised
-        # and Python paths or reusing legacy caches).
+    cache_new = prev_cache if prev_cache is not None else new_dnfr_cache()
+    cache_new.idx = idx_local
+    cache_new.theta = zeros.copy()
+    cache_new.epi = zeros.copy()
+    cache_new.vf = zeros.copy()
+    cache_new.cos_theta = [1.0] * size
+    cache_new.sin_theta = [0.0] * size
+    cache_new.neighbor_x = zeros.copy()
+    cache_new.neighbor_y = zeros.copy()
+    cache_new.neighbor_epi_sum = zeros.copy()
+    cache_new.neighbor_vf_sum = zeros.copy()
+    cache_new.neighbor_count = zeros.copy()
+    cache_new.neighbor_deg_sum = zeros.copy() if size else []
+    cache_new.degs = None
+    cache_new.edge_src = None
+    cache_new.edge_dst = None
+    cache_new.checksum = checksum
+
+    # Reset any numpy mirrors or aggregated buffers to avoid leaking
+    # state across refresh cycles (e.g. switching between vectorised
+    # and Python paths or reusing legacy caches).
+    if prev_cache is not None:
         for attr in _NUMPY_CACHE_ATTRS:
             setattr(cache_new, attr, None)
         for attr in (
@@ -501,7 +478,8 @@ def _ensure_numpy_vectors(cache: DnfrCache, np: ModuleType) -> DnfrCacheVectors:
     if cache is None:
         return (None, None, None, None, None)
 
-    arrays = []
+    arrays: list[Any | None] = []
+    size = len(cache.theta)
     for attr_np, source_attr in (
         ("theta_np", "theta"),
         ("epi_np", "epi"),
@@ -509,16 +487,18 @@ def _ensure_numpy_vectors(cache: DnfrCache, np: ModuleType) -> DnfrCacheVectors:
         ("cos_theta_np", "cos_theta"),
         ("sin_theta_np", "sin_theta"),
     ):
-        src = getattr(cache, source_attr)
         arr = getattr(cache, attr_np)
+        if arr is not None and getattr(arr, "shape", None) == (size,):
+            arrays.append(arr)
+            continue
+        src = getattr(cache, source_attr)
         if src is None:
            setattr(cache, attr_np, None)
            arrays.append(None)
            continue
-        if arr is None or len(arr) != len(src):
+        arr = np.asarray(src, dtype=float)
+        if getattr(arr, "shape", None) != (size,):
            arr = np.array(src, dtype=float)
-        else:
-            np.copyto(arr, src, casting="unsafe")
         setattr(cache, attr_np, arr)
         arrays.append(arr)
     return tuple(arrays)
@@ -588,52 +568,50 @@ def _ensure_cached_array(
     return arr
 
 
-def _ensure_numpy_state_vectors(data: MutableMapping[str, Any], np: ModuleType) -> DnfrVectorMap:
+def _ensure_numpy_state_vectors(
+    data: MutableMapping[str, Any], np: ModuleType
+) -> DnfrVectorMap:
     """Synchronise list-based state vectors with their NumPy counterparts."""
 
     nodes = data.get("nodes") or ()
     size = len(nodes)
     cache: DnfrCache | None = data.get("cache")
 
+    cache_arrays: DnfrCacheVectors = (None, None, None, None, None)
     if cache is not None:
-        theta_np, epi_np, vf_np, cos_np, sin_np = _ensure_numpy_vectors(cache, np)
-        for key, arr in (
-            ("theta_np", theta_np),
-            ("epi_np", epi_np),
-            ("vf_np", vf_np),
-            ("cos_theta_np", cos_np),
-            ("sin_theta_np", sin_np),
-        ):
-            if arr is not None and getattr(arr, "shape", None) == (size,):
-                data[key] = arr
-
-    mapping = (
-        ("theta_np", "theta"),
-        ("epi_np", "epi"),
-        ("vf_np", "vf"),
-        ("cos_theta_np", "cos_theta"),
-        ("sin_theta_np", "sin_theta"),
-    )
-    for np_key, src_key in mapping:
-        src = data.get(src_key)
-        if src is None:
-            continue
+        cache_arrays = _ensure_numpy_vectors(cache, np)
+
+    result: dict[str, Any | None] = {}
+    for (plain_key, np_key, cached_arr, result_key) in (
+        ("theta", "theta_np", cache_arrays[0], "theta"),
+        ("epi", "epi_np", cache_arrays[1], "epi"),
+        ("vf", "vf_np", cache_arrays[2], "vf"),
+        ("cos_theta", "cos_theta_np", cache_arrays[3], "cos"),
+        ("sin_theta", "sin_theta_np", cache_arrays[4], "sin"),
+    ):
         arr = data.get(np_key)
+        if arr is None:
+            arr = cached_arr
         if arr is None or getattr(arr, "shape", None) != (size,):
-            arr = np.array(src, dtype=float)
-        elif cache is None:
-            np.copyto(arr, src, casting="unsafe")
-        data[np_key] = arr
-        if cache is not None:
-            setattr(cache, np_key, arr)
+            src = data.get(plain_key)
+            if src is None and cache is not None:
+                src = getattr(cache, plain_key)
+            if src is None:
+                arr = None
+            else:
+                arr = np.asarray(src, dtype=float)
+                if getattr(arr, "shape", None) != (size,):
+                    arr = np.array(src, dtype=float)
+        if arr is not None:
+            data[np_key] = arr
+            data[plain_key] = arr
+            if cache is not None:
+                setattr(cache, np_key, arr)
+        else:
+            data[np_key] = None
+        result[result_key] = arr
 
-    return {
-        "theta": data.get("theta_np"),
-        "epi": data.get("epi_np"),
-        "vf": data.get("vf_np"),
-        "cos": data.get("cos_theta_np"),
-        "sin": data.get("sin_theta_np"),
-    }
+    return result
 
 
 def _build_edge_index_arrays(
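
Note: the reworked ``_ensure_numpy_vectors``/``_ensure_numpy_state_vectors`` pair above follows one rule: keep a cached NumPy mirror only while its shape still matches the node count, otherwise rebuild it from the list source. A self-contained sketch of that rule (``_sync`` is a hypothetical name, not part of the package):

    import numpy as np

    def _sync(cached: np.ndarray | None, source: list[float]) -> np.ndarray:
        size = len(source)
        # Reuse the cached buffer only when its shape already matches the node count.
        if cached is not None and cached.shape == (size,):
            return cached
        # Otherwise materialise a fresh float vector from the list mirror.
        return np.asarray(source, dtype=float)

    buf = _sync(None, [0.1, 0.2, 0.3])
    assert _sync(buf, [0.4, 0.5, 0.6]) is buf   # same object while the size is unchanged
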
@@ -679,26 +657,94 @@ def _refresh_dnfr_vectors(
     np_module = get_numpy()
     trig = compute_theta_trig(((n, G.nodes[n]) for n in nodes), np=np_module)
     use_numpy = _should_vectorize(G, np_module)
-    for index, node in enumerate(nodes):
-        i: int = int(index)
-        node_id: NodeId = node
-        nd = G.nodes[node_id]
-        cache.theta[i] = trig.theta[node_id]
-        cache.epi[i] = get_attr(nd, ALIAS_EPI, 0.0)
-        cache.vf[i] = get_attr(nd, ALIAS_VF, 0.0)
-        cache.cos_theta[i] = trig.cos[node_id]
-        cache.sin_theta[i] = trig.sin[node_id]
-    if use_numpy:
-        _ensure_numpy_vectors(cache, np_module)
+    node_count = len(nodes)
+    trig_theta = getattr(trig, "theta_values", None)
+    trig_cos = getattr(trig, "cos_values", None)
+    trig_sin = getattr(trig, "sin_values", None)
+    np_ready = (
+        use_numpy
+        and np_module is not None
+        and isinstance(trig_theta, getattr(np_module, "ndarray", tuple()))
+        and isinstance(trig_cos, getattr(np_module, "ndarray", tuple()))
+        and isinstance(trig_sin, getattr(np_module, "ndarray", tuple()))
+        and getattr(trig_theta, "shape", None) == getattr(trig_cos, "shape", None)
+        and getattr(trig_theta, "shape", None) == getattr(trig_sin, "shape", None)
+        and (trig_theta.shape[0] if getattr(trig_theta, "ndim", 0) else 0) == node_count
+    )
+
+    if np_ready:
+        if node_count:
+            epi_arr = np_module.fromiter(
+                (get_attr(G.nodes[node], ALIAS_EPI, 0.0) for node in nodes),
+                dtype=float,
+                count=node_count,
+            )
+            vf_arr = np_module.fromiter(
+                (get_attr(G.nodes[node], ALIAS_VF, 0.0) for node in nodes),
+                dtype=float,
+                count=node_count,
+            )
+        else:
+            epi_arr = np_module.empty(0, dtype=float)
+            vf_arr = np_module.empty(0, dtype=float)
+
+        theta_arr = np_module.asarray(trig_theta, dtype=float)
+        cos_arr = np_module.asarray(trig_cos, dtype=float)
+        sin_arr = np_module.asarray(trig_sin, dtype=float)
+
+        def _sync_numpy(attr: str, source: Any) -> Any:
+            dest = getattr(cache, attr)
+            if dest is None or getattr(dest, "shape", None) != source.shape:
+                dest = np_module.array(source, dtype=float)
+            else:
+                np_module.copyto(dest, source, casting="unsafe")
+            setattr(cache, attr, dest)
+            return dest
+
+        _sync_numpy("theta_np", theta_arr)
+        _sync_numpy("epi_np", epi_arr)
+        _sync_numpy("vf_np", vf_arr)
+        _sync_numpy("cos_theta_np", cos_arr)
+        _sync_numpy("sin_theta_np", sin_arr)
+
+        # Python mirrors remain untouched while the vectorised path is active.
+        # They will be rebuilt the next time the runtime falls back to lists.
+        if cache.theta is not None and len(cache.theta) != node_count:
+            cache.theta = [0.0] * node_count
+        if cache.epi is not None and len(cache.epi) != node_count:
+            cache.epi = [0.0] * node_count
+        if cache.vf is not None and len(cache.vf) != node_count:
+            cache.vf = [0.0] * node_count
+        if cache.cos_theta is not None and len(cache.cos_theta) != node_count:
+            cache.cos_theta = [1.0] * node_count
+        if cache.sin_theta is not None and len(cache.sin_theta) != node_count:
+            cache.sin_theta = [0.0] * node_count
     else:
-        cache.theta_np = None
-        cache.epi_np = None
-        cache.vf_np = None
-        cache.cos_theta_np = None
-        cache.sin_theta_np = None
+        for index, node in enumerate(nodes):
+            i: int = int(index)
+            node_id: NodeId = node
+            nd = G.nodes[node_id]
+            cache.theta[i] = trig.theta[node_id]
+            cache.epi[i] = get_attr(nd, ALIAS_EPI, 0.0)
+            cache.vf[i] = get_attr(nd, ALIAS_VF, 0.0)
+            cache.cos_theta[i] = trig.cos[node_id]
+            cache.sin_theta[i] = trig.sin[node_id]
+        if use_numpy and np_module is not None:
+            _ensure_numpy_vectors(cache, np_module)
+        else:
+            cache.theta_np = None
+            cache.epi_np = None
+            cache.vf_np = None
+            cache.cos_theta_np = None
+            cache.sin_theta_np = None
 
 
-def _prepare_dnfr_data(G: TNFRGraph, *, cache_size: int | None = 128) -> dict[str, Any]:
+def _prepare_dnfr_data(
+    G: TNFRGraph,
+    *,
+    cache_size: int | None = 128,
+    profile: MutableMapping[str, float] | None = None,
+) -> dict[str, Any]:
     """Precompute common data for ΔNFR strategies.
 
     The helper decides between edge-wise and dense adjacency accumulation
@@ -706,7 +752,20 @@ def _prepare_dnfr_data(G: TNFRGraph, *, cache_size: int | None = 128) -> dict[st
     ``_SPARSE_DENSITY_THRESHOLD`` receive a cached adjacency matrix so the
     dense path can be exercised; callers may also force the dense mode by
     setting ``G.graph['dnfr_force_dense']`` to a truthy value.
+
+    Parameters
+    ----------
+    profile : MutableMapping[str, float] or None, optional
+        Mutable mapping that accumulates wall-clock timings for ΔNFR
+        preparation. When provided the helper increases the
+        ``"dnfr_cache_rebuild"`` bucket with the time spent refreshing cached
+        node vectors and associated NumPy workspaces.
     """
+    start_timer, stop_timer = _profile_start_stop(
+        profile,
+        keys=("dnfr_cache_rebuild",),
+    )
+
     graph = G.graph
     weights = graph.get("_dnfr_weights")
     if weights is None:
@@ -743,6 +802,7 @@ def _prepare_dnfr_data(G: TNFRGraph, *, cache_size: int | None = 128) -> dict[st
     checksum = G.graph.get("_dnfr_nodes_checksum")
     dirty_flag = bool(G.graph.pop("_dnfr_prep_dirty", False))
     existing_cache = cast(DnfrCache | None, graph.get("_dnfr_prep_cache"))
+    cache_timer = start_timer()
     cache, idx, theta, epi, vf, cos_theta, sin_theta, refreshed = _init_dnfr_cache(
         G,
         nodes,
@@ -751,7 +811,9 @@ def _prepare_dnfr_data(G: TNFRGraph, *, cache_size: int | None = 128) -> dict[st
         force_refresh=dirty_flag,
         manager=manager,
     )
+    stop_timer("dnfr_cache_rebuild", cache_timer)
     dirty = dirty_flag or refreshed
+    caching_enabled = cache is not None and (cache_size is None or cache_size > 0)
     result["cache"] = cache
     result["idx"] = idx
     result["theta"] = theta
@@ -761,7 +823,7 @@ def _prepare_dnfr_data(G: TNFRGraph, *, cache_size: int | None = 128) -> dict[st
     result["sin_theta"] = sin_theta
     if cache is not None:
         _refresh_dnfr_vectors(G, nodes, cache)
-        if np_module is None:
+        if np_module is None and not caching_enabled:
             for attr in (
                 "neighbor_x_np",
                 "neighbor_y_np",
@@ -812,41 +874,47 @@ def _prepare_dnfr_data(G: TNFRGraph, *, cache_size: int | None = 128) -> dict[st
         cache.neighbor_accum_np = None
         cache.neighbor_edge_values_np = None
         degree_map = None
-    if degree_map is None or len(degree_map) != len(G):
-        degree_map = {cast(NodeId, node): float(deg) for node, deg in G.degree()}
-        if cache is not None:
-            cache.degs = degree_map
-
-    G.graph["_dnfr_prep_dirty"] = False
 
-    if (
-        cache is not None
-        and cache.deg_list is not None
-        and not dirty
-        and len(cache.deg_list) == len(nodes)
-    ):
-        deg_list = cache.deg_list
-    else:
-        deg_list = [float(degree_map.get(node, 0.0)) for node in nodes]
-        if cache is not None:
-            cache.deg_list = deg_list
+    deg_list: list[float] | None = None
+    degs: dict[NodeId, float] | None = None
+    deg_array: np.ndarray | None = None
 
-    if w_topo != 0.0:
-        degs: dict[NodeId, float] | None = degree_map
-    else:
-        degs = None
-    result["degs"] = degs
-    result["deg_list"] = deg_list
+    if w_topo != 0.0 or caching_enabled:
+        if degree_map is None or len(degree_map) != len(G):
+            degree_map = {cast(NodeId, node): float(deg) for node, deg in G.degree()}
+            if cache is not None:
+                cache.degs = degree_map
 
-    deg_array: np.ndarray | None = None
-    if np_module is not None and deg_list is not None:
-        if cache is not None:
-            deg_array = _ensure_numpy_degrees(cache, deg_list, np_module)
+        if (
+            cache is not None
+            and cache.deg_list is not None
+            and not dirty
+            and len(cache.deg_list) == len(nodes)
+        ):
+            deg_list = cache.deg_list
        else:
-            deg_array = np_module.array(deg_list, dtype=float)
-    elif cache is not None:
+            deg_list = [float(degree_map.get(node, 0.0)) for node in nodes]
+            if cache is not None:
+                cache.deg_list = deg_list
+
+        degs = degree_map
+
+        if np_module is not None and deg_list is not None:
+            if cache is not None:
+                deg_array = _ensure_numpy_degrees(cache, deg_list, np_module)
+            else:
+                deg_array = np_module.array(deg_list, dtype=float)
+        elif cache is not None:
+            cache.deg_array = None
+    elif cache is not None and dirty:
+        cache.deg_list = None
         cache.deg_array = None
 
+    G.graph["_dnfr_prep_dirty"] = False
+
+    result["degs"] = degs
+    result["deg_list"] = deg_list
+
     theta_np: np.ndarray | None
     epi_np: np.ndarray | None
     vf_np: np.ndarray | None
@@ -864,9 +932,7 @@ def _prepare_dnfr_data(G: TNFRGraph, *, cache_size: int | None = 128) -> dict[st
         edge_src = cache.edge_src
         edge_dst = cache.edge_dst
         if edge_src is None or edge_dst is None or dirty:
-            edge_src, edge_dst = _build_edge_index_arrays(
-                G, nodes, idx, np_module
-            )
+            edge_src, edge_dst = _build_edge_index_arrays(G, nodes, idx, np_module)
             cache.edge_src = edge_src
             cache.edge_dst = edge_dst
     else:
@@ -903,6 +969,16 @@ def _prepare_dnfr_data(G: TNFRGraph, *, cache_size: int | None = 128) -> dict[st
     result["vf_np"] = vf_np
     result["cos_theta_np"] = cos_theta_np
     result["sin_theta_np"] = sin_theta_np
+    if theta_np is not None and getattr(theta_np, "shape", None) == (len(nodes),):
+        result["theta"] = theta_np
+    if epi_np is not None and getattr(epi_np, "shape", None) == (len(nodes),):
+        result["epi"] = epi_np
+    if vf_np is not None and getattr(vf_np, "shape", None) == (len(nodes),):
+        result["vf"] = vf_np
+    if cos_theta_np is not None and getattr(cos_theta_np, "shape", None) == (len(nodes),):
+        result["cos_theta"] = cos_theta_np
+    if sin_theta_np is not None and getattr(sin_theta_np, "shape", None) == (len(nodes),):
+        result["sin_theta"] = sin_theta_np
     result["deg_array"] = deg_array
     result["edge_src"] = edge_src
     result["edge_dst"] = edge_dst
@@ -925,8 +1001,22 @@ def _apply_dnfr_gradients(
     degs: Mapping[Any, float] | Sequence[float] | np.ndarray | None = None,
     *,
     n_jobs: int | None = None,
+    profile: MutableMapping[str, float] | None = None,
 ) -> None:
-    """Combine precomputed gradients and write ΔNFR to each node."""
+    """Combine precomputed gradients and write ΔNFR to each node.
+
+    Parameters
+    ----------
+    profile : MutableMapping[str, float] or None, optional
+        Mutable mapping receiving aggregated timings for the gradient assembly
+        (``"dnfr_gradient_assembly"``) and in-place writes
+        (``"dnfr_inplace_write"``).
+    """
+    start_timer, stop_timer = _profile_start_stop(
+        profile,
+        keys=("dnfr_gradient_assembly", "dnfr_inplace_write"),
+    )
+
     np = get_numpy()
     nodes = data["nodes"]
     theta = data["theta"]
@@ -962,6 +1052,8 @@
         and isinstance(deg_array, np.ndarray)
     )
 
+    grad_timer = start_timer()
+
     if use_vector:
         grad_phase = _ensure_cached_array(cache, "grad_phase_np", theta_np.shape, np)
         grad_epi = _ensure_cached_array(cache, "grad_epi_np", epi_np.shape, np)
@@ -969,16 +1061,10 @@
         grad_total = _ensure_cached_array(cache, "grad_total_np", theta_np.shape, np)
         grad_topo = None
         if w_topo != 0.0:
-            grad_topo = _ensure_cached_array(
-                cache, "grad_topo_np", deg_array.shape, np
-            )
+            grad_topo = _ensure_cached_array(cache, "grad_topo_np", deg_array.shape, np)
 
-        np.copyto(grad_phase, theta_np, casting="unsafe")
-        grad_phase -= th_bar
-        grad_phase += math.pi
-        np.mod(grad_phase, math.tau, out=grad_phase)
-        grad_phase -= math.pi
-        grad_phase *= -1.0 / math.pi
+        angle_diff_array(theta_np, th_bar, np=np, out=grad_phase)
+        np.multiply(grad_phase, -1.0 / math.pi, out=grad_phase)
 
         np.copyto(grad_epi, epi_bar, casting="unsafe")
         grad_epi -= epi_np
@@ -1069,8 +1155,12 @@
         cache.grad_topo_np = None
         cache.grad_total_np = None
 
+    stop_timer("dnfr_gradient_assembly", grad_timer)
+
+    write_timer = start_timer()
     for i, n in enumerate(nodes):
         set_dnfr(G, n, float(dnfr_values[i]))
+    stop_timer("dnfr_inplace_write", write_timer)
 
 
 def _init_bar_arrays(
@@ -1124,10 +1214,7 @@ def _init_bar_arrays(
     else:
         deg_size = len(degs)
         deg_bar = cache.deg_bar_np
-        if (
-            deg_bar is None
-            or getattr(deg_bar, "shape", None) != (deg_size,)
-        ):
+        if deg_bar is None or getattr(deg_bar, "shape", None) != (deg_size,):
            if isinstance(degs, dict):
                deg_bar = np.array(
                    [float(degs.get(node, 0.0)) for node in nodes],
@@ -1198,9 +1285,7 @@
         th_bar = list(theta)
         epi_bar = list(epi)
         vf_bar = list(vf)
-        deg_bar = (
-            list(degs) if w_topo != 0.0 and degs is not None else None
-        )
+        deg_bar = list(degs) if w_topo != 0.0 and degs is not None else None
     return th_bar, epi_bar, vf_bar, deg_bar
 
 
@@ -1294,13 +1379,31 @@ def _compute_dnfr_common(
     deg_sum: Sequence[float] | None = None,
     degs: Sequence[float] | None = None,
     n_jobs: int | None = None,
+    profile: MutableMapping[str, float] | None = None,
 ) -> None:
-    """Compute neighbour means and apply ΔNFR gradients."""
+    """Compute neighbour means and apply ΔNFR gradients.
+
+    Parameters
+    ----------
+    profile : MutableMapping[str, float] or None, optional
+        Mutable mapping that records wall-clock durations for the neighbour
+        mean computation (``"dnfr_neighbor_means"``), the gradient assembly
+        (``"dnfr_gradient_assembly"``) and the final in-place writes to the
+        graph (``"dnfr_inplace_write"``).
+    """
+    start_timer, stop_timer = _profile_start_stop(
+        profile,
+        keys=("dnfr_neighbor_means", "dnfr_gradient_assembly", "dnfr_inplace_write"),
+    )
+
     np_module = get_numpy()
-    if np_module is not None and isinstance(count, getattr(np_module, "ndarray", tuple)):
+    if np_module is not None and isinstance(
+        count, getattr(np_module, "ndarray", tuple)
+    ):
         np_arg = np_module
     else:
         np_arg = None
+    neighbor_timer = start_timer()
     th_bar, epi_bar, vf_bar, deg_bar = _compute_neighbor_means(
         G,
         data,
@@ -1313,6 +1416,7 @@
         degs=degs,
         np=np_arg,
     )
+    stop_timer("dnfr_neighbor_means", neighbor_timer)
     _apply_dnfr_gradients(
         G,
         data,
@@ -1322,6 +1426,7 @@
         deg_bar,
         degs,
         n_jobs=n_jobs,
+        profile=profile,
     )
 
 
@@ -1330,7 +1435,11 @@ def _reset_numpy_buffer(
     size: int,
     np: ModuleType,
 ) -> np.ndarray:
-    if buffer is None or getattr(buffer, "shape", None) is None or buffer.shape[0] != size:
+    if (
+        buffer is None
+        or getattr(buffer, "shape", None) is None
+        or buffer.shape[0] != size
+    ):
         return np.zeros(size, dtype=float)
     buffer.fill(0.0)
     return buffer
@@ -1476,10 +1585,12 @@
     vectors = [state["cos"], state["sin"], state["epi"], state["vf"]]
 
     components = _ensure_cached_array(cache, "dense_components_np", (n, 4), np)
-    for col, src_vec in enumerate(vectors):
-        np.copyto(components[:, col], src_vec, casting="unsafe")
-
     accum = _ensure_cached_array(cache, "dense_accum_np", (n, 4), np)
+
+    # ``components`` retains the last source copies so callers relying on
+    # cached buffers (e.g. diagnostics) still observe meaningful values.
+    np.copyto(components, np.column_stack(vectors), casting="unsafe")
+
     np.matmul(A, components, out=accum)
 
     np.copyto(x, accum[:, 0], casting="unsafe")
@@ -1537,6 +1648,7 @@
     deg_array: np.ndarray | None,
     cache: DnfrCache | None,
     np: ModuleType,
+    chunk_size: int | None = None,
 ) -> dict[str, np.ndarray]:
     """Accumulate neighbour contributions using direct indexed reductions."""
 
@@ -1548,6 +1660,23 @@
 
     component_rows = 4 + (1 if include_count else 0) + (1 if use_topology else 0)
 
+    if edge_count:
+        if chunk_size is None:
+            resolved_chunk = edge_count
+        else:
+            try:
+                resolved_chunk = int(chunk_size)
+            except (TypeError, ValueError):
+                resolved_chunk = edge_count
+            else:
+                if resolved_chunk <= 0:
+                    resolved_chunk = edge_count
+                resolved_chunk = max(1, min(edge_count, resolved_chunk))
+    else:
+        resolved_chunk = 0
+
+    use_chunks = bool(edge_count and resolved_chunk < edge_count)
+
     if cache is not None:
         base_signature = (id(edge_src), id(edge_dst), n, edge_count)
         cache.edge_signature = base_signature
@@ -1565,52 +1694,144 @@
        else:
            accum.fill(0.0)
 
-        edge_values = cache.neighbor_edge_values_np
-        if (
-            edge_values is None
-            or getattr(edge_values, "shape", None) != (edge_count,)
-        ):
-            edge_values = np.empty((edge_count,), dtype=float)
-            cache.neighbor_edge_values_np = edge_values
+        workspace = cache.neighbor_edge_values_np
+        if use_chunks:
+            workspace_length = resolved_chunk
+        else:
+            workspace_length = component_rows
+        if workspace_length:
+            expected_shape = (component_rows, workspace_length)
+            if workspace is None or getattr(workspace, "shape", None) != expected_shape:
+                workspace = np.empty(expected_shape, dtype=float)
+        else:
+            workspace = None
+        cache.neighbor_edge_values_np = workspace
 
         cache.neighbor_accum_signature = signature
    else:
        accum = np.zeros((component_rows, n), dtype=float)
-        edge_values = (
-            np.empty((edge_count,), dtype=float)
-            if edge_count
-            else np.empty((0,), dtype=float)
+        workspace_length = resolved_chunk if use_chunks else component_rows
+        workspace = (
+            np.empty((component_rows, workspace_length), dtype=float)
+            if workspace_length
+            else None
        )
 
    if edge_count:
        row = 0
-
-        np.take(cos, edge_dst, out=edge_values)
-        np.add.at(accum[row], edge_src, edge_values)
+        cos_row = row
        row += 1
-
-        np.take(sin, edge_dst, out=edge_values)
-        np.add.at(accum[row], edge_src, edge_values)
+        sin_row = row
        row += 1
-
-        np.take(epi, edge_dst, out=edge_values)
-        np.add.at(accum[row], edge_src, edge_values)
+        epi_row = row
        row += 1
-
-        np.take(vf, edge_dst, out=edge_values)
-        np.add.at(accum[row], edge_src, edge_values)
+        vf_row = row
        row += 1
-
-        if include_count and count is not None:
-            edge_values.fill(1.0)
-            np.add.at(accum[row], edge_src, edge_values)
+        count_row = row if include_count and count is not None else None
+        if count_row is not None:
            row += 1
+        deg_row = row if use_topology and deg_array is not None else None
+
+        edge_src_int = edge_src.astype(np.intp, copy=False)
+        edge_dst_int = edge_dst.astype(np.intp, copy=False)
+
+        if use_chunks:
+            chunk_step = resolved_chunk if resolved_chunk else edge_count
+            chunk_indices = range(0, edge_count, chunk_step)
+
+            for start in chunk_indices:
+                end = min(start + chunk_step, edge_count)
+                if start >= end:
+                    continue
+                src_slice = edge_src_int[start:end]
+                dst_slice = edge_dst_int[start:end]
+                slice_len = end - start
+                if slice_len <= 0:
+                    continue
+
+                if workspace is not None:
+                    chunk_matrix = workspace[:, :slice_len]
+                else:
+                    chunk_matrix = np.empty((component_rows, slice_len), dtype=float)
+
+                np.take(cos, dst_slice, out=chunk_matrix[cos_row, :slice_len])
+                np.take(sin, dst_slice, out=chunk_matrix[sin_row, :slice_len])
+                np.take(epi, dst_slice, out=chunk_matrix[epi_row, :slice_len])
+                np.take(vf, dst_slice, out=chunk_matrix[vf_row, :slice_len])
+
+                if count_row is not None:
+                    chunk_matrix[count_row, :slice_len].fill(1.0)
+                if deg_row is not None and deg_array is not None:
+                    np.take(deg_array, dst_slice, out=chunk_matrix[deg_row, :slice_len])
+
+                def _accumulate_into(
+                    target_row: int | None,
+                    values: np.ndarray | None = None,
+                    *,
+                    unit_weight: bool = False,
+                ) -> None:
+                    if target_row is None:
+                        return
+                    row_view = accum[target_row]
+                    if unit_weight:
+                        np.add.at(row_view, src_slice, 1.0)
+                    else:
+                        if values is None:
+                            return
+                        np.add.at(row_view, src_slice, values)
+
+                _accumulate_into(cos_row, chunk_matrix[cos_row, :slice_len])
+                _accumulate_into(sin_row, chunk_matrix[sin_row, :slice_len])
+                _accumulate_into(epi_row, chunk_matrix[epi_row, :slice_len])
+                _accumulate_into(vf_row, chunk_matrix[vf_row, :slice_len])
+
+                if count_row is not None:
+                    _accumulate_into(count_row, unit_weight=True)
 
-        if use_topology and deg_sum is not None and deg_array is not None:
-            np.take(deg_array, edge_dst, out=edge_values)
-            np.add.at(accum[row], edge_src, edge_values)
+                if deg_row is not None and deg_array is not None:
+                    _accumulate_into(deg_row, chunk_matrix[deg_row, :slice_len])
+        else:
+            def _apply_full_bincount(
+                target_row: int | None,
+                values: np.ndarray | None = None,
+                *,
+                unit_weight: bool = False,
+            ) -> None:
+                if target_row is None:
+                    return
+                if values is None and not unit_weight:
+                    return
+                if unit_weight:
+                    component_accum = np.bincount(
+                        edge_src_int,
+                        minlength=n,
+                    )
+                else:
+                    component_accum = np.bincount(
+                        edge_src_int,
+                        weights=values,
+                        minlength=n,
+                    )
+                np.copyto(
+                    accum[target_row, : n],
+                    component_accum[:n],
+                    casting="unsafe",
+                )
+
+            _apply_full_bincount(cos_row, np.take(cos, edge_dst_int))
+            _apply_full_bincount(sin_row, np.take(sin, edge_dst_int))
+            _apply_full_bincount(epi_row, np.take(epi, edge_dst_int))
+            _apply_full_bincount(vf_row, np.take(vf, edge_dst_int))
+
+            if count_row is not None:
+                _apply_full_bincount(count_row, unit_weight=True)
+
+            if deg_row is not None and deg_array is not None:
+                _apply_full_bincount(deg_row, np.take(deg_array, edge_dst_int))
    else:
        accum.fill(0.0)
+        if workspace is not None:
+            workspace.fill(0.0)
 
    row = 0
    np.copyto(x, accum[row], casting="unsafe")
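
Note: both branches in the hunk above reduce per-edge values onto their source nodes; the chunked path scatters slices with ``np.add.at`` into a reused workspace, while the single-pass path uses a weighted ``np.bincount``. A self-contained comparison on synthetic arrays (illustrative only, not tnfr data) showing the two routes agree:

    import numpy as np

    n = 5                                               # number of nodes
    edge_src = np.array([0, 0, 1, 3, 4, 4], dtype=np.intp)
    edge_dst = np.array([1, 2, 2, 4, 0, 1], dtype=np.intp)
    values = np.arange(1.0, 7.0)                        # per-edge values (gathered from edge_dst in the real code)

    # Chunked route: scatter-add one slice of edges at a time.
    accum_at = np.zeros(n)
    for start in range(0, edge_src.size, 2):            # chunk size 2, purely for illustration
        sl = slice(start, start + 2)
        np.add.at(accum_at, edge_src[sl], values[sl])

    # Single-pass route: weighted bincount over the source indices.
    accum_bc = np.bincount(edge_src, weights=values, minlength=n)

    assert np.allclose(accum_at, accum_bc)
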
@@ -1631,7 +1852,7 @@
 
     return {
         "accumulator": accum,
-        "edge_values": edge_values,
+        "edge_values": workspace,
     }
 
 
@@ -1741,9 +1962,15 @@ def _build_neighbor_sums_common(
         for future in futures:
             chunk_results.append(future.result())
 
-    for start, chunk_x, chunk_y, chunk_epi, chunk_vf, chunk_count, chunk_deg in sorted(
-        chunk_results, key=lambda item: item[0]
-    ):
+    for (
+        start,
+        chunk_x,
+        chunk_y,
+        chunk_epi,
+        chunk_vf,
+        chunk_count,
+        chunk_deg,
+    ) in sorted(chunk_results, key=lambda item: item[0]):
         end = start + len(chunk_x)
         x[start:end] = chunk_x
         y[start:end] = chunk_y
@@ -1824,11 +2051,14 @@ def _accumulate_neighbors_numpy(
     data["edge_count"] = int(edge_src.size)
 
     cached_deg_array = data.get("deg_array")
-    reuse_count_from_deg = False
+    reuse_count_from_deg = bool(count is not None and cached_deg_array is not None)
+    count_for_accum = count
     if count is not None:
-        if cached_deg_array is not None:
+        if reuse_count_from_deg:
+            # Reuse the cached degree vector as neighbour counts to avoid
+            # allocating an extra accumulator row in the broadcast routine.
             np.copyto(count, cached_deg_array, casting="unsafe")
-            reuse_count_from_deg = True
+            count_for_accum = None
        else:
            count.fill(0.0)
 
@@ -1841,6 +2071,24 @@
     elif cached_deg_array is not None:
         deg_array = cached_deg_array
 
+    edge_count = int(edge_src.size) if edge_src is not None else 0
+    chunk_hint = data.get("neighbor_chunk_hint")
+    if chunk_hint is None:
+        chunk_hint = G.graph.get("DNFR_CHUNK_SIZE")
+    resolved_neighbor_chunk = (
+        resolve_chunk_size(
+            chunk_hint,
+            edge_count,
+            minimum=1,
+            approx_bytes_per_item=_DNFR_APPROX_BYTES_PER_EDGE,
+            clamp_to=None,
+        )
+        if edge_count
+        else 0
+    )
+    data["neighbor_chunk_hint"] = chunk_hint
+    data["neighbor_chunk_size"] = resolved_neighbor_chunk
+
     accum = _accumulate_neighbors_broadcasted(
         edge_src=edge_src,
         edge_dst=edge_dst,
@@ -1852,19 +2100,26 @@
         y=y,
         epi_sum=epi_sum,
         vf_sum=vf_sum,
-        count=count,
+        count=count_for_accum,
         deg_sum=deg_sum,
         deg_array=deg_array,
         cache=cache,
         np=np,
+        chunk_size=resolved_neighbor_chunk,
     )
 
     data["neighbor_accum_np"] = accum.get("accumulator")
-    data["neighbor_edge_values_np"] = accum.get("edge_values")
+    edge_values = accum.get("edge_values")
+    data["neighbor_edge_values_np"] = edge_values
+    if edge_values is not None:
+        width = getattr(edge_values, "shape", (0, 0))[1]
+        data["neighbor_chunk_size"] = int(width)
+    else:
+        data["neighbor_chunk_size"] = resolved_neighbor_chunk
     if cache is not None:
         data["neighbor_accum_signature"] = cache.neighbor_accum_signature
-    if reuse_count_from_deg and count is not None and cached_deg_array is not None:
-        np.copyto(count, cached_deg_array, casting="unsafe")
+    if reuse_count_from_deg and cached_deg_array is not None:
+        count = cached_deg_array
     degs = deg_array if deg_sum is not None and deg_array is not None else None
     return x, y, epi_sum, vf_sum, count, deg_sum, degs
 
@@ -1875,6 +2130,7 @@
     *,
     use_numpy: bool | None = None,
     n_jobs: int | None = None,
+    profile: MutableMapping[str, float] | None = None,
 ) -> None:
     """Compute ΔNFR using neighbour sums.
 
@@ -1889,7 +2145,19 @@
         prepares NumPy buffers (if available). When ``False`` the engine still
         prefers the vectorised path whenever :func:`get_numpy` returns a module
         and the graph does not set ``vectorized_dnfr`` to ``False``.
+    profile : MutableMapping[str, float] or None, optional
+        Mutable mapping that aggregates wall-clock durations for neighbour
+        accumulation and records which execution branch was used. The
+        ``"dnfr_neighbor_accumulation"`` bucket gathers the time spent inside
+        :func:`_build_neighbor_sums_common`, while ``"dnfr_path"`` stores the
+        string ``"vectorized"`` or ``"fallback"`` describing the active
+        implementation.
     """
+    start_timer, stop_timer = _profile_start_stop(
+        profile,
+        keys=("dnfr_neighbor_accumulation",),
+    )
+
     np_module = get_numpy()
     data["dnfr_numpy_available"] = bool(np_module)
     vector_disabled = G.graph.get("vectorized_dnfr") is False
@@ -1899,23 +2167,29 @@
     if use_numpy is False or vector_disabled:
         prefer_dense = False
     data["dnfr_used_numpy"] = bool(prefer_dense and np_module is not None)
+    if profile is not None:
+        profile["dnfr_path"] = "vectorized" if data["dnfr_used_numpy"] else "fallback"
 
     data["n_jobs"] = n_jobs
     try:
+        neighbor_timer = start_timer()
         res = _build_neighbor_sums_common(
             G,
             data,
             use_numpy=prefer_dense,
             n_jobs=n_jobs,
         )
+        stop_timer("dnfr_neighbor_accumulation", neighbor_timer)
     except TypeError as exc:
         if "n_jobs" not in str(exc):
             raise
+        neighbor_timer = start_timer()
         res = _build_neighbor_sums_common(
             G,
             data,
             use_numpy=prefer_dense,
         )
+        stop_timer("dnfr_neighbor_accumulation", neighbor_timer)
     if res is None:
         return
     x, y, epi_sum, vf_sum, count, deg_sum, degs = res
@@ -1930,6 +2204,7 @@
         deg_sum=deg_sum,
         degs=degs,
         n_jobs=n_jobs,
+        profile=profile,
     )
 
 
@@ -1938,6 +2213,7 @@ def default_compute_delta_nfr(
     *,
     cache_size: int | None = 1,
     n_jobs: int | None = None,
+    profile: MutableMapping[str, float] | None = None,
 ) -> None:
     """Compute ΔNFR by mixing phase, EPI, νf and a topological term.
 
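
Note: a usage sketch for the ``profile`` parameter added to ``default_compute_delta_nfr`` in the hunk above (illustrative only; ``G`` is assumed to be a graph already initialised for TNFR dynamics, and the bucket names come from the docstring extended in the next hunk):

    timings: dict[str, float] = {}
    default_compute_delta_nfr(G, profile=timings)

    # Values are accumulated wall-clock seconds per stage.
    for key in (
        "dnfr_cache_rebuild",
        "dnfr_neighbor_accumulation",
        "dnfr_neighbor_means",
        "dnfr_gradient_assembly",
        "dnfr_inplace_write",
    ):
        print(key, timings.get(key, 0.0))
    print("path:", timings.get("dnfr_path"))   # "vectorized" or "fallback"
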
@@ -1953,17 +2229,40 @@
         Parallel worker count for the pure-Python accumulation path. ``None``
         or values <= 1 preserve the serial behaviour. The vectorised NumPy
         branch ignores this parameter as it already operates in bulk.
+    profile : MutableMapping[str, float] or None, optional
+        Mutable mapping that aggregates the wall-clock timings captured during
+        the ΔNFR computation. The mapping receives the buckets documented in
+        :func:`_prepare_dnfr_data` and :func:`_compute_dnfr`, plus
+        ``"dnfr_neighbor_means"``, ``"dnfr_gradient_assembly"`` and
+        ``"dnfr_inplace_write"`` describing the internal stages of
+        :func:`_compute_dnfr_common`. ``"dnfr_path"`` reflects whether the
+        vectorised or fallback implementation executed the call.
     """
-    data = _prepare_dnfr_data(G, cache_size=cache_size)
+    if profile is not None:
+        for key in (
+            "dnfr_cache_rebuild",
+            "dnfr_neighbor_accumulation",
+            "dnfr_neighbor_means",
+            "dnfr_gradient_assembly",
+            "dnfr_inplace_write",
+        ):
+            profile.setdefault(key, 0.0)
+
+    data = _prepare_dnfr_data(G, cache_size=cache_size, profile=profile)
     _write_dnfr_metadata(
         G,
         weights=data["weights"],
         hook_name="default_compute_delta_nfr",
     )
-    _compute_dnfr(G, data, n_jobs=n_jobs)
+    _compute_dnfr(G, data, n_jobs=n_jobs, profile=profile)
     if not data.get("dnfr_numpy_available"):
         cache = data.get("cache")
-        if isinstance(cache, DnfrCache):
+        cache_size = data.get("cache_size")
+        caching_enabled = (
+            isinstance(cache, DnfrCache)
+            and (cache_size is None or int(cache_size) > 0)
+        )
+        if isinstance(cache, DnfrCache) and not caching_enabled:
            for attr in (
                "neighbor_x_np",
                "neighbor_y_np",
@@ -2016,9 +2315,7 @@ def set_delta_nfr_hook(
     _wrapped.__doc__ = getattr(func, "__doc__", _wrapped.__doc__)
 
     G.graph["compute_delta_nfr"] = _wrapped
-    G.graph["_dnfr_hook_name"] = str(
-        name or getattr(func, "__name__", "custom_dnfr")
-    )
+    G.graph["_dnfr_hook_name"] = str(name or getattr(func, "__name__", "custom_dnfr"))
     if "_dnfr_weights" not in G.graph:
         _configure_dnfr_weights(G)
     if note:
@@ -2063,7 +2360,7 @@
     note: str | None = None,
     n_jobs: int | None = None,
 ) -> None:
-    """Generic helper to compute and store ΔNFR using ``grads``.
+    """Compute and store ΔNFR using ``grads``.
 
     Parameters
     ----------
@@ -2158,7 +2455,7 @@ def _apply_dnfr_hook(
     _write_dnfr_metadata(G, weights=weights, hook_name=hook_name, note=note)
 
 
-# --- Hooks de ejemplo (opcionales) ---
+# --- Example hooks (optional) ---
 
 
 class _PhaseGradient:
@@ -2232,7 +2529,7 @@
 
 
 def dnfr_phase_only(G: TNFRGraph, *, n_jobs: int | None = None) -> None:
-    """Example: ΔNFR from phase only (Kuramoto-like).
+    """Compute ΔNFR from phase only (Kuramoto-like).
 
     Parameters
     ----------
@@ -2256,7 +2553,7 @@
 
 
 def dnfr_epi_vf_mixed(G: TNFRGraph, *, n_jobs: int | None = None) -> None:
-    """Example: ΔNFR without phase, mixing EPI and νf.
+    """Compute ΔNFR without phase, mixing EPI and νf.
 
     Parameters
     ----------
@@ -2267,7 +2564,9 @@
         serial execution.
     """
 
-    epi_values = {n: float(get_attr(nd, ALIAS_EPI, 0.0)) for n, nd in G.nodes(data=True)}
+    epi_values = {
+        n: float(get_attr(nd, ALIAS_EPI, 0.0)) for n, nd in G.nodes(data=True)
+    }
     vf_values = {n: float(get_attr(nd, ALIAS_VF, 0.0)) for n, nd in G.nodes(data=True)}
     grads = {
         "epi": _NeighborAverageGradient(ALIAS_EPI, epi_values),
@@ -2299,7 +2598,9 @@ def dnfr_laplacian(G: TNFRGraph, *, n_jobs: int | None = None) -> None:
     wE = float(weights_cfg.get("epi", DEFAULTS["DNFR_WEIGHTS"]["epi"]))
     wV = float(weights_cfg.get("vf", DEFAULTS["DNFR_WEIGHTS"]["vf"]))
 
-    epi_values = {n: float(get_attr(nd, ALIAS_EPI, 0.0)) for n, nd in G.nodes(data=True)}
+    epi_values = {
+        n: float(get_attr(nd, ALIAS_EPI, 0.0)) for n, nd in G.nodes(data=True)
+    }
     vf_values = {n: float(get_attr(nd, ALIAS_VF, 0.0)) for n, nd in G.nodes(data=True)}
     grads = {
         "epi": _NeighborAverageGradient(ALIAS_EPI, epi_values),