tnfr-4.5.1-py3-none-any.whl → tnfr-6.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (170)
  1. tnfr/__init__.py +270 -90
  2. tnfr/__init__.pyi +40 -0
  3. tnfr/_compat.py +11 -0
  4. tnfr/_version.py +7 -0
  5. tnfr/_version.pyi +7 -0
  6. tnfr/alias.py +631 -0
  7. tnfr/alias.pyi +140 -0
  8. tnfr/cache.py +732 -0
  9. tnfr/cache.pyi +232 -0
  10. tnfr/callback_utils.py +381 -0
  11. tnfr/callback_utils.pyi +105 -0
  12. tnfr/cli/__init__.py +89 -0
  13. tnfr/cli/__init__.pyi +47 -0
  14. tnfr/cli/arguments.py +199 -0
  15. tnfr/cli/arguments.pyi +33 -0
  16. tnfr/cli/execution.py +322 -0
  17. tnfr/cli/execution.pyi +80 -0
  18. tnfr/cli/utils.py +34 -0
  19. tnfr/cli/utils.pyi +8 -0
  20. tnfr/config/__init__.py +12 -0
  21. tnfr/config/__init__.pyi +8 -0
  22. tnfr/config/constants.py +104 -0
  23. tnfr/config/constants.pyi +12 -0
  24. tnfr/config/init.py +36 -0
  25. tnfr/config/init.pyi +8 -0
  26. tnfr/config/operator_names.py +106 -0
  27. tnfr/config/operator_names.pyi +28 -0
  28. tnfr/config/presets.py +104 -0
  29. tnfr/config/presets.pyi +7 -0
  30. tnfr/constants/__init__.py +228 -0
  31. tnfr/constants/__init__.pyi +104 -0
  32. tnfr/constants/core.py +158 -0
  33. tnfr/constants/core.pyi +17 -0
  34. tnfr/constants/init.py +31 -0
  35. tnfr/constants/init.pyi +12 -0
  36. tnfr/constants/metric.py +102 -0
  37. tnfr/constants/metric.pyi +19 -0
  38. tnfr/constants_glyphs.py +16 -0
  39. tnfr/constants_glyphs.pyi +12 -0
  40. tnfr/dynamics/__init__.py +136 -0
  41. tnfr/dynamics/__init__.pyi +83 -0
  42. tnfr/dynamics/adaptation.py +201 -0
  43. tnfr/dynamics/aliases.py +22 -0
  44. tnfr/dynamics/coordination.py +343 -0
  45. tnfr/dynamics/dnfr.py +2315 -0
  46. tnfr/dynamics/dnfr.pyi +33 -0
  47. tnfr/dynamics/integrators.py +561 -0
  48. tnfr/dynamics/integrators.pyi +35 -0
  49. tnfr/dynamics/runtime.py +521 -0
  50. tnfr/dynamics/sampling.py +34 -0
  51. tnfr/dynamics/sampling.pyi +7 -0
  52. tnfr/dynamics/selectors.py +680 -0
  53. tnfr/execution.py +216 -0
  54. tnfr/execution.pyi +65 -0
  55. tnfr/flatten.py +283 -0
  56. tnfr/flatten.pyi +28 -0
  57. tnfr/gamma.py +320 -89
  58. tnfr/gamma.pyi +40 -0
  59. tnfr/glyph_history.py +337 -0
  60. tnfr/glyph_history.pyi +53 -0
  61. tnfr/grammar.py +23 -153
  62. tnfr/grammar.pyi +13 -0
  63. tnfr/helpers/__init__.py +151 -0
  64. tnfr/helpers/__init__.pyi +66 -0
  65. tnfr/helpers/numeric.py +88 -0
  66. tnfr/helpers/numeric.pyi +12 -0
  67. tnfr/immutable.py +214 -0
  68. tnfr/immutable.pyi +37 -0
  69. tnfr/initialization.py +199 -0
  70. tnfr/initialization.pyi +73 -0
  71. tnfr/io.py +311 -0
  72. tnfr/io.pyi +11 -0
  73. tnfr/locking.py +37 -0
  74. tnfr/locking.pyi +7 -0
  75. tnfr/metrics/__init__.py +41 -0
  76. tnfr/metrics/__init__.pyi +20 -0
  77. tnfr/metrics/coherence.py +1469 -0
  78. tnfr/metrics/common.py +149 -0
  79. tnfr/metrics/common.pyi +15 -0
  80. tnfr/metrics/core.py +259 -0
  81. tnfr/metrics/core.pyi +13 -0
  82. tnfr/metrics/diagnosis.py +840 -0
  83. tnfr/metrics/diagnosis.pyi +89 -0
  84. tnfr/metrics/export.py +151 -0
  85. tnfr/metrics/glyph_timing.py +369 -0
  86. tnfr/metrics/reporting.py +152 -0
  87. tnfr/metrics/reporting.pyi +12 -0
  88. tnfr/metrics/sense_index.py +294 -0
  89. tnfr/metrics/sense_index.pyi +9 -0
  90. tnfr/metrics/trig.py +216 -0
  91. tnfr/metrics/trig.pyi +12 -0
  92. tnfr/metrics/trig_cache.py +105 -0
  93. tnfr/metrics/trig_cache.pyi +10 -0
  94. tnfr/node.py +255 -177
  95. tnfr/node.pyi +161 -0
  96. tnfr/observers.py +154 -150
  97. tnfr/observers.pyi +46 -0
  98. tnfr/ontosim.py +135 -134
  99. tnfr/ontosim.pyi +33 -0
  100. tnfr/operators/__init__.py +452 -0
  101. tnfr/operators/__init__.pyi +31 -0
  102. tnfr/operators/definitions.py +181 -0
  103. tnfr/operators/definitions.pyi +92 -0
  104. tnfr/operators/jitter.py +266 -0
  105. tnfr/operators/jitter.pyi +11 -0
  106. tnfr/operators/registry.py +80 -0
  107. tnfr/operators/registry.pyi +15 -0
  108. tnfr/operators/remesh.py +569 -0
  109. tnfr/presets.py +10 -23
  110. tnfr/presets.pyi +7 -0
  111. tnfr/py.typed +0 -0
  112. tnfr/rng.py +440 -0
  113. tnfr/rng.pyi +14 -0
  114. tnfr/selector.py +217 -0
  115. tnfr/selector.pyi +19 -0
  116. tnfr/sense.py +307 -142
  117. tnfr/sense.pyi +30 -0
  118. tnfr/structural.py +69 -164
  119. tnfr/structural.pyi +46 -0
  120. tnfr/telemetry/__init__.py +13 -0
  121. tnfr/telemetry/verbosity.py +37 -0
  122. tnfr/tokens.py +61 -0
  123. tnfr/tokens.pyi +41 -0
  124. tnfr/trace.py +520 -95
  125. tnfr/trace.pyi +68 -0
  126. tnfr/types.py +382 -17
  127. tnfr/types.pyi +145 -0
  128. tnfr/utils/__init__.py +158 -0
  129. tnfr/utils/__init__.pyi +133 -0
  130. tnfr/utils/cache.py +755 -0
  131. tnfr/utils/cache.pyi +156 -0
  132. tnfr/utils/data.py +267 -0
  133. tnfr/utils/data.pyi +73 -0
  134. tnfr/utils/graph.py +87 -0
  135. tnfr/utils/graph.pyi +10 -0
  136. tnfr/utils/init.py +746 -0
  137. tnfr/utils/init.pyi +85 -0
  138. tnfr/utils/io.py +157 -0
  139. tnfr/utils/io.pyi +10 -0
  140. tnfr/utils/validators.py +130 -0
  141. tnfr/utils/validators.pyi +19 -0
  142. tnfr/validation/__init__.py +25 -0
  143. tnfr/validation/__init__.pyi +17 -0
  144. tnfr/validation/compatibility.py +59 -0
  145. tnfr/validation/compatibility.pyi +8 -0
  146. tnfr/validation/grammar.py +149 -0
  147. tnfr/validation/grammar.pyi +11 -0
  148. tnfr/validation/rules.py +194 -0
  149. tnfr/validation/rules.pyi +18 -0
  150. tnfr/validation/syntax.py +151 -0
  151. tnfr/validation/syntax.pyi +7 -0
  152. tnfr-6.0.0.dist-info/METADATA +135 -0
  153. tnfr-6.0.0.dist-info/RECORD +157 -0
  154. tnfr/cli.py +0 -322
  155. tnfr/config.py +0 -41
  156. tnfr/constants.py +0 -277
  157. tnfr/dynamics.py +0 -814
  158. tnfr/helpers.py +0 -264
  159. tnfr/main.py +0 -47
  160. tnfr/metrics.py +0 -597
  161. tnfr/operators.py +0 -525
  162. tnfr/program.py +0 -176
  163. tnfr/scenarios.py +0 -34
  164. tnfr/validators.py +0 -38
  165. tnfr-4.5.1.dist-info/METADATA +0 -221
  166. tnfr-4.5.1.dist-info/RECORD +0 -28
  167. {tnfr-4.5.1.dist-info → tnfr-6.0.0.dist-info}/WHEEL +0 -0
  168. {tnfr-4.5.1.dist-info → tnfr-6.0.0.dist-info}/entry_points.txt +0 -0
  169. {tnfr-4.5.1.dist-info → tnfr-6.0.0.dist-info}/licenses/LICENSE.md +0 -0
  170. {tnfr-4.5.1.dist-info → tnfr-6.0.0.dist-info}/top_level.txt +0 -0
tnfr/metrics/coherence.py (new file, matching entry 77 above)
@@ -0,0 +1,1469 @@
+ """Coherence metrics."""
+
+ from __future__ import annotations
+
+ import math
+ from collections.abc import Callable, Iterable, Mapping, Sequence
+ from concurrent.futures import ProcessPoolExecutor
+ from dataclasses import dataclass
+ from types import ModuleType
+ from typing import Any, MutableMapping, TypedDict, cast
+
+ from .._compat import TypeAlias
+
+
+ from ..constants import (
+     get_aliases,
+     get_param,
+ )
+ from ..callback_utils import CallbackEvent, callback_manager
+ from ..glyph_history import append_metric, ensure_history
+ from ..alias import collect_attr, collect_theta_attr, set_attr
+ from ..helpers.numeric import clamp01
+ from ..types import (
+     CoherenceMetric,
+     FloatArray,
+     FloatMatrix,
+     GlyphLoadDistribution,
+     HistoryState,
+     NodeId,
+     SigmaVector,
+     TNFRGraph,
+ )
+ from .common import compute_coherence, min_max_range
+ from .trig_cache import compute_theta_trig, get_trig_cache
+ from ..observers import (
+     DEFAULT_GLYPH_LOAD_SPAN,
+     DEFAULT_WBAR_SPAN,
+     glyph_load,
+     kuramoto_order,
+     phase_sync,
+ )
+ from ..sense import sigma_vector
+ from ..utils import (
+     ensure_node_index_map,
+     get_logger,
+     get_numpy,
+     normalize_weights,
+ )
+
+ logger = get_logger(__name__)
+
+ ALIAS_EPI = get_aliases("EPI")
+ ALIAS_VF = get_aliases("VF")
+ ALIAS_SI = get_aliases("SI")
+ ALIAS_DNFR = get_aliases("DNFR")
+ ALIAS_DEPI = get_aliases("DEPI")
+ ALIAS_DSI = get_aliases("DSI")
+ ALIAS_DVF = get_aliases("DVF")
+ ALIAS_D2VF = get_aliases("D2VF")
+
+ GLYPH_LOAD_STABILIZERS_KEY = "glyph_load_stabilizers"
+
+
+ @dataclass
+ class SimilarityInputs:
+     """Similarity inputs and optional trigonometric caches."""
+
+     th_vals: Sequence[float]
+     epi_vals: Sequence[float]
+     vf_vals: Sequence[float]
+     si_vals: Sequence[float]
+     cos_vals: Sequence[float] | None = None
+     sin_vals: Sequence[float] | None = None
+
+
+ CoherenceMatrixDense = list[list[float]]
+ CoherenceMatrixSparse = list[tuple[int, int, float]]
+ CoherenceMatrixPayload = CoherenceMatrixDense | CoherenceMatrixSparse
+ PhaseSyncWeights: TypeAlias = (
+     Sequence[float] | CoherenceMatrixSparse | CoherenceMatrixDense
+ )
+
+ SimilarityComponents = tuple[float, float, float, float]
+ VectorizedComponents: TypeAlias = (
+     tuple[FloatMatrix, FloatMatrix, FloatMatrix, FloatMatrix]
+ )
+ ScalarOrArray: TypeAlias = float | FloatArray
+ StabilityChunkArgs = tuple[
+     Sequence[float],
+     Sequence[float],
+     Sequence[float],
+     Sequence[float | None],
+     Sequence[float],
+     Sequence[float | None],
+     Sequence[float | None],
+     float,
+     float,
+     float,
+ ]
+ StabilityChunkResult = tuple[
+     int,
+     int,
+     float,
+     float,
+     list[float],
+     list[float],
+     list[float],
+ ]
+
+ MetricValue: TypeAlias = CoherenceMetric
+ MetricProvider = Callable[[], MetricValue]
+ MetricRecord: TypeAlias = tuple[MetricValue | MetricProvider, str]
+
+
+ class ParallelWijPayload(TypedDict):
+     epi_vals: Sequence[float]
+     vf_vals: Sequence[float]
+     si_vals: Sequence[float]
+     cos_vals: Sequence[float]
+     sin_vals: Sequence[float]
+     weights: tuple[float, float, float, float]
+     epi_range: float
+     vf_range: float
+
+
+ def _compute_wij_phase_epi_vf_si_vectorized(
+     epi: FloatArray,
+     vf: FloatArray,
+     si: FloatArray,
+     cos_th: FloatArray,
+     sin_th: FloatArray,
+     epi_range: float,
+     vf_range: float,
+     np: ModuleType,
+ ) -> VectorizedComponents:
+     """Vectorized computation of similarity components.
+
+     All parameters are expected to be NumPy arrays already cast to ``float``
+     when appropriate. ``epi_range`` and ``vf_range`` are normalized inside the
+     function to avoid division by zero.
+     """
+
+     epi_range = epi_range if epi_range > 0 else 1.0
+     vf_range = vf_range if vf_range > 0 else 1.0
+     s_phase = 0.5 * (
+         1.0
+         + cos_th[:, None] * cos_th[None, :]
+         + sin_th[:, None] * sin_th[None, :]
+     )
+     s_epi = 1.0 - np.abs(epi[:, None] - epi[None, :]) / epi_range
+     s_vf = 1.0 - np.abs(vf[:, None] - vf[None, :]) / vf_range
+     s_si = 1.0 - np.abs(si[:, None] - si[None, :])
+     return s_phase, s_epi, s_vf, s_si
+
+
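As a quick standalone illustration of the broadcasting above (plain NumPy; the tnfr array types are elided and the node values are made up), the pairwise component matrices for three nodes come out as:

    import numpy as np

    theta = np.array([0.0, np.pi / 2, np.pi])
    epi = np.array([0.1, 0.5, 0.9])
    cos_th, sin_th = np.cos(theta), np.sin(theta)
    epi_range = float(epi.max() - epi.min())  # 0.8, already > 0 here

    # Phase similarity: 1.0 on the diagonal, 0.0 for opposite phases.
    s_phase = 0.5 * (
        1.0
        + cos_th[:, None] * cos_th[None, :]
        + sin_th[:, None] * sin_th[None, :]
    )
    # EPI similarity: 1.0 for identical values, 0.0 across the full range.
    s_epi = 1.0 - np.abs(epi[:, None] - epi[None, :]) / epi_range
    print(s_phase[0, 2], s_epi[0, 2])  # both approximately 0.0
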
+ def compute_wij_phase_epi_vf_si(
+     inputs: SimilarityInputs,
+     i: int | None = None,
+     j: int | None = None,
+     *,
+     trig: Any | None = None,
+     G: TNFRGraph | None = None,
+     nodes: Sequence[NodeId] | None = None,
+     epi_range: float = 1.0,
+     vf_range: float = 1.0,
+     np: ModuleType | None = None,
+ ) -> SimilarityComponents | VectorizedComponents:
+     """Return similarity components for nodes ``i`` and ``j``.
+
+     When ``np`` is provided and ``i`` and ``j`` are ``None``, the computation
+     is vectorized and returns full matrices for all node pairs.
+     """
+
+     trig = trig or (get_trig_cache(G, np=np) if G is not None else None)
+     cos_vals = inputs.cos_vals
+     sin_vals = inputs.sin_vals
+     if cos_vals is None or sin_vals is None:
+         th_vals = inputs.th_vals
+         pairs = zip(nodes or range(len(th_vals)), th_vals)
+         trig_local = compute_theta_trig(pairs, np=np)
+         index_iter = nodes if nodes is not None else range(len(th_vals))
+         if trig is not None and nodes is not None:
+             cos_vals = [trig.cos.get(n, trig_local.cos[n]) for n in nodes]
+             sin_vals = [trig.sin.get(n, trig_local.sin[n]) for n in nodes]
+         else:
+             cos_vals = [trig_local.cos[i] for i in index_iter]
+             sin_vals = [trig_local.sin[i] for i in index_iter]
+         inputs.cos_vals = cos_vals
+         inputs.sin_vals = sin_vals
+
+     th_vals = inputs.th_vals
+     epi_vals = inputs.epi_vals
+     vf_vals = inputs.vf_vals
+     si_vals = inputs.si_vals
+
+     if np is not None and i is None and j is None:
+         epi = cast(FloatArray, np.asarray(epi_vals, dtype=float))
+         vf = cast(FloatArray, np.asarray(vf_vals, dtype=float))
+         si = cast(FloatArray, np.asarray(si_vals, dtype=float))
+         cos_th = cast(FloatArray, np.asarray(cos_vals, dtype=float))
+         sin_th = cast(FloatArray, np.asarray(sin_vals, dtype=float))
+         return _compute_wij_phase_epi_vf_si_vectorized(
+             epi,
+             vf,
+             si,
+             cos_th,
+             sin_th,
+             epi_range,
+             vf_range,
+             np,
+         )
+
+     if i is None or j is None:
+         raise ValueError("i and j are required for non-vectorized computation")
+     epi_range = epi_range if epi_range > 0 else 1.0
+     vf_range = vf_range if vf_range > 0 else 1.0
+     cos_i = cos_vals[i]
+     sin_i = sin_vals[i]
+     cos_j = cos_vals[j]
+     sin_j = sin_vals[j]
+     s_phase = 0.5 * (1.0 + (cos_i * cos_j + sin_i * sin_j))
+     s_epi = 1.0 - abs(epi_vals[i] - epi_vals[j]) / epi_range
+     s_vf = 1.0 - abs(vf_vals[i] - vf_vals[j]) / vf_range
+     s_si = 1.0 - abs(si_vals[i] - si_vals[j])
+     return s_phase, s_epi, s_vf, s_si
+
+
+ def _combine_similarity(
+     s_phase: ScalarOrArray,
+     s_epi: ScalarOrArray,
+     s_vf: ScalarOrArray,
+     s_si: ScalarOrArray,
+     phase_w: float,
+     epi_w: float,
+     vf_w: float,
+     si_w: float,
+     np: ModuleType | None = None,
+ ) -> ScalarOrArray:
+     wij = phase_w * s_phase + epi_w * s_epi + vf_w * s_vf + si_w * s_si
+     if np is not None:
+         return cast(FloatArray, np.clip(wij, 0.0, 1.0))
+     return clamp01(wij)
+
+
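The combination above is a convex blend followed by a clamp. A worked scalar instance with made-up normalized weights and components:

    # normalized weights for (phase, epi, vf, si) and one pair's components
    weights = (0.4, 0.3, 0.2, 0.1)
    components = (1.0, 0.8, 0.5, 0.9)
    wij = sum(w * s for w, s in zip(weights, components))
    # 0.4 + 0.24 + 0.10 + 0.09 == 0.83
    wij = min(1.0, max(0.0, wij))  # what clamp01 does on the scalar path
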
+ def _wij_components_weights(
+     G: TNFRGraph,
+     nodes: Sequence[NodeId] | None,
+     inputs: SimilarityInputs,
+     wnorm: Mapping[str, float],
+     i: int | None = None,
+     j: int | None = None,
+     epi_range: float = 1.0,
+     vf_range: float = 1.0,
+     np: ModuleType | None = None,
+ ) -> tuple[
+     ScalarOrArray,
+     ScalarOrArray,
+     ScalarOrArray,
+     ScalarOrArray,
+     float,
+     float,
+     float,
+     float,
+ ]:
+     """Return similarity components together with their weights.
+
+     This consolidates repeated computations, ensuring that both the
+     similarity components and the corresponding weights are derived once and
+     consistently across different implementations.
+     """
+
+     s_phase, s_epi, s_vf, s_si = compute_wij_phase_epi_vf_si(
+         inputs,
+         i,
+         j,
+         G=G,
+         nodes=nodes,
+         epi_range=epi_range,
+         vf_range=vf_range,
+         np=np,
+     )
+     phase_w = wnorm["phase"]
+     epi_w = wnorm["epi"]
+     vf_w = wnorm["vf"]
+     si_w = wnorm["si"]
+     return s_phase, s_epi, s_vf, s_si, phase_w, epi_w, vf_w, si_w
+
+
+ def _wij_vectorized(
+     G: TNFRGraph,
+     nodes: Sequence[NodeId],
+     inputs: SimilarityInputs,
+     wnorm: Mapping[str, float],
+     epi_min: float,
+     epi_max: float,
+     vf_min: float,
+     vf_max: float,
+     self_diag: bool,
+     np: ModuleType,
+ ) -> FloatMatrix:
+     epi_range = epi_max - epi_min if epi_max > epi_min else 1.0
+     vf_range = vf_max - vf_min if vf_max > vf_min else 1.0
+     (
+         s_phase,
+         s_epi,
+         s_vf,
+         s_si,
+         phase_w,
+         epi_w,
+         vf_w,
+         si_w,
+     ) = _wij_components_weights(
+         G,
+         nodes,
+         inputs,
+         wnorm,
+         epi_range=epi_range,
+         vf_range=vf_range,
+         np=np,
+     )
+     wij_matrix = cast(
+         FloatMatrix,
+         _combine_similarity(
+             s_phase, s_epi, s_vf, s_si, phase_w, epi_w, vf_w, si_w, np=np
+         ),
+     )
+     if self_diag:
+         np.fill_diagonal(wij_matrix, 1.0)
+     else:
+         np.fill_diagonal(wij_matrix, 0.0)
+     return wij_matrix
+
+
+ def _compute_wij_value_raw(
+     i: int,
+     j: int,
+     epi_vals: Sequence[float],
+     vf_vals: Sequence[float],
+     si_vals: Sequence[float],
+     cos_vals: Sequence[float],
+     sin_vals: Sequence[float],
+     weights: tuple[float, float, float, float],
+     epi_range: float,
+     vf_range: float,
+ ) -> float:
+     epi_range = epi_range if epi_range > 0 else 1.0
+     vf_range = vf_range if vf_range > 0 else 1.0
+     phase_w, epi_w, vf_w, si_w = weights
+     cos_i = cos_vals[i]
+     sin_i = sin_vals[i]
+     cos_j = cos_vals[j]
+     sin_j = sin_vals[j]
+     s_phase = 0.5 * (1.0 + (cos_i * cos_j + sin_i * sin_j))
+     s_epi = 1.0 - abs(epi_vals[i] - epi_vals[j]) / epi_range
+     s_vf = 1.0 - abs(vf_vals[i] - vf_vals[j]) / vf_range
+     s_si = 1.0 - abs(si_vals[i] - si_vals[j])
+     wij = phase_w * s_phase + epi_w * s_epi + vf_w * s_vf + si_w * s_si
+     return clamp01(wij)
+
+
+ _PARALLEL_WIJ_DATA: ParallelWijPayload | None = None
+
+
+ def _init_parallel_wij(data: ParallelWijPayload) -> None:
+     """Store immutable state for parallel ``wij`` computation."""
+
+     global _PARALLEL_WIJ_DATA
+     _PARALLEL_WIJ_DATA = data
+
+
+ def _parallel_wij_worker(
+     pairs: Sequence[tuple[int, int]]
+ ) -> list[tuple[int, int, float]]:
+     """Compute coherence weights for ``pairs`` using shared state."""
+
+     if _PARALLEL_WIJ_DATA is None:
+         raise RuntimeError("Parallel coherence data not initialized")
+
+     data = _PARALLEL_WIJ_DATA
+     epi_vals: Sequence[float] = data["epi_vals"]
+     vf_vals: Sequence[float] = data["vf_vals"]
+     si_vals: Sequence[float] = data["si_vals"]
+     cos_vals: Sequence[float] = data["cos_vals"]
+     sin_vals: Sequence[float] = data["sin_vals"]
+     weights: tuple[float, float, float, float] = data["weights"]
+     epi_range: float = data["epi_range"]
+     vf_range: float = data["vf_range"]
+
+     compute = _compute_wij_value_raw
+     return [
+         (
+             i,
+             j,
+             compute(
+                 i,
+                 j,
+                 epi_vals,
+                 vf_vals,
+                 si_vals,
+                 cos_vals,
+                 sin_vals,
+                 weights,
+                 epi_range,
+                 vf_range,
+             ),
+         )
+         for i, j in pairs
+     ]
+
+
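The two helpers above implement the executor-initializer pattern: each worker process is seeded once with a read-only payload through a module-level global. A self-contained recreation of the pattern (not this module's exact wiring; this variant uses a module-level initializer with initargs, which also survives the spawn start method):

    from concurrent.futures import ProcessPoolExecutor

    _SHARED: dict | None = None

    def _init_worker(payload: dict) -> None:
        # Runs once per worker process, before any task executes.
        global _SHARED
        _SHARED = payload

    def _task(pair: tuple[int, int]) -> float:
        assert _SHARED is not None, "initializer did not run"
        i, j = pair
        return _SHARED["scale"] * (i + j)

    if __name__ == "__main__":
        with ProcessPoolExecutor(
            max_workers=2, initializer=_init_worker, initargs=({"scale": 0.5},)
        ) as ex:
            print(list(ex.map(_task, [(0, 1), (1, 2), (2, 3)])))  # [0.5, 1.5, 2.5]
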
+ def _wij_loops(
+     G: TNFRGraph,
+     nodes: Sequence[NodeId],
+     node_to_index: Mapping[NodeId, int],
+     inputs: SimilarityInputs,
+     wnorm: Mapping[str, float],
+     epi_min: float,
+     epi_max: float,
+     vf_min: float,
+     vf_max: float,
+     neighbors_only: bool,
+     self_diag: bool,
+     n_jobs: int | None = 1,
+ ) -> CoherenceMatrixDense:
+     n = len(nodes)
+     cos_vals = inputs.cos_vals
+     sin_vals = inputs.sin_vals
+     if cos_vals is None or sin_vals is None:
+         th_vals = inputs.th_vals
+         trig_local = compute_theta_trig(zip(nodes, th_vals))
+         cos_vals = [trig_local.cos[n] for n in nodes]
+         sin_vals = [trig_local.sin[n] for n in nodes]
+         inputs.cos_vals = cos_vals
+         inputs.sin_vals = sin_vals
+     assert cos_vals is not None
+     assert sin_vals is not None
+     epi_vals = list(inputs.epi_vals)
+     vf_vals = list(inputs.vf_vals)
+     si_vals = list(inputs.si_vals)
+     cos_vals_list = list(cos_vals)
+     sin_vals_list = list(sin_vals)
+     inputs.epi_vals = epi_vals
+     inputs.vf_vals = vf_vals
+     inputs.si_vals = si_vals
+     inputs.cos_vals = cos_vals_list
+     inputs.sin_vals = sin_vals_list
+     wij = [
+         [1.0 if (self_diag and i == j) else 0.0 for j in range(n)]
+         for i in range(n)
+     ]
+     epi_range = epi_max - epi_min if epi_max > epi_min else 1.0
+     vf_range = vf_max - vf_min if vf_max > vf_min else 1.0
+     weights = (
+         float(wnorm["phase"]),
+         float(wnorm["epi"]),
+         float(wnorm["vf"]),
+         float(wnorm["si"]),
+     )
+     pair_list: list[tuple[int, int]] = []
+     if neighbors_only:
+         seen: set[tuple[int, int]] = set()
+         for u, v in G.edges():
+             i = node_to_index[u]
+             j = node_to_index[v]
+             if i == j:
+                 continue
+             pair = (i, j) if i < j else (j, i)
+             if pair in seen:
+                 continue
+             seen.add(pair)
+             pair_list.append(pair)
+     else:
+         for i in range(n):
+             for j in range(i + 1, n):
+                 pair_list.append((i, j))
+
+     total_pairs = len(pair_list)
+     max_workers = 1
+     if n_jobs is not None:
+         try:
+             max_workers = int(n_jobs)
+         except (TypeError, ValueError):
+             max_workers = 1
+     if max_workers <= 1 or total_pairs == 0:
+         for i, j in pair_list:
+             wij_ij = _compute_wij_value_raw(
+                 i,
+                 j,
+                 epi_vals,
+                 vf_vals,
+                 si_vals,
+                 cos_vals,
+                 sin_vals,
+                 weights,
+                 epi_range,
+                 vf_range,
+             )
+             wij[i][j] = wij[j][i] = wij_ij
+         return wij
+
+     chunk_size = max(1, math.ceil(total_pairs / max_workers))
+     payload: ParallelWijPayload = {
+         "epi_vals": tuple(epi_vals),
+         "vf_vals": tuple(vf_vals),
+         "si_vals": tuple(si_vals),
+         "cos_vals": tuple(cos_vals),
+         "sin_vals": tuple(sin_vals),
+         "weights": weights,
+         "epi_range": float(epi_range),
+         "vf_range": float(vf_range),
+     }
+
+     def _init() -> None:
+         _init_parallel_wij(payload)
+
+     with ProcessPoolExecutor(max_workers=max_workers, initializer=_init) as executor:
+         futures = []
+         for start in range(0, total_pairs, chunk_size):
+             chunk = pair_list[start:start + chunk_size]
+             futures.append(executor.submit(_parallel_wij_worker, chunk))
+         for future in futures:
+             for i, j, value in future.result():
+                 wij[i][j] = wij[j][i] = value
+     return wij
+
+
+ def _compute_stats(
+     values: Iterable[float] | Any,
+     row_sum: Iterable[float] | Any,
+     n: int,
+     self_diag: bool,
+     np: ModuleType | None = None,
+ ) -> tuple[float, float, float, list[float], int]:
+     """Return aggregate statistics for ``values`` and normalized row sums.
+
+     ``values`` and ``row_sum`` can be any iterables. They are normalized to
+     either NumPy arrays or Python lists depending on the availability of
+     NumPy. The computation then delegates to the appropriate numerical
+     functions with minimal branching.
+     """
+
+     if np is not None:
+         if not isinstance(values, np.ndarray):
+             values_arr = np.asarray(list(values), dtype=float)
+         else:
+             values_arr = cast(Any, values.astype(float))
+         if not isinstance(row_sum, np.ndarray):
+             row_arr = np.asarray(list(row_sum), dtype=float)
+         else:
+             row_arr = cast(Any, row_sum.astype(float))
+         count_val = int(values_arr.size)
+         min_val = float(values_arr.min()) if values_arr.size else 0.0
+         max_val = float(values_arr.max()) if values_arr.size else 0.0
+         mean_val = float(values_arr.mean()) if values_arr.size else 0.0
+     else:
+         values_list = list(values)
+         row_arr = list(row_sum)
+         count_val = len(values_list)
+         min_val = min(values_list) if values_list else 0.0
+         max_val = max(values_list) if values_list else 0.0
+         mean_val = sum(values_list) / len(values_list) if values_list else 0.0
+
+     row_count = n if self_diag else n - 1
+     denom = max(1, row_count)
+     if np is not None:
+         Wi = (row_arr / denom).astype(float).tolist()  # type: ignore[operator]
+     else:
+         Wi = [float(row_arr[i]) / denom for i in range(n)]
+     return min_val, max_val, mean_val, Wi, count_val
+
+
+ def _coherence_numpy(
+     wij: Any,
+     mode: str,
+     thr: float,
+     np: ModuleType,
+ ) -> tuple[int, Any, Any, CoherenceMatrixPayload]:
+     """Aggregate coherence weights using vectorized operations.
+
+     Produces the structural weight matrix ``W`` along with the list of
+     off-diagonal values and row sums ready for statistical analysis.
+     """
+
+     n = wij.shape[0]
+     mask = ~np.eye(n, dtype=bool)
+     values = wij[mask]
+     row_sum = wij.sum(axis=1)
+     if mode == "dense":
+         W = wij.tolist()
+     else:
+         idx = np.where((wij >= thr) & mask)
+         W = [
+             (int(i), int(j), float(wij[i, j]))
+             for i, j in zip(idx[0], idx[1])
+         ]
+     return n, values, row_sum, W
+
+
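For a concrete sense of the two store modes, a 3x3 weight matrix with threshold 0.5 splits as follows (values made up):

    wij = [
        [1.0, 0.2, 0.7],
        [0.2, 1.0, 0.6],
        [0.7, 0.6, 1.0],
    ]
    thr = 0.5
    # dense mode stores the full matrix unchanged: W == wij.
    # sparse mode keeps off-diagonal entries with w >= thr, both orientations:
    # [(0, 2, 0.7), (1, 2, 0.6), (2, 0, 0.7), (2, 1, 0.6)]
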
+ def _coherence_python_worker(
+     args: tuple[Sequence[Sequence[float]], int, str, float]
+ ) -> tuple[int, list[float], list[float], CoherenceMatrixSparse]:
+     rows, start, mode, thr = args
+     values: list[float] = []
+     row_sum: list[float] = []
+     sparse: list[tuple[int, int, float]] = []
+     dense_mode = mode == "dense"
+
+     for offset, row in enumerate(rows):
+         i = start + offset
+         total = 0.0
+         for j, w in enumerate(row):
+             total += w
+             if i != j:
+                 values.append(w)
+                 if not dense_mode and w >= thr:
+                     sparse.append((i, j, w))
+         row_sum.append(total)
+
+     return start, values, row_sum, sparse
+
+
+ def _coherence_python(
+     wij: Sequence[Sequence[float]],
+     mode: str,
+     thr: float,
+     n_jobs: int | None = 1,
+ ) -> tuple[int, list[float], list[float], CoherenceMatrixPayload]:
+     """Aggregate coherence weights using pure Python loops."""
+
+     n = len(wij)
+     values: list[float] = []
+     row_sum = [0.0] * n
+
+     if n_jobs is not None:
+         try:
+             max_workers = int(n_jobs)
+         except (TypeError, ValueError):
+             max_workers = 1
+     else:
+         max_workers = 1
+
+     if max_workers <= 1:
+         if mode == "dense":
+             W: CoherenceMatrixDense = [list(row) for row in wij]
+             for i in range(n):
+                 for j in range(n):
+                     w = W[i][j]
+                     if i != j:
+                         values.append(w)
+                     row_sum[i] += w
+         else:
+             W_sparse: CoherenceMatrixSparse = []
+             for i in range(n):
+                 row_i = wij[i]
+                 for j in range(n):
+                     w = row_i[j]
+                     if i != j:
+                         values.append(w)
+                         if w >= thr:
+                             W_sparse.append((i, j, w))
+                     row_sum[i] += w
+         return n, values, row_sum, W if mode == "dense" else W_sparse
+
+     chunk_size = max(1, math.ceil(n / max_workers))
+     tasks = []
+     with ProcessPoolExecutor(max_workers=max_workers) as executor:
+         for start in range(0, n, chunk_size):
+             rows = wij[start:start + chunk_size]
+             tasks.append(
+                 executor.submit(
+                     _coherence_python_worker,
+                     (tuple(tuple(row) for row in rows), start, mode, thr),
+                 )
+             )
+         results = [task.result() for task in tasks]
+
+     results.sort(key=lambda item: item[0])
+     sparse_entries: list[tuple[int, int, float]] | None = [] if mode != "dense" else None
+     for start, chunk_values, chunk_row_sum, chunk_sparse in results:
+         values.extend(chunk_values)
+         for offset, total in enumerate(chunk_row_sum):
+             row_sum[start + offset] = total
+         if sparse_entries is not None:
+             sparse_entries.extend(chunk_sparse)
+
+     if mode == "dense":
+         W_dense: CoherenceMatrixDense = [list(row) for row in wij]
+         return n, values, row_sum, W_dense
+     sparse_result: CoherenceMatrixSparse = (
+         sparse_entries if sparse_entries is not None else []
+     )
+     return n, values, row_sum, sparse_result
+
+
+ def _finalize_wij(
+     G: TNFRGraph,
+     nodes: Sequence[NodeId],
+     wij: FloatMatrix | Sequence[Sequence[float]],
+     mode: str,
+     thr: float,
+     scope: str,
+     self_diag: bool,
+     np: ModuleType | None = None,
+     *,
+     n_jobs: int = 1,
+ ) -> tuple[list[NodeId], CoherenceMatrixPayload]:
+     """Finalize the coherence matrix ``wij`` and store results in history.
+
+     When ``np`` is provided and ``wij`` is a NumPy array, the computation is
+     performed using vectorized operations. Otherwise a pure Python
+     loop-based approach is used.
+     """
+
+     use_np = np is not None and isinstance(wij, np.ndarray)
+     if use_np:
+         assert np is not None
+         n, values, row_sum, W = _coherence_numpy(wij, mode, thr, np)
+     else:
+         n, values, row_sum, W = _coherence_python(wij, mode, thr, n_jobs=n_jobs)
+
+     min_val, max_val, mean_val, Wi, count_val = _compute_stats(
+         values, row_sum, n, self_diag, np if use_np else None
+     )
+     stats = {
+         "min": min_val,
+         "max": max_val,
+         "mean": mean_val,
+         "n_edges": count_val,
+         "mode": mode,
+         "scope": scope,
+     }
+
+     hist = ensure_history(G)
+     cfg = get_param(G, "COHERENCE")
+     append_metric(hist, cfg.get("history_key", "W_sparse"), W)
+     append_metric(hist, cfg.get("Wi_history_key", "W_i"), Wi)
+     append_metric(hist, cfg.get("stats_history_key", "W_stats"), stats)
+     return list(nodes), W
+
+
+ def coherence_matrix(
+     G: TNFRGraph,
+     use_numpy: bool | None = None,
+     *,
+     n_jobs: int | None = None,
+ ) -> tuple[list[NodeId] | None, CoherenceMatrixPayload | None]:
+     """Compute the coherence weight matrix for ``G``.
+
+     Parameters
+     ----------
+     G:
+         Graph whose nodes encode the structural attributes.
+     use_numpy:
+         When ``True`` the vectorised NumPy implementation is forced. When
+         ``False`` the pure Python fallback is used. ``None`` selects NumPy
+         automatically when available.
+     n_jobs:
+         Maximum worker processes to use for the Python fallback. ``None`` or
+         values less than or equal to one preserve the serial behaviour.
+     """
+
+     cfg = get_param(G, "COHERENCE")
+     if not cfg.get("enabled", True):
+         return None, None
+
+     node_to_index: Mapping[NodeId, int] = ensure_node_index_map(G)
+     nodes: list[NodeId] = list(node_to_index.keys())
+     n = len(nodes)
+     if n == 0:
+         return nodes, []
+
+     # NumPy handling for optional vectorized operations
+     np = get_numpy()
+     use_np = (
+         np is not None if use_numpy is None else (use_numpy and np is not None)
+     )
+
+     cfg_jobs = cfg.get("n_jobs")
+     parallel_jobs = n_jobs if n_jobs is not None else cfg_jobs
+
+     # Precompute indices to avoid repeated list.index calls within loops
+
+     th_vals = collect_theta_attr(G, nodes, 0.0, np=np if use_np else None)
+     epi_vals = collect_attr(G, nodes, ALIAS_EPI, 0.0, np=np if use_np else None)
+     vf_vals = collect_attr(G, nodes, ALIAS_VF, 0.0, np=np if use_np else None)
+     si_vals = collect_attr(G, nodes, ALIAS_SI, 0.0, np=np if use_np else None)
+     if use_np:
+         assert np is not None
+         si_vals = np.clip(si_vals, 0.0, 1.0)
+     else:
+         si_vals = [clamp01(v) for v in si_vals]
+     epi_min, epi_max = min_max_range(epi_vals)
+     vf_min, vf_max = min_max_range(vf_vals)
+
+     wdict = dict(cfg.get("weights", {}))
+     for k in ("phase", "epi", "vf", "si"):
+         wdict.setdefault(k, 0.0)
+     wnorm = normalize_weights(wdict, ("phase", "epi", "vf", "si"), default=0.0)
+
+     scope = str(cfg.get("scope", "neighbors")).lower()
+     neighbors_only = scope != "all"
+     self_diag = bool(cfg.get("self_on_diag", True))
+     mode = str(cfg.get("store_mode", "sparse")).lower()
+     thr = float(cfg.get("threshold", 0.0))
+     if mode not in ("sparse", "dense"):
+         mode = "sparse"
+     trig = get_trig_cache(G, np=np)
+     cos_map, sin_map = trig.cos, trig.sin
+     trig_local = compute_theta_trig(zip(nodes, th_vals), np=np)
+     cos_vals = [cos_map.get(n, trig_local.cos[n]) for n in nodes]
+     sin_vals = [sin_map.get(n, trig_local.sin[n]) for n in nodes]
+     inputs = SimilarityInputs(
+         th_vals=th_vals,
+         epi_vals=epi_vals,
+         vf_vals=vf_vals,
+         si_vals=si_vals,
+         cos_vals=cos_vals,
+         sin_vals=sin_vals,
+     )
+     if use_np:
+         assert np is not None
+         wij_matrix = _wij_vectorized(
+             G,
+             nodes,
+             inputs,
+             wnorm,
+             epi_min,
+             epi_max,
+             vf_min,
+             vf_max,
+             self_diag,
+             np,
+         )
+         if neighbors_only:
+             adj = np.eye(n, dtype=bool)
+             for u, v in G.edges():
+                 i = node_to_index[u]
+                 j = node_to_index[v]
+                 adj[i, j] = True
+                 adj[j, i] = True
+             wij_matrix = cast(FloatMatrix, np.where(adj, wij_matrix, 0.0))
+         wij: FloatMatrix | CoherenceMatrixDense = wij_matrix
+     else:
+         wij = _wij_loops(
+             G,
+             nodes,
+             node_to_index,
+             inputs,
+             wnorm,
+             epi_min,
+             epi_max,
+             vf_min,
+             vf_max,
+             neighbors_only,
+             self_diag,
+             n_jobs=parallel_jobs,
+         )
+
+     return _finalize_wij(
+         G,
+         nodes,
+         wij,
+         mode,
+         thr,
+         scope,
+         self_diag,
+         np,
+         n_jobs=parallel_jobs if not use_np else 1,
+     )
+
+
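A hedged usage sketch for the public entry point (it assumes a networkx-backed TNFRGraph whose nodes already carry theta/EPI/vf/Si attributes under their registered alias keys, plus default COHERENCE parameters; the setup shown is not part of this module):

    import networkx as nx
    from tnfr.metrics.coherence import coherence_matrix, register_coherence_callbacks

    G = nx.path_graph(4)
    # ... populate each node's theta/EPI/vf/Si attributes under their alias keys ...
    nodes, W = coherence_matrix(G, use_numpy=False)  # force the Python fallback
    if nodes is not None:
        # W is a dense matrix or a sparse [(i, j, w), ...] list, per store_mode.
        print(len(nodes), type(W))
    register_coherence_callbacks(G)  # recompute after every simulation step
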
+ def local_phase_sync_weighted(
+     G: TNFRGraph,
+     n: NodeId,
+     nodes_order: Sequence[NodeId] | None = None,
+     W_row: PhaseSyncWeights | None = None,
+     node_to_index: Mapping[NodeId, int] | None = None,
+ ) -> float:
+     """Compute local phase synchrony using explicit weights.
+
+     ``nodes_order`` is the node ordering used to build the coherence matrix
+     and ``W_row`` contains either the dense row corresponding to ``n`` or the
+     sparse list of ``(i, j, w)`` tuples for the whole matrix.
+     """
+     if W_row is None or nodes_order is None:
+         raise ValueError(
+             "nodes_order and W_row are required for weighted phase synchrony"
+         )
+
+     if node_to_index is None:
+         node_to_index = ensure_node_index_map(G)
+     i = node_to_index.get(n)
+     if i is None:
+         i = nodes_order.index(n)
+
+     num = 0 + 0j
+     den = 0.0
+
+     trig = get_trig_cache(G)
+     cos_map, sin_map = trig.cos, trig.sin
+
+     if isinstance(W_row, Sequence) and W_row:
+         first = W_row[0]
+         if isinstance(first, (int, float)):
+             row_vals = cast(Sequence[float], W_row)
+             for w, nj in zip(row_vals, nodes_order):
+                 if nj == n:
+                     continue
+                 den += w
+                 cos_j = cos_map.get(nj)
+                 sin_j = sin_map.get(nj)
+                 if cos_j is None or sin_j is None:
+                     trig_j = compute_theta_trig(((nj, G.nodes[nj]),))
+                     cos_j = trig_j.cos[nj]
+                     sin_j = trig_j.sin[nj]
+                 num += w * complex(cos_j, sin_j)
+             return abs(num / den) if den else 0.0
+
+         if (
+             isinstance(first, Sequence)
+             and len(first) == 3
+             and isinstance(first[0], int)
+             and isinstance(first[1], int)
+             and isinstance(first[2], (int, float))
+         ):
+             sparse_entries = cast(CoherenceMatrixSparse, W_row)
+             for ii, jj, w in sparse_entries:
+                 if ii != i:
+                     continue
+                 nj = nodes_order[jj]
+                 if nj == n:
+                     continue
+                 den += w
+                 cos_j = cos_map.get(nj)
+                 sin_j = sin_map.get(nj)
+                 if cos_j is None or sin_j is None:
+                     trig_j = compute_theta_trig(((nj, G.nodes[nj]),))
+                     cos_j = trig_j.cos[nj]
+                     sin_j = trig_j.sin[nj]
+                 num += w * complex(cos_j, sin_j)
+             return abs(num / den) if den else 0.0
+
+         dense_matrix = cast(CoherenceMatrixDense, W_row)
+         if i is None:
+             raise ValueError("node index resolution failed for dense weights")
+         row_vals = cast(Sequence[float], dense_matrix[i])
+         for w, nj in zip(row_vals, nodes_order):
+             if nj == n:
+                 continue
+             den += w
+             cos_j = cos_map.get(nj)
+             sin_j = sin_map.get(nj)
+             if cos_j is None or sin_j is None:
+                 trig_j = compute_theta_trig(((nj, G.nodes[nj]),))
+                 cos_j = trig_j.cos[nj]
+                 sin_j = trig_j.sin[nj]
+             num += w * complex(cos_j, sin_j)
+         return abs(num / den) if den else 0.0
+
+     sparse_entries = cast(CoherenceMatrixSparse, W_row)
+     for ii, jj, w in sparse_entries:
+         if ii != i:
+             continue
+         nj = nodes_order[jj]
+         if nj == n:
+             continue
+         den += w
+         cos_j = cos_map.get(nj)
+         sin_j = sin_map.get(nj)
+         if cos_j is None or sin_j is None:
+             trig_j = compute_theta_trig(((nj, G.nodes[nj]),))
+             cos_j = trig_j.cos[nj]
+             sin_j = trig_j.sin[nj]
+         num += w * complex(cos_j, sin_j)
+
+     return abs(num / den) if den else 0.0
+
+
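The return value above is the weighted local order parameter |sum_j w_ij * exp(i*theta_j)| / sum_j w_ij over the neighbours of n. A standalone recreation for one dense row (made-up phases and weights, self entry skipped):

    import cmath

    thetas = [0.0, 0.3, 3.1]   # phases theta_j; node 0 comes first
    row = [0.0, 0.9, 0.1]      # dense W row for node 0
    num = sum(w * cmath.exp(1j * th) for w, th in zip(row[1:], thetas[1:]))
    den = sum(row[1:])
    R_local = abs(num) / den if den else 0.0  # about 0.81 here; 1.0 when aligned
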
+ def local_phase_sync(G: TNFRGraph, n: NodeId) -> float:
+     """Compute unweighted local phase synchronization for node ``n``."""
+     nodes, W = coherence_matrix(G)
+     if nodes is None:
+         return 0.0
+     return local_phase_sync_weighted(G, n, nodes_order=nodes, W_row=W)
+
+
+ def _coherence_step(G: TNFRGraph, ctx: dict[str, Any] | None = None) -> None:
+     del ctx
+
+     if not get_param(G, "COHERENCE").get("enabled", True):
+         return
+     coherence_matrix(G)
+
+
+ def register_coherence_callbacks(G: TNFRGraph) -> None:
+     callback_manager.register_callback(
+         G,
+         event=CallbackEvent.AFTER_STEP.value,
+         func=_coherence_step,
+         name="coherence_step",
+     )
+
+
+ # ---------------------------------------------------------------------------
+ # Coherence and observer-related metric updates
+ # ---------------------------------------------------------------------------
+
+
+ def _record_metrics(
+     hist: HistoryState,
+     *pairs: MetricRecord,
+     evaluate: bool = False,
+ ) -> None:
+     """Generic recorder for metric values."""
+
+     metrics = cast(MutableMapping[str, list[Any]], hist)
+     for payload, key in pairs:
+         if evaluate:
+             provider = cast(MetricProvider, payload)
+             append_metric(metrics, key, provider())
+         else:
+             append_metric(metrics, key, payload)
+
+
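A minimal sketch of the two recording modes, assuming append_metric behaves like hist.setdefault(key, []).append(value) and using a plain dict in place of HistoryState ("C_doubled" is a made-up key):

    hist: dict[str, list] = {}
    _record_metrics(hist, (0.42, "C_steps"))  # stores the value as-is
    _record_metrics(hist, (lambda: 2 * 0.42, "C_doubled"), evaluate=True)  # calls the provider
    # hist == {"C_steps": [0.42], "C_doubled": [0.84]}
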
+ def _update_coherence(G: TNFRGraph, hist: HistoryState) -> None:
+     """Update network coherence and related means."""
+
+     coherence_payload = cast(
+         tuple[CoherenceMetric, float, float],
+         compute_coherence(G, return_means=True),
+     )
+     C, dnfr_mean, depi_mean = coherence_payload
+     _record_metrics(
+         hist,
+         (C, "C_steps"),
+         (dnfr_mean, "dnfr_mean"),
+         (depi_mean, "depi_mean"),
+     )
+
+     cs = hist["C_steps"]
+     if cs:
+         window = min(len(cs), DEFAULT_WBAR_SPAN)
+         w = max(1, window)
+         wbar = sum(cs[-w:]) / w
+         _record_metrics(hist, (wbar, "W_bar"))
+
+
+ def _update_phase_sync(G: TNFRGraph, hist: HistoryState) -> None:
+     """Capture phase synchrony and Kuramoto order."""
+
+     ps = phase_sync(G)
+     ko = kuramoto_order(G)
+     _record_metrics(
+         hist,
+         (ps, "phase_sync"),
+         (ko, "kuramoto_R"),
+     )
+
+
+ def _update_sigma(G: TNFRGraph, hist: HistoryState) -> None:
+     """Record glyph load and associated Σ⃗ vector."""
+
+     metrics = cast(MutableMapping[str, list[Any]], hist)
+     if "glyph_load_estab" in metrics:
+         raise ValueError(
+             "History payloads using 'glyph_load_estab' are no longer supported. "
+             "Rename the series to 'glyph_load_stabilizers' before loading the graph."
+         )
+     stabilizer_series = metrics.get(GLYPH_LOAD_STABILIZERS_KEY)
+
+     if stabilizer_series is None:
+         stabilizer_series = cast(
+             list[Any], metrics.setdefault(GLYPH_LOAD_STABILIZERS_KEY, [])
+         )
+     else:
+         stabilizer_series = cast(list[Any], stabilizer_series)
+
+     gl: GlyphLoadDistribution = glyph_load(G, window=DEFAULT_GLYPH_LOAD_SPAN)
+     stabilizers = float(gl.get("_stabilizers", 0.0))
+     disruptors = float(gl.get("_disruptors", 0.0))
+     _record_metrics(
+         hist,
+         (stabilizers, GLYPH_LOAD_STABILIZERS_KEY),
+         (disruptors, "glyph_load_disr"),
+     )
+
+     dist: GlyphLoadDistribution = {
+         k: v for k, v in gl.items() if not k.startswith("_")
+     }
+     sig: SigmaVector = sigma_vector(dist)
+     _record_metrics(
+         hist,
+         (sig.get("x", 0.0), "sense_sigma_x"),
+         (sig.get("y", 0.0), "sense_sigma_y"),
+         (sig.get("mag", 0.0), "sense_sigma_mag"),
+         (sig.get("angle", 0.0), "sense_sigma_angle"),
+     )
+
+
+ def _stability_chunk_worker(args: StabilityChunkArgs) -> StabilityChunkResult:
+     """Compute stability aggregates for a chunk of nodes."""
+
+     (
+         dnfr_vals,
+         depi_vals,
+         si_curr_vals,
+         si_prev_vals,
+         vf_curr_vals,
+         vf_prev_vals,
+         dvf_prev_vals,
+         dt,
+         eps_dnfr,
+         eps_depi,
+     ) = args
+
+     inv_dt = (1.0 / dt) if dt else 0.0
+     stable = 0
+     delta_sum = 0.0
+     B_sum = 0.0
+     delta_vals: list[float] = []
+     dvf_dt_vals: list[float] = []
+     B_vals: list[float] = []
+
+     for idx in range(len(si_curr_vals)):
+         curr_si = float(si_curr_vals[idx])
+         prev_si_raw = si_prev_vals[idx]
+         prev_si = float(prev_si_raw) if prev_si_raw is not None else curr_si
+         delta = curr_si - prev_si
+         delta_vals.append(delta)
+         delta_sum += delta
+
+         curr_vf = float(vf_curr_vals[idx])
+         prev_vf_raw = vf_prev_vals[idx]
+         prev_vf = float(prev_vf_raw) if prev_vf_raw is not None else curr_vf
+         dvf_dt = (curr_vf - prev_vf) * inv_dt if dt else 0.0
+         prev_dvf_raw = dvf_prev_vals[idx]
+         prev_dvf = float(prev_dvf_raw) if prev_dvf_raw is not None else dvf_dt
+         B = (dvf_dt - prev_dvf) * inv_dt if dt else 0.0
+         dvf_dt_vals.append(dvf_dt)
+         B_vals.append(B)
+         B_sum += B
+
+         if abs(float(dnfr_vals[idx])) <= eps_dnfr and abs(float(depi_vals[idx])) <= eps_depi:
+             stable += 1
+
+     chunk_len = len(si_curr_vals)
+     return (
+         stable,
+         chunk_len,
+         delta_sum,
+         B_sum,
+         delta_vals,
+         dvf_dt_vals,
+         B_vals,
+     )
+
+
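With dt = 0.5 the worker's derivative chain reduces to two first-order differences per node; a worked instance with made-up values:

    dt = 0.5
    inv_dt = 1.0 / dt
    curr_vf, prev_vf = 1.2, 1.0            # current and cached structural frequency
    dvf_dt = (curr_vf - prev_vf) * inv_dt  # first difference: 0.4
    prev_dvf = 0.1                          # derivative cached from the previous step
    B = (dvf_dt - prev_dvf) * inv_dt        # second difference: (0.4 - 0.1) * 2 == 0.6
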
+ def _track_stability(
+     G: TNFRGraph,
+     hist: MutableMapping[str, Any],
+     dt: float,
+     eps_dnfr: float,
+     eps_depi: float,
+     *,
+     n_jobs: int | None = None,
+ ) -> None:
+     """Track per-node stability and derivative metrics."""
+
+     nodes: tuple[NodeId, ...] = tuple(G.nodes)
+     total_nodes = len(nodes)
+     if not total_nodes:
+         hist.setdefault("stable_frac", []).append(0.0)
+         hist.setdefault("delta_Si", []).append(0.0)
+         hist.setdefault("B", []).append(0.0)
+         return
+
+     np_mod = get_numpy()
+
+     dnfr_vals = collect_attr(G, nodes, ALIAS_DNFR, 0.0, np=np_mod)
+     depi_vals = collect_attr(G, nodes, ALIAS_DEPI, 0.0, np=np_mod)
+     si_curr_vals = collect_attr(G, nodes, ALIAS_SI, 0.0, np=np_mod)
+     vf_curr_vals = collect_attr(G, nodes, ALIAS_VF, 0.0, np=np_mod)
+
+     prev_si_data = [G.nodes[n].get("_prev_Si") for n in nodes]
+     prev_vf_data = [G.nodes[n].get("_prev_vf") for n in nodes]
+     prev_dvf_data = [G.nodes[n].get("_prev_dvf") for n in nodes]
+
+     inv_dt = (1.0 / dt) if dt else 0.0
+
+     if np_mod is not None:
+         np = np_mod
+         dnfr_arr = dnfr_vals
+         depi_arr = depi_vals
+         si_curr_arr = si_curr_vals
+         vf_curr_arr = vf_curr_vals
+
+         si_prev_arr = np.asarray(
+             [
+                 float(prev_si_data[idx])
+                 if prev_si_data[idx] is not None
+                 else float(si_curr_arr[idx])
+                 for idx in range(total_nodes)
+             ],
+             dtype=float,
+         )
+         vf_prev_arr = np.asarray(
+             [
+                 float(prev_vf_data[idx])
+                 if prev_vf_data[idx] is not None
+                 else float(vf_curr_arr[idx])
+                 for idx in range(total_nodes)
+             ],
+             dtype=float,
+         )
+
+         if dt:
+             dvf_dt_arr = (vf_curr_arr - vf_prev_arr) * inv_dt
+         else:
+             dvf_dt_arr = np.zeros_like(vf_curr_arr, dtype=float)
+
+         dvf_prev_arr = np.asarray(
+             [
+                 float(prev_dvf_data[idx])
+                 if prev_dvf_data[idx] is not None
+                 else float(dvf_dt_arr[idx])
+                 for idx in range(total_nodes)
+             ],
+             dtype=float,
+         )
+
+         if dt:
+             B_arr = (dvf_dt_arr - dvf_prev_arr) * inv_dt
+         else:
+             B_arr = np.zeros_like(dvf_dt_arr, dtype=float)
+
+         stable_mask = (np.abs(dnfr_arr) <= eps_dnfr) & (np.abs(depi_arr) <= eps_depi)
+         stable_frac = float(stable_mask.mean()) if total_nodes else 0.0
+
+         delta_si_arr = si_curr_arr - si_prev_arr
+         delta_si_mean = float(delta_si_arr.mean()) if total_nodes else 0.0
+         B_mean = float(B_arr.mean()) if total_nodes else 0.0
+
+         hist.setdefault("stable_frac", []).append(stable_frac)
+         hist.setdefault("delta_Si", []).append(delta_si_mean)
+         hist.setdefault("B", []).append(B_mean)
+
+         for idx, node in enumerate(nodes):
+             nd = G.nodes[node]
+             curr_si = float(si_curr_arr[idx])
+             delta_val = float(delta_si_arr[idx])
+             nd["_prev_Si"] = curr_si
+             set_attr(nd, ALIAS_DSI, delta_val)
+
+             curr_vf = float(vf_curr_arr[idx])
+             nd["_prev_vf"] = curr_vf
+
+             dvf_dt_val = float(dvf_dt_arr[idx])
+             nd["_prev_dvf"] = dvf_dt_val
+             set_attr(nd, ALIAS_DVF, dvf_dt_val)
+             set_attr(nd, ALIAS_D2VF, float(B_arr[idx]))
+
+         return
+
+     # NumPy not available: optionally parallel fallback or sequential computation.
+     dnfr_list = list(dnfr_vals)
+     depi_list = list(depi_vals)
+     si_curr_list = list(si_curr_vals)
+     vf_curr_list = list(vf_curr_vals)
+
+     if n_jobs and n_jobs > 1:
+         chunk_size = max(1, math.ceil(total_nodes / n_jobs))
+         chunk_results: list[tuple[int, StabilityChunkResult]] = []
+         with ProcessPoolExecutor(max_workers=n_jobs) as executor:
+             futures: list[tuple[int, Any]] = []
+             for start in range(0, total_nodes, chunk_size):
+                 end = min(start + chunk_size, total_nodes)
+                 chunk_args = (
+                     dnfr_list[start:end],
+                     depi_list[start:end],
+                     si_curr_list[start:end],
+                     prev_si_data[start:end],
+                     vf_curr_list[start:end],
+                     prev_vf_data[start:end],
+                     prev_dvf_data[start:end],
+                     dt,
+                     eps_dnfr,
+                     eps_depi,
+                 )
+                 futures.append((start, executor.submit(_stability_chunk_worker, chunk_args)))
+
+             for start, fut in futures:
+                 chunk_results.append((start, fut.result()))
+
+         chunk_results.sort(key=lambda item: item[0])
+
+         stable_total = 0
+         delta_sum = 0.0
+         B_sum = 0.0
+         delta_vals_all: list[float] = []
+         dvf_dt_all: list[float] = []
+         B_vals_all: list[float] = []
+
+         for _, result in chunk_results:
+             (
+                 stable_count,
+                 chunk_len,
+                 chunk_delta_sum,
+                 chunk_B_sum,
+                 delta_vals,
+                 dvf_vals,
+                 B_vals,
+             ) = result
+             stable_total += stable_count
+             delta_sum += chunk_delta_sum
+             B_sum += chunk_B_sum
+             delta_vals_all.extend(delta_vals)
+             dvf_dt_all.extend(dvf_vals)
+             B_vals_all.extend(B_vals)
+
+         total = len(delta_vals_all)
+         stable_frac = stable_total / total if total else 0.0
+         delta_si_mean = delta_sum / total if total else 0.0
+         B_mean = B_sum / total if total else 0.0
+
+     else:
+         stable_total = 0
+         delta_sum = 0.0
+         B_sum = 0.0
+         delta_vals_all = []
+         dvf_dt_all = []
+         B_vals_all = []
+
+         for idx in range(total_nodes):
+             curr_si = float(si_curr_list[idx])
+             prev_si_raw = prev_si_data[idx]
+             prev_si = float(prev_si_raw) if prev_si_raw is not None else curr_si
+             delta = curr_si - prev_si
+             delta_vals_all.append(delta)
+             delta_sum += delta
+
+             curr_vf = float(vf_curr_list[idx])
+             prev_vf_raw = prev_vf_data[idx]
+             prev_vf = float(prev_vf_raw) if prev_vf_raw is not None else curr_vf
+             dvf_dt_val = (curr_vf - prev_vf) * inv_dt if dt else 0.0
+             prev_dvf_raw = prev_dvf_data[idx]
+             prev_dvf = float(prev_dvf_raw) if prev_dvf_raw is not None else dvf_dt_val
+             B_val = (dvf_dt_val - prev_dvf) * inv_dt if dt else 0.0
+             dvf_dt_all.append(dvf_dt_val)
+             B_vals_all.append(B_val)
+             B_sum += B_val
+
+             if abs(float(dnfr_list[idx])) <= eps_dnfr and abs(float(depi_list[idx])) <= eps_depi:
+                 stable_total += 1
+
+         total = len(delta_vals_all)
+         stable_frac = stable_total / total if total else 0.0
+         delta_si_mean = delta_sum / total if total else 0.0
+         B_mean = B_sum / total if total else 0.0
+
+     hist.setdefault("stable_frac", []).append(stable_frac)
+     hist.setdefault("delta_Si", []).append(delta_si_mean)
+     hist.setdefault("B", []).append(B_mean)
+
+     for idx, node in enumerate(nodes):
+         nd = G.nodes[node]
+         curr_si = float(si_curr_list[idx])
+         delta_val = float(delta_vals_all[idx])
+         nd["_prev_Si"] = curr_si
+         set_attr(nd, ALIAS_DSI, delta_val)
+
+         curr_vf = float(vf_curr_list[idx])
+         nd["_prev_vf"] = curr_vf
+
+         dvf_dt_val = float(dvf_dt_all[idx])
+         nd["_prev_dvf"] = dvf_dt_val
+         set_attr(nd, ALIAS_DVF, dvf_dt_val)
+         set_attr(nd, ALIAS_D2VF, float(B_vals_all[idx]))
+
+
+ def _si_chunk_stats(
+     values: Sequence[float], si_hi: float, si_lo: float
+ ) -> tuple[float, int, int, int]:
+     """Compute partial Si aggregates for ``values``.
+
+     The helper keeps the logic shared between the sequential and parallel
+     fallbacks when NumPy is unavailable.
+     """
+
+     total = 0.0
+     count = 0
+     hi_count = 0
+     lo_count = 0
+     for s in values:
+         if math.isnan(s):
+             continue
+         total += s
+         count += 1
+         if s >= si_hi:
+             hi_count += 1
+         if s <= si_lo:
+             lo_count += 1
+     return total, count, hi_count, lo_count
+
+
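A worked instance with the default thresholds (values made up):

    values = [0.9, 0.2, float("nan"), 0.5]
    total, count, hi, lo = _si_chunk_stats(values, si_hi=0.66, si_lo=0.33)
    # NaN is skipped: total ≈ 1.6, count == 3,
    # hi == 1 (0.9 >= 0.66) and lo == 1 (0.2 <= 0.33)
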
+ def _aggregate_si(
+     G: TNFRGraph,
+     hist: MutableMapping[str, list[float]],
+     *,
+     n_jobs: int | None = None,
+ ) -> None:
+     """Aggregate Si statistics across nodes."""
+
+     try:
+         thr_sel = get_param(G, "SELECTOR_THRESHOLDS")
+         thr_def = get_param(G, "GLYPH_THRESHOLDS")
+         si_hi = float(thr_sel.get("si_hi", thr_def.get("hi", 0.66)))
+         si_lo = float(thr_sel.get("si_lo", thr_def.get("lo", 0.33)))
+
+         np_mod = get_numpy()
+         if np_mod is not None:
+             sis = collect_attr(G, G.nodes, ALIAS_SI, float("nan"), np=np_mod)
+             valid = sis[~np_mod.isnan(sis)]
+             n = int(valid.size)
+             if n:
+                 hist["Si_mean"].append(float(valid.mean()))
+                 hi_frac = np_mod.count_nonzero(valid >= si_hi) / n
+                 lo_frac = np_mod.count_nonzero(valid <= si_lo) / n
+                 hist["Si_hi_frac"].append(float(hi_frac))
+                 hist["Si_lo_frac"].append(float(lo_frac))
+             else:
+                 hist["Si_mean"].append(0.0)
+                 hist["Si_hi_frac"].append(0.0)
+                 hist["Si_lo_frac"].append(0.0)
+             return
+
+         sis = collect_attr(G, G.nodes, ALIAS_SI, float("nan"))
+         if not sis:
+             hist["Si_mean"].append(0.0)
+             hist["Si_hi_frac"].append(0.0)
+             hist["Si_lo_frac"].append(0.0)
+             return
+
+         if n_jobs is not None and n_jobs > 1:
+             chunk_size = max(1, math.ceil(len(sis) / n_jobs))
+             futures = []
+             with ProcessPoolExecutor(max_workers=n_jobs) as executor:
+                 for idx in range(0, len(sis), chunk_size):
+                     chunk = sis[idx:idx + chunk_size]
+                     futures.append(
+                         executor.submit(_si_chunk_stats, chunk, si_hi, si_lo)
+                     )
+                 totals = [future.result() for future in futures]
+             total = sum(part[0] for part in totals)
+             count = sum(part[1] for part in totals)
+             hi_count = sum(part[2] for part in totals)
+             lo_count = sum(part[3] for part in totals)
+         else:
+             total, count, hi_count, lo_count = _si_chunk_stats(sis, si_hi, si_lo)
+
+         if count:
+             hist["Si_mean"].append(total / count)
+             hist["Si_hi_frac"].append(hi_count / count)
+             hist["Si_lo_frac"].append(lo_count / count)
+         else:
+             hist["Si_mean"].append(0.0)
+             hist["Si_hi_frac"].append(0.0)
+             hist["Si_lo_frac"].append(0.0)
+     except (KeyError, AttributeError, TypeError) as exc:
+         logger.debug("Si aggregation failed: %s", exc)
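
Continuing that chunk through the sequential path of _aggregate_si (NumPy absent), the appended history entries would be:

    hist = {"Si_mean": [], "Si_hi_frac": [], "Si_lo_frac": []}
    count = 3
    hist["Si_mean"].append(1.6 / count)   # ≈ 0.533
    hist["Si_hi_frac"].append(1 / count)  # ≈ 0.333
    hist["Si_lo_frac"].append(1 / count)  # ≈ 0.333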