tnfr 3.0.3__py3-none-any.whl → 8.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tnfr might be problematic. Click here for more details.
- tnfr/__init__.py +375 -56
- tnfr/__init__.pyi +33 -0
- tnfr/_compat.py +10 -0
- tnfr/_generated_version.py +34 -0
- tnfr/_version.py +49 -0
- tnfr/_version.pyi +7 -0
- tnfr/alias.py +723 -0
- tnfr/alias.pyi +108 -0
- tnfr/backends/__init__.py +354 -0
- tnfr/backends/jax_backend.py +173 -0
- tnfr/backends/numpy_backend.py +238 -0
- tnfr/backends/optimized_numpy.py +420 -0
- tnfr/backends/torch_backend.py +408 -0
- tnfr/cache.py +171 -0
- tnfr/cache.pyi +13 -0
- tnfr/cli/__init__.py +110 -0
- tnfr/cli/__init__.pyi +26 -0
- tnfr/cli/arguments.py +489 -0
- tnfr/cli/arguments.pyi +29 -0
- tnfr/cli/execution.py +914 -0
- tnfr/cli/execution.pyi +70 -0
- tnfr/cli/interactive_validator.py +614 -0
- tnfr/cli/utils.py +51 -0
- tnfr/cli/utils.pyi +7 -0
- tnfr/cli/validate.py +236 -0
- tnfr/compat/__init__.py +85 -0
- tnfr/compat/dataclass.py +136 -0
- tnfr/compat/jsonschema_stub.py +61 -0
- tnfr/compat/matplotlib_stub.py +73 -0
- tnfr/compat/numpy_stub.py +155 -0
- tnfr/config/__init__.py +224 -0
- tnfr/config/__init__.pyi +10 -0
- tnfr/config/constants.py +104 -0
- tnfr/config/constants.pyi +12 -0
- tnfr/config/defaults.py +54 -0
- tnfr/config/defaults_core.py +212 -0
- tnfr/config/defaults_init.py +33 -0
- tnfr/config/defaults_metric.py +104 -0
- tnfr/config/feature_flags.py +81 -0
- tnfr/config/feature_flags.pyi +16 -0
- tnfr/config/glyph_constants.py +31 -0
- tnfr/config/init.py +77 -0
- tnfr/config/init.pyi +8 -0
- tnfr/config/operator_names.py +254 -0
- tnfr/config/operator_names.pyi +36 -0
- tnfr/config/physics_derivation.py +354 -0
- tnfr/config/presets.py +83 -0
- tnfr/config/presets.pyi +7 -0
- tnfr/config/security.py +927 -0
- tnfr/config/thresholds.py +114 -0
- tnfr/config/tnfr_config.py +498 -0
- tnfr/constants/__init__.py +92 -0
- tnfr/constants/__init__.pyi +92 -0
- tnfr/constants/aliases.py +33 -0
- tnfr/constants/aliases.pyi +27 -0
- tnfr/constants/init.py +33 -0
- tnfr/constants/init.pyi +12 -0
- tnfr/constants/metric.py +104 -0
- tnfr/constants/metric.pyi +19 -0
- tnfr/core/__init__.py +33 -0
- tnfr/core/container.py +226 -0
- tnfr/core/default_implementations.py +329 -0
- tnfr/core/interfaces.py +279 -0
- tnfr/dynamics/__init__.py +238 -0
- tnfr/dynamics/__init__.pyi +83 -0
- tnfr/dynamics/adaptation.py +267 -0
- tnfr/dynamics/adaptation.pyi +7 -0
- tnfr/dynamics/adaptive_sequences.py +189 -0
- tnfr/dynamics/adaptive_sequences.pyi +14 -0
- tnfr/dynamics/aliases.py +23 -0
- tnfr/dynamics/aliases.pyi +19 -0
- tnfr/dynamics/bifurcation.py +232 -0
- tnfr/dynamics/canonical.py +229 -0
- tnfr/dynamics/canonical.pyi +48 -0
- tnfr/dynamics/coordination.py +385 -0
- tnfr/dynamics/coordination.pyi +25 -0
- tnfr/dynamics/dnfr.py +3034 -0
- tnfr/dynamics/dnfr.pyi +26 -0
- tnfr/dynamics/dynamic_limits.py +225 -0
- tnfr/dynamics/feedback.py +252 -0
- tnfr/dynamics/feedback.pyi +24 -0
- tnfr/dynamics/fused_dnfr.py +454 -0
- tnfr/dynamics/homeostasis.py +157 -0
- tnfr/dynamics/homeostasis.pyi +14 -0
- tnfr/dynamics/integrators.py +661 -0
- tnfr/dynamics/integrators.pyi +36 -0
- tnfr/dynamics/learning.py +310 -0
- tnfr/dynamics/learning.pyi +33 -0
- tnfr/dynamics/metabolism.py +254 -0
- tnfr/dynamics/nbody.py +796 -0
- tnfr/dynamics/nbody_tnfr.py +783 -0
- tnfr/dynamics/propagation.py +326 -0
- tnfr/dynamics/runtime.py +908 -0
- tnfr/dynamics/runtime.pyi +77 -0
- tnfr/dynamics/sampling.py +36 -0
- tnfr/dynamics/sampling.pyi +7 -0
- tnfr/dynamics/selectors.py +711 -0
- tnfr/dynamics/selectors.pyi +85 -0
- tnfr/dynamics/structural_clip.py +207 -0
- tnfr/errors/__init__.py +37 -0
- tnfr/errors/contextual.py +492 -0
- tnfr/execution.py +223 -0
- tnfr/execution.pyi +45 -0
- tnfr/extensions/__init__.py +205 -0
- tnfr/extensions/__init__.pyi +18 -0
- tnfr/extensions/base.py +173 -0
- tnfr/extensions/base.pyi +35 -0
- tnfr/extensions/business/__init__.py +71 -0
- tnfr/extensions/business/__init__.pyi +11 -0
- tnfr/extensions/business/cookbook.py +88 -0
- tnfr/extensions/business/cookbook.pyi +8 -0
- tnfr/extensions/business/health_analyzers.py +202 -0
- tnfr/extensions/business/health_analyzers.pyi +9 -0
- tnfr/extensions/business/patterns.py +183 -0
- tnfr/extensions/business/patterns.pyi +8 -0
- tnfr/extensions/medical/__init__.py +73 -0
- tnfr/extensions/medical/__init__.pyi +11 -0
- tnfr/extensions/medical/cookbook.py +88 -0
- tnfr/extensions/medical/cookbook.pyi +8 -0
- tnfr/extensions/medical/health_analyzers.py +181 -0
- tnfr/extensions/medical/health_analyzers.pyi +9 -0
- tnfr/extensions/medical/patterns.py +163 -0
- tnfr/extensions/medical/patterns.pyi +8 -0
- tnfr/flatten.py +262 -0
- tnfr/flatten.pyi +21 -0
- tnfr/gamma.py +354 -0
- tnfr/gamma.pyi +36 -0
- tnfr/glyph_history.py +377 -0
- tnfr/glyph_history.pyi +35 -0
- tnfr/glyph_runtime.py +19 -0
- tnfr/glyph_runtime.pyi +8 -0
- tnfr/immutable.py +218 -0
- tnfr/immutable.pyi +36 -0
- tnfr/initialization.py +203 -0
- tnfr/initialization.pyi +65 -0
- tnfr/io.py +10 -0
- tnfr/io.pyi +13 -0
- tnfr/locking.py +37 -0
- tnfr/locking.pyi +7 -0
- tnfr/mathematics/__init__.py +79 -0
- tnfr/mathematics/backend.py +453 -0
- tnfr/mathematics/backend.pyi +99 -0
- tnfr/mathematics/dynamics.py +408 -0
- tnfr/mathematics/dynamics.pyi +90 -0
- tnfr/mathematics/epi.py +391 -0
- tnfr/mathematics/epi.pyi +65 -0
- tnfr/mathematics/generators.py +242 -0
- tnfr/mathematics/generators.pyi +29 -0
- tnfr/mathematics/metrics.py +119 -0
- tnfr/mathematics/metrics.pyi +16 -0
- tnfr/mathematics/operators.py +239 -0
- tnfr/mathematics/operators.pyi +59 -0
- tnfr/mathematics/operators_factory.py +124 -0
- tnfr/mathematics/operators_factory.pyi +11 -0
- tnfr/mathematics/projection.py +87 -0
- tnfr/mathematics/projection.pyi +33 -0
- tnfr/mathematics/runtime.py +182 -0
- tnfr/mathematics/runtime.pyi +64 -0
- tnfr/mathematics/spaces.py +256 -0
- tnfr/mathematics/spaces.pyi +83 -0
- tnfr/mathematics/transforms.py +305 -0
- tnfr/mathematics/transforms.pyi +62 -0
- tnfr/metrics/__init__.py +79 -0
- tnfr/metrics/__init__.pyi +20 -0
- tnfr/metrics/buffer_cache.py +163 -0
- tnfr/metrics/buffer_cache.pyi +24 -0
- tnfr/metrics/cache_utils.py +214 -0
- tnfr/metrics/coherence.py +2009 -0
- tnfr/metrics/coherence.pyi +129 -0
- tnfr/metrics/common.py +158 -0
- tnfr/metrics/common.pyi +35 -0
- tnfr/metrics/core.py +316 -0
- tnfr/metrics/core.pyi +13 -0
- tnfr/metrics/diagnosis.py +833 -0
- tnfr/metrics/diagnosis.pyi +86 -0
- tnfr/metrics/emergence.py +245 -0
- tnfr/metrics/export.py +179 -0
- tnfr/metrics/export.pyi +7 -0
- tnfr/metrics/glyph_timing.py +379 -0
- tnfr/metrics/glyph_timing.pyi +81 -0
- tnfr/metrics/learning_metrics.py +280 -0
- tnfr/metrics/learning_metrics.pyi +21 -0
- tnfr/metrics/phase_coherence.py +351 -0
- tnfr/metrics/phase_compatibility.py +349 -0
- tnfr/metrics/reporting.py +183 -0
- tnfr/metrics/reporting.pyi +25 -0
- tnfr/metrics/sense_index.py +1203 -0
- tnfr/metrics/sense_index.pyi +9 -0
- tnfr/metrics/trig.py +373 -0
- tnfr/metrics/trig.pyi +13 -0
- tnfr/metrics/trig_cache.py +233 -0
- tnfr/metrics/trig_cache.pyi +10 -0
- tnfr/multiscale/__init__.py +32 -0
- tnfr/multiscale/hierarchical.py +517 -0
- tnfr/node.py +763 -0
- tnfr/node.pyi +139 -0
- tnfr/observers.py +255 -130
- tnfr/observers.pyi +31 -0
- tnfr/ontosim.py +144 -137
- tnfr/ontosim.pyi +28 -0
- tnfr/operators/__init__.py +1672 -0
- tnfr/operators/__init__.pyi +31 -0
- tnfr/operators/algebra.py +277 -0
- tnfr/operators/canonical_patterns.py +420 -0
- tnfr/operators/cascade.py +267 -0
- tnfr/operators/cycle_detection.py +358 -0
- tnfr/operators/definitions.py +4108 -0
- tnfr/operators/definitions.pyi +78 -0
- tnfr/operators/grammar.py +1164 -0
- tnfr/operators/grammar.pyi +140 -0
- tnfr/operators/hamiltonian.py +710 -0
- tnfr/operators/health_analyzer.py +809 -0
- tnfr/operators/jitter.py +272 -0
- tnfr/operators/jitter.pyi +11 -0
- tnfr/operators/lifecycle.py +314 -0
- tnfr/operators/metabolism.py +618 -0
- tnfr/operators/metrics.py +2138 -0
- tnfr/operators/network_analysis/__init__.py +27 -0
- tnfr/operators/network_analysis/source_detection.py +186 -0
- tnfr/operators/nodal_equation.py +395 -0
- tnfr/operators/pattern_detection.py +660 -0
- tnfr/operators/patterns.py +669 -0
- tnfr/operators/postconditions/__init__.py +38 -0
- tnfr/operators/postconditions/mutation.py +236 -0
- tnfr/operators/preconditions/__init__.py +1226 -0
- tnfr/operators/preconditions/coherence.py +305 -0
- tnfr/operators/preconditions/dissonance.py +236 -0
- tnfr/operators/preconditions/emission.py +128 -0
- tnfr/operators/preconditions/mutation.py +580 -0
- tnfr/operators/preconditions/reception.py +125 -0
- tnfr/operators/preconditions/resonance.py +364 -0
- tnfr/operators/registry.py +74 -0
- tnfr/operators/registry.pyi +9 -0
- tnfr/operators/remesh.py +1809 -0
- tnfr/operators/remesh.pyi +26 -0
- tnfr/operators/structural_units.py +268 -0
- tnfr/operators/unified_grammar.py +105 -0
- tnfr/parallel/__init__.py +54 -0
- tnfr/parallel/auto_scaler.py +234 -0
- tnfr/parallel/distributed.py +384 -0
- tnfr/parallel/engine.py +238 -0
- tnfr/parallel/gpu_engine.py +420 -0
- tnfr/parallel/monitoring.py +248 -0
- tnfr/parallel/partitioner.py +459 -0
- tnfr/py.typed +0 -0
- tnfr/recipes/__init__.py +22 -0
- tnfr/recipes/cookbook.py +743 -0
- tnfr/rng.py +178 -0
- tnfr/rng.pyi +26 -0
- tnfr/schemas/__init__.py +8 -0
- tnfr/schemas/grammar.json +94 -0
- tnfr/sdk/__init__.py +107 -0
- tnfr/sdk/__init__.pyi +19 -0
- tnfr/sdk/adaptive_system.py +173 -0
- tnfr/sdk/adaptive_system.pyi +21 -0
- tnfr/sdk/builders.py +370 -0
- tnfr/sdk/builders.pyi +51 -0
- tnfr/sdk/fluent.py +1121 -0
- tnfr/sdk/fluent.pyi +74 -0
- tnfr/sdk/templates.py +342 -0
- tnfr/sdk/templates.pyi +41 -0
- tnfr/sdk/utils.py +341 -0
- tnfr/secure_config.py +46 -0
- tnfr/security/__init__.py +70 -0
- tnfr/security/database.py +514 -0
- tnfr/security/subprocess.py +503 -0
- tnfr/security/validation.py +290 -0
- tnfr/selector.py +247 -0
- tnfr/selector.pyi +19 -0
- tnfr/sense.py +378 -0
- tnfr/sense.pyi +23 -0
- tnfr/services/__init__.py +17 -0
- tnfr/services/orchestrator.py +325 -0
- tnfr/sparse/__init__.py +39 -0
- tnfr/sparse/representations.py +492 -0
- tnfr/structural.py +705 -0
- tnfr/structural.pyi +83 -0
- tnfr/telemetry/__init__.py +35 -0
- tnfr/telemetry/cache_metrics.py +226 -0
- tnfr/telemetry/cache_metrics.pyi +64 -0
- tnfr/telemetry/nu_f.py +422 -0
- tnfr/telemetry/nu_f.pyi +108 -0
- tnfr/telemetry/verbosity.py +36 -0
- tnfr/telemetry/verbosity.pyi +15 -0
- tnfr/tokens.py +58 -0
- tnfr/tokens.pyi +36 -0
- tnfr/tools/__init__.py +20 -0
- tnfr/tools/domain_templates.py +478 -0
- tnfr/tools/sequence_generator.py +846 -0
- tnfr/topology/__init__.py +13 -0
- tnfr/topology/asymmetry.py +151 -0
- tnfr/trace.py +543 -0
- tnfr/trace.pyi +42 -0
- tnfr/tutorials/__init__.py +38 -0
- tnfr/tutorials/autonomous_evolution.py +285 -0
- tnfr/tutorials/interactive.py +1576 -0
- tnfr/tutorials/structural_metabolism.py +238 -0
- tnfr/types.py +775 -0
- tnfr/types.pyi +357 -0
- tnfr/units.py +68 -0
- tnfr/units.pyi +13 -0
- tnfr/utils/__init__.py +282 -0
- tnfr/utils/__init__.pyi +215 -0
- tnfr/utils/cache.py +4223 -0
- tnfr/utils/cache.pyi +470 -0
- tnfr/utils/callbacks.py +375 -0
- tnfr/utils/callbacks.pyi +49 -0
- tnfr/utils/chunks.py +108 -0
- tnfr/utils/chunks.pyi +22 -0
- tnfr/utils/data.py +428 -0
- tnfr/utils/data.pyi +74 -0
- tnfr/utils/graph.py +85 -0
- tnfr/utils/graph.pyi +10 -0
- tnfr/utils/init.py +821 -0
- tnfr/utils/init.pyi +80 -0
- tnfr/utils/io.py +559 -0
- tnfr/utils/io.pyi +66 -0
- tnfr/utils/numeric.py +114 -0
- tnfr/utils/numeric.pyi +21 -0
- tnfr/validation/__init__.py +257 -0
- tnfr/validation/__init__.pyi +85 -0
- tnfr/validation/compatibility.py +460 -0
- tnfr/validation/compatibility.pyi +6 -0
- tnfr/validation/config.py +73 -0
- tnfr/validation/graph.py +139 -0
- tnfr/validation/graph.pyi +18 -0
- tnfr/validation/input_validation.py +755 -0
- tnfr/validation/invariants.py +712 -0
- tnfr/validation/rules.py +253 -0
- tnfr/validation/rules.pyi +44 -0
- tnfr/validation/runtime.py +279 -0
- tnfr/validation/runtime.pyi +28 -0
- tnfr/validation/sequence_validator.py +162 -0
- tnfr/validation/soft_filters.py +170 -0
- tnfr/validation/soft_filters.pyi +32 -0
- tnfr/validation/spectral.py +164 -0
- tnfr/validation/spectral.pyi +42 -0
- tnfr/validation/validator.py +1266 -0
- tnfr/validation/window.py +39 -0
- tnfr/validation/window.pyi +1 -0
- tnfr/visualization/__init__.py +98 -0
- tnfr/visualization/cascade_viz.py +256 -0
- tnfr/visualization/hierarchy.py +284 -0
- tnfr/visualization/sequence_plotter.py +784 -0
- tnfr/viz/__init__.py +60 -0
- tnfr/viz/matplotlib.py +278 -0
- tnfr/viz/matplotlib.pyi +35 -0
- tnfr-8.5.0.dist-info/METADATA +573 -0
- tnfr-8.5.0.dist-info/RECORD +353 -0
- tnfr-8.5.0.dist-info/entry_points.txt +3 -0
- tnfr-3.0.3.dist-info/licenses/LICENSE.txt → tnfr-8.5.0.dist-info/licenses/LICENSE.md +1 -1
- tnfr/constants.py +0 -183
- tnfr/dynamics.py +0 -543
- tnfr/helpers.py +0 -198
- tnfr/main.py +0 -37
- tnfr/operators.py +0 -296
- tnfr-3.0.3.dist-info/METADATA +0 -35
- tnfr-3.0.3.dist-info/RECORD +0 -13
- {tnfr-3.0.3.dist-info → tnfr-8.5.0.dist-info}/WHEEL +0 -0
- {tnfr-3.0.3.dist-info → tnfr-8.5.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,2009 @@
|
|
|
1
|
+
r"""Coherence metrics for TNFR networks.
|
|
2
|
+
|
|
3
|
+
This module implements the coherence operator :math:`\hat{C}` and related
|
|
4
|
+
metrics for measuring structural stability in resonant fractal networks.
|
|
5
|
+
|
|
6
|
+
Mathematical Foundation
|
|
7
|
+
-----------------------
|
|
8
|
+
|
|
9
|
+
The **coherence operator** :math:`\hat{C}` is a Hermitian operator on the Hilbert
|
|
10
|
+
space :math:`H_{\text{NFR}}` with spectral decomposition:
|
|
11
|
+
|
|
12
|
+
.. math::
|
|
13
|
+
\hat{C} = \sum_i \lambda_i |\phi_i\rangle\langle\phi_i|
|
|
14
|
+
|
|
15
|
+
where :math:`\lambda_i \geq 0` are coherence eigenvalues and :math:`|\phi_i\rangle`
|
|
16
|
+
are coherence eigenstates (maximally stable configurations).
|
|
17
|
+
|
|
18
|
+
**Properties**:
|
|
19
|
+
|
|
20
|
+
1. **Hermiticity**: :math:`\hat{C}^\dagger = \hat{C}` (ensures real eigenvalues)
|
|
21
|
+
2. **Positivity**: :math:`\langle\psi|\hat{C}|\psi\rangle \geq 0` (coherence is non-negative)
|
|
22
|
+
3. **Boundedness**: :math:`\|\hat{C}\| \leq M` (prevents runaway growth)
|
|
23
|
+
|
|
24
|
+
In the discrete node basis :math:`\{|i\rangle\}`, matrix elements are approximated:
|
|
25
|
+
|
|
26
|
+
.. math::
|
|
27
|
+
w_{ij} \approx \langle i | \hat{C} | j \rangle
|
|
28
|
+
|
|
29
|
+
The **total coherence** is computed as the trace:
|
|
30
|
+
|
|
31
|
+
.. math::
|
|
32
|
+
C(t) = \text{Tr}(\hat{C}\rho) = \sum_i w_{ii} \rho_i
|
|
33
|
+
|
|
34
|
+
where :math:`\rho_i` is the density of node :math:`i` (typically uniform: :math:`\rho_i = 1/N`).
|
|
35
|
+
|
|
36
|
+
Similarity Components
|
|
37
|
+
---------------------
|
|
38
|
+
|
|
39
|
+
Matrix elements :math:`w_{ij}` are computed from four structural similarity components:
|
|
40
|
+
|
|
41
|
+
.. math::
|
|
42
|
+
w_{ij} = w_{\text{phase}} \cdot s_{\text{phase}}(i,j)
|
|
43
|
+
+ w_{\text{EPI}} \cdot s_{\text{EPI}}(i,j)
|
|
44
|
+
+ w_{\nu_f} \cdot s_{\nu_f}(i,j)
|
|
45
|
+
+ w_{\text{Si}} \cdot s_{\text{Si}}(i,j)
|
|
46
|
+
|
|
47
|
+
where:
|
|
48
|
+
|
|
49
|
+
- :math:`s_{\text{phase}}(i,j) = \frac{1}{2}\left(1 + \cos(\theta_i - \theta_j)\right)` : Phase similarity
|
|
50
|
+
- :math:`s_{\text{EPI}}(i,j) = 1 - \frac{|\text{EPI}_i - \text{EPI}_j|}{\Delta_{\text{EPI}}}` : Structural form similarity
|
|
51
|
+
- :math:`s_{\nu_f}(i,j) = 1 - \frac{|\nu_{f,i} - \nu_{f,j}|}{\Delta_{\nu_f}}` : Frequency similarity
|
|
52
|
+
- :math:`s_{\text{Si}}(i,j) = 1 - |\text{Si}_i - \text{Si}_j|` : Stability similarity
|
|
53
|
+
|
|
54
|
+
and :math:`w_{\text{phase}}, w_{\text{EPI}}, w_{\nu_f}, w_{\text{Si}}` are structural weights
|
|
55
|
+
(default: 0.25 each).
|
|
56
|
+
|
|
57
|
+
Implementation Map
|
|
58
|
+
------------------
|
|
59
|
+
|
|
60
|
+
**Core Functions**:
|
|
61
|
+
|
|
62
|
+
- :func:`coherence_matrix` : Constructs :math:`W \approx \hat{C}` matrix representation
|
|
63
|
+
- :func:`compute_coherence` : Computes :math:`C(t) = \text{Tr}(\hat{C}\rho)` from graph (imported from `.common`)
|
|
64
|
+
- :func:`compute_wij_phase_epi_vf_si` : Computes similarity components :math:`(s_{\text{phase}}, s_{\text{EPI}}, s_{\nu_f}, s_{\text{Si}})`
|
|
65
|
+
|
|
66
|
+
**Helper Functions**:
|
|
67
|
+
|
|
68
|
+
- :func:`_combine_similarity` : Weighted combination: :math:`w_{ij} = \sum_k w_k s_k`
|
|
69
|
+
- :func:`_compute_wij_phase_epi_vf_si_vectorized` : Vectorized computation for all pairs
|
|
70
|
+
- :func:`_wij_vectorized` : Builds full matrix with NumPy acceleration
|
|
71
|
+
- :func:`_wij_sparse` : Builds sparse matrix for large networks
|
|
72
|
+
|
|
73
|
+
**Parallel Computation**:
|
|
74
|
+
|
|
75
|
+
- :func:`_coherence_matrix_parallel` : Multi-process matrix construction
|
|
76
|
+
- :func:`_parallel_wij_worker` : Worker function for parallel chunks
|
|
77
|
+
|
|
78
|
+
Theoretical References
|
|
79
|
+
----------------------
|
|
80
|
+
|
|
81
|
+
See the following for complete mathematical derivation:
|
|
82
|
+
|
|
83
|
+
- **Mathematical Foundations**: `docs/source/theory/mathematical_foundations.md` §3.1
|
|
84
|
+
- **Coherence Operator Theory**: Sections 3.1 (operator definition), 3.1.1 (implementation bridge)
|
|
85
|
+
- **Spectral Properties**: Section 3.1 on eigenvalue decomposition
|
|
86
|
+
- **Style Guide**: `docs/source/style_guide.md` for notation conventions
|
|
87
|
+
|
|
88
|
+
Examples
|
|
89
|
+
--------
|
|
90
|
+
|
|
91
|
+
**Basic coherence computation**:
|
|
92
|
+
|
|
93
|
+
>>> import networkx as nx
|
|
94
|
+
>>> from tnfr.metrics.coherence import coherence_matrix
|
|
95
|
+
>>> from tnfr.metrics.common import compute_coherence
|
|
96
|
+
>>> G = nx.Graph()
|
|
97
|
+
>>> G.add_edge("a", "b")
|
|
98
|
+
>>> G.nodes["a"].update({"EPI": 0.5, "nu_f": 0.8, "phase": 0.0, "Si": 0.7})
|
|
99
|
+
>>> G.nodes["b"].update({"EPI": 0.6, "nu_f": 0.7, "phase": 0.1, "Si": 0.8})
|
|
100
|
+
>>> C = compute_coherence(G)
|
|
101
|
+
>>> 0 <= C <= 1
|
|
102
|
+
True
|
|
103
|
+
|
|
104
|
+
**Matrix representation**:
|
|
105
|
+
|
|
106
|
+
>>> nodes, W = coherence_matrix(G)
|
|
107
|
+
>>> len(nodes) == 2
|
|
108
|
+
True
|
|
109
|
+
>>> W.shape == (2, 2) # Assuming numpy backend
|
|
110
|
+
True
|
|
111
|
+
|
|
112
|
+
**Worked examples** with step-by-step calculations:
|
|
113
|
+
|
|
114
|
+
See `docs/source/examples/worked_examples.md` Example 2 for detailed coherence
|
|
115
|
+
matrix element computation walkthrough.
|
|
116
|
+
|
|
117
|
+
Notes
|
|
118
|
+
-----
|
|
119
|
+
|
|
120
|
+
- Matrix element computation can use different backends (NumPy, JAX, PyTorch)
|
|
121
|
+
- Sparse matrix format is automatically selected for large networks (>1000 nodes)
|
|
122
|
+
- Parallel computation is enabled for networks with >500 nodes by default
|
|
123
|
+
- Trigonometric values are cached to avoid redundant cos/sin evaluations
|
|
124
|
+
|
|
125
|
+
See Also
|
|
126
|
+
--------
|
|
127
|
+
|
|
128
|
+
compute_coherence : Total coherence :math:`C(t)` computation
|
|
129
|
+
sense_index.compute_Si : Sense Index :math:`\text{Si}` computation
|
|
130
|
+
observers.kuramoto_order : Kuramoto order parameter :math:`r`
|
|
131
|
+
observers.phase_sync : Phase synchronization metrics
|
|
132
|
+
"""
|
|
133
|
+
|
|
134
|
+
from __future__ import annotations
|
|
135
|
+
|
|
136
|
+
import math
|
|
137
|
+
from collections.abc import Callable, Iterable, Mapping, Sequence
|
|
138
|
+
from concurrent.futures import ProcessPoolExecutor
|
|
139
|
+
from dataclasses import dataclass
|
|
140
|
+
from types import ModuleType
|
|
141
|
+
from typing import Any, MutableMapping, cast
|
|
142
|
+
|
|
143
|
+
from .._compat import TypeAlias
|
|
144
|
+
from ..alias import collect_attr, collect_theta_attr, get_attr, set_attr
|
|
145
|
+
from ..utils import CallbackEvent, callback_manager
|
|
146
|
+
from ..constants import get_param
|
|
147
|
+
from ..constants.aliases import (
|
|
148
|
+
ALIAS_D2VF,
|
|
149
|
+
ALIAS_DNFR,
|
|
150
|
+
ALIAS_DSI,
|
|
151
|
+
ALIAS_DVF,
|
|
152
|
+
ALIAS_DEPI,
|
|
153
|
+
ALIAS_EPI,
|
|
154
|
+
ALIAS_SI,
|
|
155
|
+
ALIAS_VF,
|
|
156
|
+
)
|
|
157
|
+
from ..glyph_history import append_metric, ensure_history
|
|
158
|
+
from ..utils import clamp01
|
|
159
|
+
from ..observers import (
|
|
160
|
+
DEFAULT_GLYPH_LOAD_SPAN,
|
|
161
|
+
DEFAULT_WBAR_SPAN,
|
|
162
|
+
glyph_load,
|
|
163
|
+
kuramoto_order,
|
|
164
|
+
phase_sync,
|
|
165
|
+
)
|
|
166
|
+
from ..sense import sigma_vector
|
|
167
|
+
from ..types import (
|
|
168
|
+
CoherenceMetric,
|
|
169
|
+
FloatArray,
|
|
170
|
+
FloatMatrix,
|
|
171
|
+
GlyphLoadDistribution,
|
|
172
|
+
HistoryState,
|
|
173
|
+
NodeId,
|
|
174
|
+
ParallelWijPayload,
|
|
175
|
+
SigmaVector,
|
|
176
|
+
TNFRGraph,
|
|
177
|
+
)
|
|
178
|
+
from ..utils import (
|
|
179
|
+
ensure_node_index_map,
|
|
180
|
+
get_logger,
|
|
181
|
+
get_numpy,
|
|
182
|
+
normalize_weights,
|
|
183
|
+
resolve_chunk_size,
|
|
184
|
+
)
|
|
185
|
+
from .common import compute_coherence, min_max_range
|
|
186
|
+
from .trig_cache import compute_theta_trig, get_trig_cache
|
|
187
|
+
|
|
188
|
+
# Module-level logger for coherence-metric diagnostics.
logger = get_logger(__name__)

# History key under which the stabilizer glyph-load fraction is recorded.
GLYPH_LOAD_STABILIZERS_KEY = "glyph_load_stabilizers"
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
@dataclass
class SimilarityInputs:
    """Similarity inputs and optional trigonometric caches.

    Bundles the per-node structural values consumed by
    :func:`compute_wij_phase_epi_vf_si`.  ``cos_vals``/``sin_vals`` start as
    ``None`` and are filled in lazily (the consumer mutates this object to
    memoize the trigonometric values for subsequent calls).
    """

    th_vals: Sequence[float]  # node phases (theta, radians)
    epi_vals: Sequence[float]  # EPI value per node
    vf_vals: Sequence[float]  # structural frequency per node
    si_vals: Sequence[float]  # sense-index value per node
    cos_vals: Sequence[float] | None = None  # cached cos(theta), filled lazily
    sin_vals: Sequence[float] | None = None  # cached sin(theta), filled lazily
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
# Dense coherence matrix: full N x N nested-list representation.
CoherenceMatrixDense = list[list[float]]
# Sparse coherence matrix: (row, col, weight) triples for nonzero entries.
CoherenceMatrixSparse = list[tuple[int, int, float]]
# Either matrix payload form, as stored in history/telemetry.
CoherenceMatrixPayload = CoherenceMatrixDense | CoherenceMatrixSparse
# Weight input accepted by phase-sync helpers: flat weights or either matrix form.
PhaseSyncWeights: TypeAlias = (
    Sequence[float] | CoherenceMatrixSparse | CoherenceMatrixDense
)

# Scalar similarity components (s_phase, s_epi, s_vf, s_si) for one pair.
SimilarityComponents = tuple[float, float, float, float]
# Vectorized similarity components: one N x N matrix per component.
VectorizedComponents: TypeAlias = tuple[
    FloatMatrix, FloatMatrix, FloatMatrix, FloatMatrix
]
# Value that may be a plain float (pairwise mode) or an array (vectorized mode).
ScalarOrArray: TypeAlias = float | FloatArray
# Argument bundle for a stability-computation chunk (values, optional
# derivative sequences, then three float thresholds).
StabilityChunkArgs = tuple[
    Sequence[float],
    Sequence[float],
    Sequence[float],
    Sequence[float | None],
    Sequence[float],
    Sequence[float | None],
    Sequence[float | None],
    float,
    float,
    float,
]
# Chunk result: two counters, two accumulators and three per-node lists.
StabilityChunkResult = tuple[
    int,
    int,
    float,
    float,
    list[float],
    list[float],
    list[float],
]

# A metric value, or a zero-argument provider evaluated lazily, paired with
# the history key it is recorded under.
MetricValue: TypeAlias = CoherenceMetric
MetricProvider = Callable[[], MetricValue]
MetricRecord: TypeAlias = tuple[MetricValue | MetricProvider, str]
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
def _compute_wij_phase_epi_vf_si_vectorized(
|
|
245
|
+
epi: FloatArray,
|
|
246
|
+
vf: FloatArray,
|
|
247
|
+
si: FloatArray,
|
|
248
|
+
cos_th: FloatArray,
|
|
249
|
+
sin_th: FloatArray,
|
|
250
|
+
epi_range: float,
|
|
251
|
+
vf_range: float,
|
|
252
|
+
np: ModuleType,
|
|
253
|
+
) -> VectorizedComponents:
|
|
254
|
+
"""Vectorized computation of similarity components.
|
|
255
|
+
|
|
256
|
+
All parameters are expected to be NumPy arrays already cast to ``float``
|
|
257
|
+
when appropriate. ``epi_range`` and ``vf_range`` are normalized inside the
|
|
258
|
+
function to avoid division by zero.
|
|
259
|
+
"""
|
|
260
|
+
|
|
261
|
+
epi_range = epi_range if epi_range > 0 else 1.0
|
|
262
|
+
vf_range = vf_range if vf_range > 0 else 1.0
|
|
263
|
+
s_phase = 0.5 * (
|
|
264
|
+
1.0 + cos_th[:, None] * cos_th[None, :] + sin_th[:, None] * sin_th[None, :]
|
|
265
|
+
)
|
|
266
|
+
s_epi = 1.0 - np.abs(epi[:, None] - epi[None, :]) / epi_range
|
|
267
|
+
s_vf = 1.0 - np.abs(vf[:, None] - vf[None, :]) / vf_range
|
|
268
|
+
s_si = 1.0 - np.abs(si[:, None] - si[None, :])
|
|
269
|
+
return s_phase, s_epi, s_vf, s_si
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
def compute_wij_phase_epi_vf_si(
    inputs: SimilarityInputs,
    i: int | None = None,
    j: int | None = None,
    *,
    trig: Any | None = None,
    G: TNFRGraph | None = None,
    nodes: Sequence[NodeId] | None = None,
    epi_range: float = 1.0,
    vf_range: float = 1.0,
    np: ModuleType | None = None,
) -> SimilarityComponents | VectorizedComponents:
    r"""Compute structural similarity components for coherence matrix elements.

    Returns four similarity components :math:`(s_{\text{phase}}, s_{\text{EPI}}, s_{\nu_f}, s_{\text{Si}})`
    that approximate coherence operator matrix elements :math:`w_{ij} \approx \langle i | \hat{C} | j \rangle`.

    Mathematical Foundation
    -----------------------

    Each similarity component measures structural resemblance between nodes
    :math:`i` and :math:`j` in a specific dimension:

    **Phase similarity** (synchronization):

    .. math::
        s_{\text{phase}}(i,j) = \frac{1}{2}\left(1 + \cos(\theta_i - \theta_j)\right)

    Range: [0, 1] where 1 = perfect synchrony, 0 = anti-phase.

    **EPI similarity** (structural form):

    .. math::
        s_{\text{EPI}}(i,j) = 1 - \frac{|\text{EPI}_i - \text{EPI}_j|}{\Delta_{\text{EPI}}}

    Range: [0, 1] where 1 = identical structure, 0 = maximally different.

    **Frequency similarity** (reorganization rate):

    .. math::
        s_{\nu_f}(i,j) = 1 - \frac{|\nu_{f,i} - \nu_{f,j}|}{\Delta_{\nu_f}}

    Range: [0, 1] where 1 = matching frequencies.

    **Si similarity** (stability):

    .. math::
        s_{\text{Si}}(i,j) = 1 - |\text{Si}_i - \text{Si}_j|

    Range: [0, 1] where 1 = equal reorganization stability.

    These components are combined via weighted sum to obtain :math:`w_{ij}`:

    .. math::
        w_{ij} = w_{\text{phase}} \cdot s_{\text{phase}} + w_{\text{EPI}} \cdot s_{\text{EPI}}
               + w_{\nu_f} \cdot s_{\nu_f} + w_{\text{Si}} \cdot s_{\text{Si}}

    where :math:`w_{ij} \approx \langle i | \hat{C} | j \rangle` (coherence operator matrix element).

    Parameters
    ----------
    inputs : SimilarityInputs
        Container with structural data:

        - ``th_vals`` : Sequence[float] - Phase values :math:`\theta` in radians
        - ``epi_vals`` : Sequence[float] - EPI values
        - ``vf_vals`` : Sequence[float] - Structural frequencies :math:`\nu_f` in Hz_str
        - ``si_vals`` : Sequence[float] - Sense Index values
        - ``cos_vals`` : Sequence[float] | None - Precomputed :math:`\cos\theta` (optional cache)
        - ``sin_vals`` : Sequence[float] | None - Precomputed :math:`\sin\theta` (optional cache)

    i : int | None, optional
        Index of first node for pairwise computation. If None, vectorized mode is used.
    j : int | None, optional
        Index of second node for pairwise computation. If None, vectorized mode is used.
    trig : Any | None, optional
        Trigonometric cache object with ``cos`` and ``sin`` dictionaries. If None,
        computed on demand (derived from ``G`` when a graph is supplied).
    G : TNFRGraph | None, optional
        Source graph (used to retrieve cached trigonometric values if available).
    nodes : Sequence[NodeId] | None, optional
        Node identifiers corresponding to indices in ``inputs`` arrays.
    epi_range : float, default=1.0
        Normalization range :math:`\Delta_{\text{EPI}}` for EPI similarity.
        Should be :math:`\text{EPI}_{\max} - \text{EPI}_{\min}`.
    vf_range : float, default=1.0
        Normalization range :math:`\Delta_{\nu_f}` for frequency similarity.
        Should be :math:`\nu_{f,\max} - \nu_{f,\min}`.
    np : ModuleType | None, optional
        NumPy-like module (numpy, jax.numpy, torch) for vectorized computation.
        If provided with ``i=None, j=None``, returns vectorized arrays for all pairs.

    Returns
    -------
    SimilarityComponents or VectorizedComponents
        **Pairwise mode** (``i`` and ``j`` provided):
            tuple of ``(s_phase, s_epi, s_vf, s_si)`` floats — normalized
            similarity scores :math:`\in [0,1]` for the pair ``(i, j)``.

        **Vectorized mode** (``i=None``, ``j=None``, ``np`` provided):
            tuple of four matrices of shape ``(N, N)`` containing all
            pairwise similarities.

    Raises
    ------
    ValueError
        If pairwise mode is requested (``i`` or ``j`` provided) but both are
        not specified.

    See Also
    --------
    coherence_matrix : Constructs full :math:`W \approx \hat{C}` matrix
    compute_coherence : Computes :math:`C(t) = \text{Tr}(\hat{C}\rho)`
    _combine_similarity : Weighted combination of similarity components

    Notes
    -----

    **Performance**:

    - Vectorized mode (with ``np``) is ~10-100x faster for large networks
    - Trigonometric caching avoids redundant cos/sin evaluations
    - Use ``get_trig_cache(G)`` to populate cache before repeated calls

    **Normalization**:

    - ``epi_range`` and ``vf_range`` should reflect actual network ranges for proper scaling
    - If ranges are 0, defaults to 1.0 to avoid division by zero
    - Si similarity uses absolute difference (already bounded to [0,1])

    **Side effects**: computed cos/sin sequences are written back onto
    ``inputs`` (``cos_vals``/``sin_vals``) so later calls skip the work.

    References
    ----------
    .. [1] Mathematical Foundations, §3.1.1 - Implementation Bridge
    .. [2] docs/source/theory/mathematical_foundations.md#311-implementation-bridge-theory-to-code
    .. [3] docs/source/examples/worked_examples.md - Example 2: Coherence Matrix Elements

    Examples
    --------

    **Pairwise computation**:

    >>> from tnfr.metrics.coherence import compute_wij_phase_epi_vf_si, SimilarityInputs
    >>> inputs = SimilarityInputs(
    ...     th_vals=[0.0, 0.1],
    ...     epi_vals=[0.5, 0.6],
    ...     vf_vals=[0.8, 0.7],
    ...     si_vals=[0.7, 0.8]
    ... )
    >>> s_phase, s_epi, s_vf, s_si = compute_wij_phase_epi_vf_si(
    ...     inputs, i=0, j=1, epi_range=1.0, vf_range=1.0
    ... )
    >>> 0.9 < s_phase < 1.0  # Nearly synchronized (theta_diff = 0.1 rad)
    True
    >>> 0.8 < s_epi < 1.0  # Similar EPI values
    True

    **Vectorized computation**:

    >>> import numpy as np
    >>> S_phase, S_epi, S_vf, S_si = compute_wij_phase_epi_vf_si(
    ...     inputs, epi_range=1.0, vf_range=1.0, np=np
    ... )
    >>> S_phase.shape  # All pairwise similarities
    (2, 2)
    >>> np.allclose(S_phase[0, 1], S_phase[1, 0])  # Symmetric
    True

    **With graph and caching**:

    >>> import networkx as nx
    >>> from tnfr.metrics.trig_cache import get_trig_cache
    >>> G = nx.Graph()
    >>> G.add_edge(0, 1)
    >>> G.nodes[0].update({"phase": 0.0, "EPI": 0.5, "nu_f": 0.8, "Si": 0.7})
    >>> G.nodes[1].update({"phase": 0.1, "EPI": 0.6, "nu_f": 0.7, "Si": 0.8})
    >>> trig = get_trig_cache(G, np=np)  # Precompute cos/sin
    >>> # ... use trig in repeated calls for efficiency
    """

    # Resolve a trig cache: an explicit ``trig`` wins; otherwise derive one
    # from the graph when available.
    trig = trig or (get_trig_cache(G, np=np) if G is not None else None)
    cos_vals = inputs.cos_vals
    sin_vals = inputs.sin_vals
    if cos_vals is None or sin_vals is None:
        # No cached cos/sin on ``inputs``: compute them once here.
        th_vals = inputs.th_vals
        # Keys are node ids when provided, else positional indices.
        pairs = zip(nodes or range(len(th_vals)), th_vals)
        trig_local = compute_theta_trig(pairs, np=np)
        index_iter = nodes if nodes is not None else range(len(th_vals))
        if trig is not None and nodes is not None:
            # Prefer graph-level cached values; fall back to the local
            # computation for any node missing from the shared cache.
            cos_vals = [trig.cos.get(n, trig_local.cos[n]) for n in nodes]
            sin_vals = [trig.sin.get(n, trig_local.sin[n]) for n in nodes]
        else:
            cos_vals = [trig_local.cos[i] for i in index_iter]
            sin_vals = [trig_local.sin[i] for i in index_iter]
        # Intentional mutation: memoize the trig values on ``inputs`` so
        # repeated calls skip this whole branch.
        inputs.cos_vals = cos_vals
        inputs.sin_vals = sin_vals

    epi_vals = inputs.epi_vals
    vf_vals = inputs.vf_vals
    si_vals = inputs.si_vals

    if np is not None and i is None and j is None:
        # Vectorized mode: cast everything to float arrays and delegate to
        # the broadcasting implementation for all N x N pairs at once.
        epi = cast(FloatArray, np.asarray(epi_vals, dtype=float))
        vf = cast(FloatArray, np.asarray(vf_vals, dtype=float))
        si = cast(FloatArray, np.asarray(si_vals, dtype=float))
        cos_th = cast(FloatArray, np.asarray(cos_vals, dtype=float))
        sin_th = cast(FloatArray, np.asarray(sin_vals, dtype=float))
        return _compute_wij_phase_epi_vf_si_vectorized(
            epi,
            vf,
            si,
            cos_th,
            sin_th,
            epi_range,
            vf_range,
            np,
        )

    if i is None or j is None:
        raise ValueError("i and j are required for non-vectorized computation")
    # Guard against zero-width ranges to avoid division by zero.
    epi_range = epi_range if epi_range > 0 else 1.0
    vf_range = vf_range if vf_range > 0 else 1.0
    cos_i = cos_vals[i]
    sin_i = sin_vals[i]
    cos_j = cos_vals[j]
    sin_j = sin_vals[j]
    # cos(θi - θj) expanded via the angle-difference identity.
    s_phase = 0.5 * (1.0 + (cos_i * cos_j + sin_i * sin_j))
    s_epi = 1.0 - abs(epi_vals[i] - epi_vals[j]) / epi_range
    s_vf = 1.0 - abs(vf_vals[i] - vf_vals[j]) / vf_range
    s_si = 1.0 - abs(si_vals[i] - si_vals[j])
    return s_phase, s_epi, s_vf, s_si
|
|
500
|
+
|
|
501
|
+
|
|
502
|
+
def _combine_similarity(
    s_phase: ScalarOrArray,
    s_epi: ScalarOrArray,
    s_vf: ScalarOrArray,
    s_si: ScalarOrArray,
    phase_w: float,
    epi_w: float,
    vf_w: float,
    si_w: float,
    np: ModuleType | None = None,
) -> ScalarOrArray:
    """Combine similarity components into coherence weight wᵢⱼ ≈ ⟨i|Ĉ|j⟩.

    The four component similarities are blended as a weighted sum and the
    result is clamped into ``[0, 1]`` so the operator stays bounded.

    See: Mathematical Foundations §3.1.1 for spectral projection details.
    """
    blended = (
        phase_w * s_phase
        + epi_w * s_epi
        + vf_w * s_vf
        + si_w * s_si
    )
    if np is None:
        # Scalar path: clamp with the shared helper.
        return clamp01(blended)
    # Array path: elementwise clamp via NumPy.
    return cast(FloatArray, np.clip(blended, 0.0, 1.0))
|
|
523
|
+
|
|
524
|
+
|
|
525
|
+
def _wij_components_weights(
    G: TNFRGraph,
    nodes: Sequence[NodeId] | None,
    inputs: SimilarityInputs,
    wnorm: Mapping[str, float],
    i: int | None = None,
    j: int | None = None,
    epi_range: float = 1.0,
    vf_range: float = 1.0,
    np: ModuleType | None = None,
) -> tuple[
    ScalarOrArray,
    ScalarOrArray,
    ScalarOrArray,
    ScalarOrArray,
    float,
    float,
    float,
    float,
]:
    """Return similarity components together with their weights.

    Deriving the components and looking up the normalized weights in one
    place keeps the vectorized and loop-based implementations consistent.
    """
    components = compute_wij_phase_epi_vf_si(
        inputs,
        i,
        j,
        G=G,
        nodes=nodes,
        epi_range=epi_range,
        vf_range=vf_range,
        np=np,
    )
    # Weight lookup order mirrors the component order: phase, epi, vf, si.
    weight_values = tuple(wnorm[key] for key in ("phase", "epi", "vf", "si"))
    return (*components, *weight_values)
|
|
567
|
+
|
|
568
|
+
|
|
569
|
+
def _wij_vectorized(
    G: TNFRGraph,
    nodes: Sequence[NodeId],
    inputs: SimilarityInputs,
    wnorm: Mapping[str, float],
    epi_min: float,
    epi_max: float,
    vf_min: float,
    vf_max: float,
    self_diag: bool,
    np: ModuleType,
) -> FloatMatrix:
    """Build the full wij matrix using NumPy-vectorized similarity terms."""
    # Collapse degenerate (non-positive) spans to 1.0 to avoid dividing by zero.
    epi_span = epi_max - epi_min
    vf_span = vf_max - vf_min
    epi_range = epi_span if epi_span > 0 else 1.0
    vf_range = vf_span if vf_span > 0 else 1.0

    comps = _wij_components_weights(
        G,
        nodes,
        inputs,
        wnorm,
        epi_range=epi_range,
        vf_range=vf_range,
        np=np,
    )
    # ``comps`` is (s_phase, s_epi, s_vf, s_si, phase_w, epi_w, vf_w, si_w),
    # exactly the positional signature of ``_combine_similarity``.
    wij_matrix = cast(FloatMatrix, _combine_similarity(*comps, np=np))

    # The diagonal encodes self-coherence: 1.0 when enabled, 0.0 otherwise.
    np.fill_diagonal(wij_matrix, 1.0 if self_diag else 0.0)
    return wij_matrix
|
|
612
|
+
|
|
613
|
+
|
|
614
|
+
def _compute_wij_value_raw(
    i: int,
    j: int,
    epi_vals: Sequence[float],
    vf_vals: Sequence[float],
    si_vals: Sequence[float],
    cos_vals: Sequence[float],
    sin_vals: Sequence[float],
    weights: tuple[float, float, float, float],
    epi_range: float,
    vf_range: float,
) -> float:
    """Scalar coherence weight for the node pair ``(i, j)``."""
    # Guard against degenerate normalization ranges.
    if epi_range <= 0:
        epi_range = 1.0
    if vf_range <= 0:
        vf_range = 1.0
    phase_w, epi_w, vf_w, si_w = weights
    # Phase similarity from the cosine of the angular difference, mapped to [0, 1].
    dot = cos_vals[i] * cos_vals[j] + sin_vals[i] * sin_vals[j]
    s_phase = 0.5 * (1.0 + dot)
    # Attribute similarities: 1 at equality, decreasing with normalized distance.
    s_epi = 1.0 - abs(epi_vals[i] - epi_vals[j]) / epi_range
    s_vf = 1.0 - abs(vf_vals[i] - vf_vals[j]) / vf_range
    s_si = 1.0 - abs(si_vals[i] - si_vals[j])
    blended = phase_w * s_phase + epi_w * s_epi + vf_w * s_vf + si_w * s_si
    return clamp01(blended)
|
|
639
|
+
|
|
640
|
+
|
|
641
|
+
# Process-local slot holding the immutable payload consumed by
# ``_parallel_wij_worker``; populated by ``_init_parallel_wij`` (run as the
# process-pool initializer) before any worker task executes.
_PARALLEL_WIJ_DATA: ParallelWijPayload | None = None
|
|
642
|
+
|
|
643
|
+
|
|
644
|
+
def _init_parallel_wij(data: ParallelWijPayload) -> None:
    """Store immutable state for parallel ``wij`` computation.

    Intended to run once per worker process (as a pool initializer) so that
    subsequent ``_parallel_wij_worker`` calls can read the shared payload
    without re-pickling it per task.
    """

    global _PARALLEL_WIJ_DATA
    _PARALLEL_WIJ_DATA = data
|
|
649
|
+
|
|
650
|
+
|
|
651
|
+
def _parallel_wij_worker(
    pairs: Sequence[tuple[int, int]],
) -> list[tuple[int, int, float]]:
    """Compute coherence weights for ``pairs`` using shared state."""
    data = _PARALLEL_WIJ_DATA
    if data is None:
        raise RuntimeError("Parallel coherence data not initialized")

    # Unpack the immutable payload installed by the pool initializer.
    epi_vals: Sequence[float] = data["epi_vals"]
    vf_vals: Sequence[float] = data["vf_vals"]
    si_vals: Sequence[float] = data["si_vals"]
    cos_vals: Sequence[float] = data["cos_vals"]
    sin_vals: Sequence[float] = data["sin_vals"]
    weights: tuple[float, float, float, float] = data["weights"]
    epi_range: float = data["epi_range"]
    vf_range: float = data["vf_range"]

    results: list[tuple[int, int, float]] = []
    for i, j in pairs:
        value = _compute_wij_value_raw(
            i,
            j,
            epi_vals,
            vf_vals,
            si_vals,
            cos_vals,
            sin_vals,
            weights,
            epi_range,
            vf_range,
        )
        results.append((i, j, value))
    return results
|
|
689
|
+
|
|
690
|
+
|
|
691
|
+
def _wij_loops(
    G: TNFRGraph,
    nodes: Sequence[NodeId],
    node_to_index: Mapping[NodeId, int],
    inputs: SimilarityInputs,
    wnorm: Mapping[str, float],
    epi_min: float,
    epi_max: float,
    vf_min: float,
    vf_max: float,
    neighbors_only: bool,
    self_diag: bool,
    n_jobs: int | None = 1,
) -> CoherenceMatrixDense:
    """Compute the dense coherence matrix with pure Python loops.

    Candidate pairs are either the graph's (deduplicated, undirected) edges
    when ``neighbors_only`` is set, or the full upper triangle. Pairs are
    evaluated serially, or fanned out to a process pool when ``n_jobs`` > 1.

    Parameters mirror :func:`_wij_vectorized`; ``epi_min``/``epi_max`` and
    ``vf_min``/``vf_max`` define the normalization ranges.
    """
    n = len(nodes)
    cos_vals = inputs.cos_vals
    sin_vals = inputs.sin_vals
    if cos_vals is None or sin_vals is None:
        # Derive phase trigonometry on demand and memoize it on ``inputs``.
        th_vals = inputs.th_vals
        trig_local = compute_theta_trig(zip(nodes, th_vals))
        cos_vals = [trig_local.cos[node] for node in nodes]
        sin_vals = [trig_local.sin[node] for node in nodes]
        inputs.cos_vals = cos_vals
        inputs.sin_vals = sin_vals
    assert cos_vals is not None
    assert sin_vals is not None
    # Materialize everything as lists: cheap indexing and picklable payloads.
    epi_vals = list(inputs.epi_vals)
    vf_vals = list(inputs.vf_vals)
    si_vals = list(inputs.si_vals)
    cos_vals_list = list(cos_vals)
    sin_vals_list = list(sin_vals)
    inputs.epi_vals = epi_vals
    inputs.vf_vals = vf_vals
    inputs.si_vals = si_vals
    inputs.cos_vals = cos_vals_list
    inputs.sin_vals = sin_vals_list
    wij = [[1.0 if (self_diag and i == j) else 0.0 for j in range(n)] for i in range(n)]
    epi_range = epi_max - epi_min if epi_max > epi_min else 1.0
    vf_range = vf_max - vf_min if vf_max > vf_min else 1.0
    weights = (
        float(wnorm["phase"]),
        float(wnorm["epi"]),
        float(wnorm["vf"]),
        float(wnorm["si"]),
    )
    pair_list: list[tuple[int, int]] = []
    if neighbors_only:
        # Deduplicate undirected edges into canonical (low, high) index pairs.
        seen: set[tuple[int, int]] = set()
        for u, v in G.edges():
            i = node_to_index[u]
            j = node_to_index[v]
            if i == j:
                continue
            pair = (i, j) if i < j else (j, i)
            if pair in seen:
                continue
            seen.add(pair)
            pair_list.append(pair)
    else:
        for i in range(n):
            for j in range(i + 1, n):
                pair_list.append((i, j))

    total_pairs = len(pair_list)
    max_workers = 1
    if n_jobs is not None:
        try:
            max_workers = int(n_jobs)
        except (TypeError, ValueError):
            max_workers = 1
    if max_workers <= 1 or total_pairs == 0:
        # Serial path: fill the symmetric matrix pair by pair.
        for i, j in pair_list:
            wij_ij = _compute_wij_value_raw(
                i,
                j,
                epi_vals,
                vf_vals,
                si_vals,
                cos_vals,
                sin_vals,
                weights,
                epi_range,
                vf_range,
            )
            wij[i][j] = wij[j][i] = wij_ij
        return wij

    approx_chunk = math.ceil(total_pairs / max_workers) if max_workers else None
    chunk_size = resolve_chunk_size(
        approx_chunk,
        total_pairs,
        minimum=1,
    )
    payload: ParallelWijPayload = {
        "epi_vals": tuple(epi_vals),
        "vf_vals": tuple(vf_vals),
        "si_vals": tuple(si_vals),
        "cos_vals": tuple(cos_vals),
        "sin_vals": tuple(sin_vals),
        "weights": weights,
        "epi_range": float(epi_range),
        "vf_range": float(vf_range),
    }

    # Fix: pass the module-level initializer with ``initargs`` instead of a
    # local closure. A closure defined inside this function cannot be pickled
    # under the "spawn" start method (the default on Windows and macOS), which
    # made every worker process fail on startup.
    with ProcessPoolExecutor(
        max_workers=max_workers,
        initializer=_init_parallel_wij,
        initargs=(payload,),
    ) as executor:
        futures = []
        for start in range(0, total_pairs, chunk_size):
            chunk = pair_list[start : start + chunk_size]
            futures.append(executor.submit(_parallel_wij_worker, chunk))
        for future in futures:
            for i, j, value in future.result():
                wij[i][j] = wij[j][i] = value
    return wij
|
|
807
|
+
|
|
808
|
+
|
|
809
|
+
def _compute_stats(
|
|
810
|
+
values: Iterable[float] | Any,
|
|
811
|
+
row_sum: Iterable[float] | Any,
|
|
812
|
+
n: int,
|
|
813
|
+
self_diag: bool,
|
|
814
|
+
np: ModuleType | None = None,
|
|
815
|
+
) -> tuple[float, float, float, list[float], int]:
|
|
816
|
+
"""Return aggregate statistics for ``values`` and normalized row sums.
|
|
817
|
+
|
|
818
|
+
``values`` and ``row_sum`` can be any iterables. They are normalized to
|
|
819
|
+
either NumPy arrays or Python lists depending on the availability of
|
|
820
|
+
NumPy. The computation then delegates to the appropriate numerical
|
|
821
|
+
functions with minimal branching.
|
|
822
|
+
"""
|
|
823
|
+
|
|
824
|
+
if np is not None:
|
|
825
|
+
if not isinstance(values, np.ndarray):
|
|
826
|
+
values_arr = np.asarray(list(values), dtype=float)
|
|
827
|
+
else:
|
|
828
|
+
values_arr = cast(Any, values.astype(float))
|
|
829
|
+
if not isinstance(row_sum, np.ndarray):
|
|
830
|
+
row_arr = np.asarray(list(row_sum), dtype=float)
|
|
831
|
+
else:
|
|
832
|
+
row_arr = cast(Any, row_sum.astype(float))
|
|
833
|
+
count_val = int(values_arr.size)
|
|
834
|
+
min_val = float(values_arr.min()) if values_arr.size else 0.0
|
|
835
|
+
max_val = float(values_arr.max()) if values_arr.size else 0.0
|
|
836
|
+
mean_val = float(values_arr.mean()) if values_arr.size else 0.0
|
|
837
|
+
else:
|
|
838
|
+
values_list = list(values)
|
|
839
|
+
row_arr = list(row_sum)
|
|
840
|
+
count_val = len(values_list)
|
|
841
|
+
min_val = min(values_list) if values_list else 0.0
|
|
842
|
+
max_val = max(values_list) if values_list else 0.0
|
|
843
|
+
mean_val = sum(values_list) / len(values_list) if values_list else 0.0
|
|
844
|
+
|
|
845
|
+
row_count = n if self_diag else n - 1
|
|
846
|
+
denom = max(1, row_count)
|
|
847
|
+
if np is not None:
|
|
848
|
+
Wi = (row_arr / denom).astype(float).tolist() # type: ignore[operator]
|
|
849
|
+
else:
|
|
850
|
+
Wi = [float(row_arr[i]) / denom for i in range(n)]
|
|
851
|
+
return min_val, max_val, mean_val, Wi, count_val
|
|
852
|
+
|
|
853
|
+
|
|
854
|
+
def _coherence_numpy(
|
|
855
|
+
wij: Any,
|
|
856
|
+
mode: str,
|
|
857
|
+
thr: float,
|
|
858
|
+
np: ModuleType,
|
|
859
|
+
) -> tuple[int, Any, Any, CoherenceMatrixPayload]:
|
|
860
|
+
"""Aggregate coherence weights using vectorized operations.
|
|
861
|
+
|
|
862
|
+
Produces the structural weight matrix ``W`` along with the list of off
|
|
863
|
+
diagonal values and row sums ready for statistical analysis.
|
|
864
|
+
"""
|
|
865
|
+
|
|
866
|
+
n = wij.shape[0]
|
|
867
|
+
mask = ~np.eye(n, dtype=bool)
|
|
868
|
+
values = wij[mask]
|
|
869
|
+
row_sum = wij.sum(axis=1)
|
|
870
|
+
if mode == "dense":
|
|
871
|
+
W = wij.tolist()
|
|
872
|
+
else:
|
|
873
|
+
idx = np.where((wij >= thr) & mask)
|
|
874
|
+
W = [(int(i), int(j), float(wij[i, j])) for i, j in zip(idx[0], idx[1])]
|
|
875
|
+
return n, values, row_sum, W
|
|
876
|
+
|
|
877
|
+
|
|
878
|
+
def _coherence_python_worker(
|
|
879
|
+
args: tuple[Sequence[Sequence[float]], int, str, float],
|
|
880
|
+
) -> tuple[int, list[float], list[float], CoherenceMatrixSparse]:
|
|
881
|
+
rows, start, mode, thr = args
|
|
882
|
+
values: list[float] = []
|
|
883
|
+
row_sum: list[float] = []
|
|
884
|
+
sparse: list[tuple[int, int, float]] = []
|
|
885
|
+
dense_mode = mode == "dense"
|
|
886
|
+
|
|
887
|
+
for offset, row in enumerate(rows):
|
|
888
|
+
i = start + offset
|
|
889
|
+
total = 0.0
|
|
890
|
+
for j, w in enumerate(row):
|
|
891
|
+
total += w
|
|
892
|
+
if i != j:
|
|
893
|
+
values.append(w)
|
|
894
|
+
if not dense_mode and w >= thr:
|
|
895
|
+
sparse.append((i, j, w))
|
|
896
|
+
row_sum.append(total)
|
|
897
|
+
|
|
898
|
+
return start, values, row_sum, sparse
|
|
899
|
+
|
|
900
|
+
|
|
901
|
+
def _coherence_python(
|
|
902
|
+
wij: Sequence[Sequence[float]],
|
|
903
|
+
mode: str,
|
|
904
|
+
thr: float,
|
|
905
|
+
n_jobs: int | None = 1,
|
|
906
|
+
) -> tuple[int, list[float], list[float], CoherenceMatrixPayload]:
|
|
907
|
+
"""Aggregate coherence weights using pure Python loops."""
|
|
908
|
+
|
|
909
|
+
n = len(wij)
|
|
910
|
+
values: list[float] = []
|
|
911
|
+
row_sum = [0.0] * n
|
|
912
|
+
|
|
913
|
+
if n_jobs is not None:
|
|
914
|
+
try:
|
|
915
|
+
max_workers = int(n_jobs)
|
|
916
|
+
except (TypeError, ValueError):
|
|
917
|
+
max_workers = 1
|
|
918
|
+
else:
|
|
919
|
+
max_workers = 1
|
|
920
|
+
|
|
921
|
+
if max_workers <= 1:
|
|
922
|
+
if mode == "dense":
|
|
923
|
+
W: CoherenceMatrixDense = [list(row) for row in wij]
|
|
924
|
+
for i in range(n):
|
|
925
|
+
for j in range(n):
|
|
926
|
+
w = W[i][j]
|
|
927
|
+
if i != j:
|
|
928
|
+
values.append(w)
|
|
929
|
+
row_sum[i] += w
|
|
930
|
+
else:
|
|
931
|
+
W_sparse: CoherenceMatrixSparse = []
|
|
932
|
+
for i in range(n):
|
|
933
|
+
row_i = wij[i]
|
|
934
|
+
for j in range(n):
|
|
935
|
+
w = row_i[j]
|
|
936
|
+
if i != j:
|
|
937
|
+
values.append(w)
|
|
938
|
+
if w >= thr:
|
|
939
|
+
W_sparse.append((i, j, w))
|
|
940
|
+
row_sum[i] += w
|
|
941
|
+
return n, values, row_sum, W if mode == "dense" else W_sparse
|
|
942
|
+
|
|
943
|
+
approx_chunk = math.ceil(n / max_workers) if max_workers else None
|
|
944
|
+
chunk_size = resolve_chunk_size(
|
|
945
|
+
approx_chunk,
|
|
946
|
+
n,
|
|
947
|
+
minimum=1,
|
|
948
|
+
)
|
|
949
|
+
tasks = []
|
|
950
|
+
with ProcessPoolExecutor(max_workers=max_workers) as executor:
|
|
951
|
+
for start in range(0, n, chunk_size):
|
|
952
|
+
rows = wij[start : start + chunk_size]
|
|
953
|
+
tasks.append(
|
|
954
|
+
executor.submit(
|
|
955
|
+
_coherence_python_worker,
|
|
956
|
+
(tuple(tuple(row) for row in rows), start, mode, thr),
|
|
957
|
+
)
|
|
958
|
+
)
|
|
959
|
+
results = [task.result() for task in tasks]
|
|
960
|
+
|
|
961
|
+
results.sort(key=lambda item: item[0])
|
|
962
|
+
sparse_entries: list[tuple[int, int, float]] | None = (
|
|
963
|
+
[] if mode != "dense" else None
|
|
964
|
+
)
|
|
965
|
+
for start, chunk_values, chunk_row_sum, chunk_sparse in results:
|
|
966
|
+
values.extend(chunk_values)
|
|
967
|
+
for offset, total in enumerate(chunk_row_sum):
|
|
968
|
+
row_sum[start + offset] = total
|
|
969
|
+
if sparse_entries is not None:
|
|
970
|
+
sparse_entries.extend(chunk_sparse)
|
|
971
|
+
|
|
972
|
+
if mode == "dense":
|
|
973
|
+
W_dense: CoherenceMatrixDense = [list(row) for row in wij]
|
|
974
|
+
return n, values, row_sum, W_dense
|
|
975
|
+
sparse_result: CoherenceMatrixSparse = (
|
|
976
|
+
sparse_entries if sparse_entries is not None else []
|
|
977
|
+
)
|
|
978
|
+
return n, values, row_sum, sparse_result
|
|
979
|
+
|
|
980
|
+
|
|
981
|
+
def _finalize_wij(
    G: TNFRGraph,
    nodes: Sequence[NodeId],
    wij: FloatMatrix | Sequence[Sequence[float]],
    mode: str,
    thr: float,
    scope: str,
    self_diag: bool,
    np: ModuleType | None = None,
    *,
    n_jobs: int = 1,
) -> tuple[list[NodeId], CoherenceMatrixPayload]:
    """Finalize the coherence matrix ``wij`` and store results in history.

    When ``np`` is provided and ``wij`` is a NumPy array, the computation is
    performed using vectorized operations. Otherwise a pure Python loop-based
    approach is used.

    Side effects: appends the payload ``W``, the normalized row sums ``Wi``,
    and a stats record to the graph history under the keys configured in the
    ``COHERENCE`` parameter block.
    """

    # ``and`` short-circuits, so ``np.ndarray`` is only touched when np exists.
    use_np = np is not None and isinstance(wij, np.ndarray)
    if use_np:
        assert np is not None
        n, values, row_sum, W = _coherence_numpy(wij, mode, thr, np)
    else:
        n, values, row_sum, W = _coherence_python(wij, mode, thr, n_jobs=n_jobs)

    # Aggregate min/max/mean of off-diagonal weights and per-row averages.
    min_val, max_val, mean_val, Wi, count_val = _compute_stats(
        values, row_sum, n, self_diag, np if use_np else None
    )
    stats = {
        "min": min_val,
        "max": max_val,
        "mean": mean_val,
        "n_edges": count_val,
        "mode": mode,
        "scope": scope,
    }

    # Persist payload, row averages, and stats into the graph history series.
    hist = ensure_history(G)
    cfg = get_param(G, "COHERENCE")
    append_metric(hist, cfg.get("history_key", "W_sparse"), W)
    append_metric(hist, cfg.get("Wi_history_key", "W_i"), Wi)
    append_metric(hist, cfg.get("stats_history_key", "W_stats"), stats)
    return list(nodes), W
|
|
1025
|
+
|
|
1026
|
+
|
|
1027
|
+
def coherence_matrix(
    G: TNFRGraph,
    use_numpy: bool | None = None,
    *,
    n_jobs: int | None = None,
) -> tuple[list[NodeId] | None, CoherenceMatrixPayload | None]:
    """Compute coherence matrix W approximating operator Ĉ.

    Returns matrix W where wᵢⱼ ≈ ⟨i|Ĉ|j⟩ computed from structural
    similarities: phase, EPI, frequency, and sense index.

    Mathematical Foundation:
        Ĉ ≈ Σᵢⱼ wᵢⱼ |i⟩⟨j|

    Matrix W satisfies Hermiticity (W=W^T), element bounds (wᵢⱼ ∈ [0,1]),
    and provides spectrum σ(Ĉ) via eigenvalues.

    Parameters
    ----------
    G:
        Graph with node attributes: theta, EPI, vf, Si
    use_numpy:
        Force NumPy (True), pure Python (False), or auto-detect (None)
    n_jobs:
        Worker processes for Python fallback (None or ≤1 = serial)

    Returns
    -------
    nodes:
        Ordered node list matching matrix indexing
    W:
        Coherence matrix (dense or sparse per configuration)

    See Also
    --------
    compute_coherence : Computes C(t) = Tr(Ĉρ)
    Mathematical Foundations §3.1: Theory + Implementation Bridge

    Examples
    --------
    >>> nodes, W = coherence_matrix(G)
    >>> # W[i][j] ≈ ⟨i|Ĉ|j⟩ for computational basis
    """

    cfg = get_param(G, "COHERENCE")
    if not cfg.get("enabled", True):
        # Feature disabled: nothing is computed or stored.
        return None, None

    node_to_index: Mapping[NodeId, int] = ensure_node_index_map(G)
    nodes: list[NodeId] = list(node_to_index.keys())
    n = len(nodes)
    if n == 0:
        return nodes, []

    # NumPy handling for optional vectorized operations
    np = get_numpy()
    use_np = np is not None if use_numpy is None else (use_numpy and np is not None)

    # Explicit ``n_jobs`` wins over the configured default.
    cfg_jobs = cfg.get("n_jobs")
    parallel_jobs = n_jobs if n_jobs is not None else cfg_jobs

    # Precompute indices to avoid repeated list.index calls within loops

    th_vals = collect_theta_attr(G, nodes, 0.0, np=np if use_np else None)
    epi_vals = collect_attr(G, nodes, ALIAS_EPI, 0.0, np=np if use_np else None)
    vf_vals = collect_attr(G, nodes, ALIAS_VF, 0.0, np=np if use_np else None)
    si_vals = collect_attr(G, nodes, ALIAS_SI, 0.0, np=np if use_np else None)
    # Sense index is constrained to [0, 1] before use.
    if use_np:
        assert np is not None
        si_vals = np.clip(si_vals, 0.0, 1.0)
    else:
        si_vals = [clamp01(v) for v in si_vals]
    epi_min, epi_max = min_max_range(epi_vals)
    vf_min, vf_max = min_max_range(vf_vals)

    # Missing weight components default to 0.0 before normalization.
    wdict = dict(cfg.get("weights", {}))
    for k in ("phase", "epi", "vf", "si"):
        wdict.setdefault(k, 0.0)
    wnorm = normalize_weights(wdict, ("phase", "epi", "vf", "si"), default=0.0)

    # Any scope other than "all" restricts pairs to graph neighbors.
    scope = str(cfg.get("scope", "neighbors")).lower()
    neighbors_only = scope != "all"
    self_diag = bool(cfg.get("self_on_diag", True))
    mode = str(cfg.get("store_mode", "sparse")).lower()
    thr = float(cfg.get("threshold", 0.0))
    if mode not in ("sparse", "dense"):
        mode = "sparse"
    # Prefer cached trig values; fall back to values computed from th_vals.
    trig = get_trig_cache(G, np=np)
    cos_map, sin_map = trig.cos, trig.sin
    trig_local = compute_theta_trig(zip(nodes, th_vals), np=np)
    cos_vals = [cos_map.get(n, trig_local.cos[n]) for n in nodes]
    sin_vals = [sin_map.get(n, trig_local.sin[n]) for n in nodes]
    inputs = SimilarityInputs(
        th_vals=th_vals,
        epi_vals=epi_vals,
        vf_vals=vf_vals,
        si_vals=si_vals,
        cos_vals=cos_vals,
        sin_vals=sin_vals,
    )
    if use_np:
        assert np is not None
        wij_matrix = _wij_vectorized(
            G,
            nodes,
            inputs,
            wnorm,
            epi_min,
            epi_max,
            vf_min,
            vf_max,
            self_diag,
            np,
        )
        if neighbors_only:
            # Zero out non-neighbor entries; the eye keeps the diagonal intact.
            adj = np.eye(n, dtype=bool)
            for u, v in G.edges():
                i = node_to_index[u]
                j = node_to_index[v]
                adj[i, j] = True
                adj[j, i] = True
            wij_matrix = cast(FloatMatrix, np.where(adj, wij_matrix, 0.0))
        wij: FloatMatrix | CoherenceMatrixDense = wij_matrix
    else:
        wij = _wij_loops(
            G,
            nodes,
            node_to_index,
            inputs,
            wnorm,
            epi_min,
            epi_max,
            vf_min,
            vf_max,
            neighbors_only,
            self_diag,
            n_jobs=parallel_jobs,
        )

    # Aggregation, stats, and history persistence are delegated; the Python
    # fallback may still parallelize, the NumPy path is already vectorized.
    return _finalize_wij(
        G,
        nodes,
        wij,
        mode,
        thr,
        scope,
        self_diag,
        np,
        n_jobs=parallel_jobs if not use_np else 1,
    )
|
|
1177
|
+
|
|
1178
|
+
|
|
1179
|
+
def local_phase_sync_weighted(
    G: TNFRGraph,
    n: NodeId,
    nodes_order: Sequence[NodeId] | None = None,
    W_row: PhaseSyncWeights | None = None,
    node_to_index: Mapping[NodeId, int] | None = None,
) -> float:
    """Compute local phase synchrony using explicit weights.

    ``nodes_order`` is the node ordering used to build the coherence matrix
    and ``W_row`` contains either the dense row corresponding to ``n`` or the
    sparse list of ``(i, j, w)`` tuples for the whole matrix.

    Raises
    ------
    ValueError
        If ``nodes_order`` or ``W_row`` is missing, or the node index cannot
        be resolved when a dense matrix is supplied.
    """
    if W_row is None or nodes_order is None:
        raise ValueError(
            "nodes_order and W_row are required for weighted phase synchrony"
        )

    if node_to_index is None:
        node_to_index = ensure_node_index_map(G)
    i = node_to_index.get(n)
    if i is None:
        i = nodes_order.index(n)

    trig = get_trig_cache(G)
    cos_map, sin_map = trig.cos, trig.sin

    def _phasor(nj: NodeId) -> complex:
        # Unit phasor e^{iθ} for node ``nj``; cache miss falls back to a
        # fresh trig computation from the node's attributes.
        cos_j = cos_map.get(nj)
        sin_j = sin_map.get(nj)
        if cos_j is None or sin_j is None:
            trig_j = compute_theta_trig(((nj, G.nodes[nj]),))
            cos_j = trig_j.cos[nj]
            sin_j = trig_j.sin[nj]
        return complex(cos_j, sin_j)

    def _sync_dense(row_vals: Sequence[float]) -> float:
        # Weighted mean phasor magnitude over a dense weight row.
        num = 0 + 0j
        den = 0.0
        for w, nj in zip(row_vals, nodes_order):
            if nj == n:
                continue
            den += w
            num += w * _phasor(nj)
        return abs(num / den) if den else 0.0

    def _sync_sparse(entries: CoherenceMatrixSparse) -> float:
        # Weighted mean phasor magnitude over sparse (i, j, w) triples,
        # restricted to rows matching the target node index.
        num = 0 + 0j
        den = 0.0
        for ii, jj, w in entries:
            if ii != i:
                continue
            nj = nodes_order[jj]
            if nj == n:
                continue
            den += w
            num += w * _phasor(nj)
        return abs(num / den) if den else 0.0

    # Previously the accumulation loop above was duplicated four times (one
    # copy per input layout); it is now shared via the helpers.
    if isinstance(W_row, Sequence) and W_row:
        first = W_row[0]
        if isinstance(first, (int, float)):
            # A flat numeric sequence is the dense row for node ``n``.
            return _sync_dense(cast(Sequence[float], W_row))
        if (
            isinstance(first, Sequence)
            and len(first) == 3
            and isinstance(first[0], int)
            and isinstance(first[1], int)
            and isinstance(first[2], (int, float))
        ):
            # (int, int, weight) triples: the whole sparse matrix.
            return _sync_sparse(cast(CoherenceMatrixSparse, W_row))
        # Otherwise treat it as the full dense matrix and pick our row.
        dense_matrix = cast(CoherenceMatrixDense, W_row)
        if i is None:
            raise ValueError("node index resolution failed for dense weights")
        return _sync_dense(cast(Sequence[float], dense_matrix[i]))

    # Fallback: non-sequence (or empty) payloads are treated as sparse.
    return _sync_sparse(cast(CoherenceMatrixSparse, W_row))
|
|
1284
|
+
|
|
1285
|
+
|
|
1286
|
+
def local_phase_sync(G: TNFRGraph, n: NodeId) -> float:
    """Compute unweighted local phase synchronization for node ``n``."""
    nodes_order, weight_payload = coherence_matrix(G)
    if nodes_order is None:
        # Coherence computation is disabled for this graph.
        return 0.0
    return local_phase_sync_weighted(
        G, n, nodes_order=nodes_order, W_row=weight_payload
    )
|
|
1292
|
+
|
|
1293
|
+
|
|
1294
|
+
def _coherence_step(G: TNFRGraph, ctx: dict[str, Any] | None = None) -> None:
    """AFTER_STEP hook: refresh the coherence matrix when enabled."""
    del ctx  # callback payload is unused here

    cfg = get_param(G, "COHERENCE")
    if cfg.get("enabled", True):
        coherence_matrix(G)
|
|
1300
|
+
|
|
1301
|
+
|
|
1302
|
+
def register_coherence_callbacks(G: TNFRGraph) -> None:
    """Attach coherence matrix maintenance to the ``AFTER_STEP`` event.

    Registers :func:`_coherence_step` under the name ``"coherence_step"`` so
    the coherence matrix is recomputed after each simulation step.
    """

    callback_manager.register_callback(
        G,
        event=CallbackEvent.AFTER_STEP.value,
        func=_coherence_step,
        name="coherence_step",
    )
|
|
1311
|
+
|
|
1312
|
+
|
|
1313
|
+
# ---------------------------------------------------------------------------
|
|
1314
|
+
# Coherence and observer-related metric updates
|
|
1315
|
+
# ---------------------------------------------------------------------------
|
|
1316
|
+
|
|
1317
|
+
|
|
1318
|
+
def _record_metrics(
    hist: HistoryState,
    *pairs: MetricRecord,
    evaluate: bool = False,
) -> None:
    """Record metric values for the trace history.

    Each ``(payload, key)`` pair is appended to the history series ``key``.
    When ``evaluate`` is set, ``payload`` is treated as a zero-argument
    provider and called to obtain the value.
    """
    metrics = cast(MutableMapping[str, list[Any]], hist)
    for payload, key in pairs:
        value = cast(MetricProvider, payload)() if evaluate else payload
        append_metric(metrics, key, value)
|
|
1332
|
+
|
|
1333
|
+
|
|
1334
|
+
def _update_coherence(G: TNFRGraph, hist: HistoryState) -> None:
    """Update network coherence and related means.

    Appends the coherence value ``C`` plus the ΔNFR and ΔEPI means to the
    history, then records ``W_bar`` as a rolling mean of the most recent
    ``C_steps`` values (window capped at ``DEFAULT_WBAR_SPAN``).
    """

    coherence_payload = cast(
        tuple[CoherenceMetric, float, float],
        compute_coherence(G, return_means=True),
    )
    C, dnfr_mean, depi_mean = coherence_payload
    _record_metrics(
        hist,
        (C, "C_steps"),
        (dnfr_mean, "dnfr_mean"),
        (depi_mean, "depi_mean"),
    )

    cs = hist["C_steps"]
    if cs:
        # Rolling mean over at most DEFAULT_WBAR_SPAN recent coherence values.
        window = min(len(cs), DEFAULT_WBAR_SPAN)
        w = max(1, window)
        wbar = sum(cs[-w:]) / w
        _record_metrics(hist, (wbar, "W_bar"))
|
|
1355
|
+
|
|
1356
|
+
|
|
1357
|
+
def _update_phase_sync(G: TNFRGraph, hist: HistoryState) -> None:
    """Capture phase synchrony and Kuramoto order."""
    sync_value = phase_sync(G)
    order_value = kuramoto_order(G)
    _record_metrics(
        hist,
        (sync_value, "phase_sync"),
        (order_value, "kuramoto_R"),
    )
|
|
1367
|
+
|
|
1368
|
+
|
|
1369
|
+
def _update_sigma(G: TNFRGraph, hist: HistoryState) -> None:
    """Record glyph load aggregates and the associated Σ⃗ sense vector."""
    metrics = cast(MutableMapping[str, list[Any]], hist)

    # The legacy series name is rejected outright rather than migrated.
    if "glyph_load_estab" in metrics:
        raise ValueError(
            "History payloads using 'glyph_load_estab' are no longer supported. "
            "Rename the series to 'glyph_load_stabilizers' before loading the graph."
        )

    # Ensure the stabilizer series exists when missing; setdefault keeps any
    # pre-existing (non-None) list untouched.
    if metrics.get(GLYPH_LOAD_STABILIZERS_KEY) is None:
        metrics.setdefault(GLYPH_LOAD_STABILIZERS_KEY, [])

    load: GlyphLoadDistribution = glyph_load(G, window=DEFAULT_GLYPH_LOAD_SPAN)
    _record_metrics(
        hist,
        (float(load.get("_stabilizers", 0.0)), GLYPH_LOAD_STABILIZERS_KEY),
        (float(load.get("_disruptors", 0.0)), "glyph_load_disr"),
    )

    # Strip the private ("_"-prefixed) aggregates before deriving Σ⃗.
    public: GlyphLoadDistribution = {
        key: value for key, value in load.items() if not key.startswith("_")
    }
    sigma: SigmaVector = sigma_vector(public)
    _record_metrics(
        hist,
        (sigma.get("x", 0.0), "sense_sigma_x"),
        (sigma.get("y", 0.0), "sense_sigma_y"),
        (sigma.get("mag", 0.0), "sense_sigma_mag"),
        (sigma.get("angle", 0.0), "sense_sigma_angle"),
    )
|
|
1399
|
+
|
|
1400
|
+
|
|
1401
|
+
def _stability_chunk_worker(args: StabilityChunkArgs) -> StabilityChunkResult:
|
|
1402
|
+
"""Compute stability aggregates for a chunk of nodes."""
|
|
1403
|
+
|
|
1404
|
+
(
|
|
1405
|
+
dnfr_vals,
|
|
1406
|
+
depi_vals,
|
|
1407
|
+
si_curr_vals,
|
|
1408
|
+
si_prev_vals,
|
|
1409
|
+
vf_curr_vals,
|
|
1410
|
+
vf_prev_vals,
|
|
1411
|
+
dvf_prev_vals,
|
|
1412
|
+
dt,
|
|
1413
|
+
eps_dnfr,
|
|
1414
|
+
eps_depi,
|
|
1415
|
+
) = args
|
|
1416
|
+
|
|
1417
|
+
inv_dt = (1.0 / dt) if dt else 0.0
|
|
1418
|
+
stable = 0
|
|
1419
|
+
delta_sum = 0.0
|
|
1420
|
+
B_sum = 0.0
|
|
1421
|
+
delta_vals: list[float] = []
|
|
1422
|
+
dvf_dt_vals: list[float] = []
|
|
1423
|
+
B_vals: list[float] = []
|
|
1424
|
+
|
|
1425
|
+
for idx in range(len(si_curr_vals)):
|
|
1426
|
+
curr_si = float(si_curr_vals[idx])
|
|
1427
|
+
prev_si_raw = si_prev_vals[idx]
|
|
1428
|
+
prev_si = float(prev_si_raw) if prev_si_raw is not None else curr_si
|
|
1429
|
+
delta = curr_si - prev_si
|
|
1430
|
+
delta_vals.append(delta)
|
|
1431
|
+
delta_sum += delta
|
|
1432
|
+
|
|
1433
|
+
curr_vf = float(vf_curr_vals[idx])
|
|
1434
|
+
prev_vf_raw = vf_prev_vals[idx]
|
|
1435
|
+
prev_vf = float(prev_vf_raw) if prev_vf_raw is not None else curr_vf
|
|
1436
|
+
dvf_dt = (curr_vf - prev_vf) * inv_dt if dt else 0.0
|
|
1437
|
+
prev_dvf_raw = dvf_prev_vals[idx]
|
|
1438
|
+
prev_dvf = float(prev_dvf_raw) if prev_dvf_raw is not None else dvf_dt
|
|
1439
|
+
B = (dvf_dt - prev_dvf) * inv_dt if dt else 0.0
|
|
1440
|
+
dvf_dt_vals.append(dvf_dt)
|
|
1441
|
+
B_vals.append(B)
|
|
1442
|
+
B_sum += B
|
|
1443
|
+
|
|
1444
|
+
if (
|
|
1445
|
+
abs(float(dnfr_vals[idx])) <= eps_dnfr
|
|
1446
|
+
and abs(float(depi_vals[idx])) <= eps_depi
|
|
1447
|
+
):
|
|
1448
|
+
stable += 1
|
|
1449
|
+
|
|
1450
|
+
chunk_len = len(si_curr_vals)
|
|
1451
|
+
return (
|
|
1452
|
+
stable,
|
|
1453
|
+
chunk_len,
|
|
1454
|
+
delta_sum,
|
|
1455
|
+
B_sum,
|
|
1456
|
+
delta_vals,
|
|
1457
|
+
dvf_dt_vals,
|
|
1458
|
+
B_vals,
|
|
1459
|
+
)
|
|
1460
|
+
|
|
1461
|
+
|
|
1462
|
+
def _track_stability(
    G: TNFRGraph,
    hist: MutableMapping[str, Any],
    dt: float,
    eps_dnfr: float,
    eps_depi: float,
    *,
    n_jobs: int | None = None,
) -> None:
    """Track per-node stability and derivative metrics.

    Appends one sample each to ``hist["stable_frac"]``, ``hist["delta_Si"]``
    and ``hist["B"]``, and refreshes per-node bookkeeping (``_prev_Si``,
    ``_prev_vf``, ``_prev_dvf``) plus the δSi, dνf/dt and d²νf/dt² node
    attributes.  A node counts as stable when |ΔNFR| <= ``eps_dnfr`` and
    |δEPI| <= ``eps_depi``.  Uses NumPy vectorization when available;
    otherwise falls back to pure Python, optionally parallelized across
    ``n_jobs`` worker processes.
    """

    nodes: tuple[NodeId, ...] = tuple(G.nodes)
    total_nodes = len(nodes)
    if not total_nodes:
        # Empty graph: record neutral samples so the series stay aligned per step.
        hist.setdefault("stable_frac", []).append(0.0)
        hist.setdefault("delta_Si", []).append(0.0)
        hist.setdefault("B", []).append(0.0)
        return

    np_mod = get_numpy()

    dnfr_vals = collect_attr(G, nodes, ALIAS_DNFR, 0.0, np=np_mod)
    depi_vals = collect_attr(G, nodes, ALIAS_DEPI, 0.0, np=np_mod)
    si_curr_vals = collect_attr(G, nodes, ALIAS_SI, 0.0, np=np_mod)
    vf_curr_vals = collect_attr(G, nodes, ALIAS_VF, 0.0, np=np_mod)

    # Previous-step values cached on the nodes; None on the first step.
    prev_si_data = [G.nodes[n].get("_prev_Si") for n in nodes]
    prev_vf_data = [G.nodes[n].get("_prev_vf") for n in nodes]
    prev_dvf_data = [G.nodes[n].get("_prev_dvf") for n in nodes]

    # Guard against dt == 0: derivatives degrade to zero instead of dividing.
    inv_dt = (1.0 / dt) if dt else 0.0

    if np_mod is not None:
        # --- Vectorized (NumPy) path ---------------------------------------
        np = np_mod
        dnfr_arr = dnfr_vals
        depi_arr = depi_vals
        si_curr_arr = si_curr_vals
        vf_curr_arr = vf_curr_vals

        # Missing previous samples fall back to the current value, yielding a
        # zero delta/derivative on the first step.
        si_prev_arr = np.asarray(
            [
                (
                    float(prev_si_data[idx])
                    if prev_si_data[idx] is not None
                    else float(si_curr_arr[idx])
                )
                for idx in range(total_nodes)
            ],
            dtype=float,
        )
        vf_prev_arr = np.asarray(
            [
                (
                    float(prev_vf_data[idx])
                    if prev_vf_data[idx] is not None
                    else float(vf_curr_arr[idx])
                )
                for idx in range(total_nodes)
            ],
            dtype=float,
        )

        if dt:
            dvf_dt_arr = (vf_curr_arr - vf_prev_arr) * inv_dt
        else:
            dvf_dt_arr = np.zeros_like(vf_curr_arr, dtype=float)

        dvf_prev_arr = np.asarray(
            [
                (
                    float(prev_dvf_data[idx])
                    if prev_dvf_data[idx] is not None
                    else float(dvf_dt_arr[idx])
                )
                for idx in range(total_nodes)
            ],
            dtype=float,
        )

        if dt:
            # B is the second derivative (curvature) of νf.
            B_arr = (dvf_dt_arr - dvf_prev_arr) * inv_dt
        else:
            B_arr = np.zeros_like(dvf_dt_arr, dtype=float)

        # Stability: both pressure magnitudes inside their epsilon bands.
        stable_mask = (np.abs(dnfr_arr) <= eps_dnfr) & (np.abs(depi_arr) <= eps_depi)
        stable_frac = float(stable_mask.mean()) if total_nodes else 0.0

        delta_si_arr = si_curr_arr - si_prev_arr
        delta_si_mean = float(delta_si_arr.mean()) if total_nodes else 0.0
        B_mean = float(B_arr.mean()) if total_nodes else 0.0

        hist.setdefault("stable_frac", []).append(stable_frac)
        hist.setdefault("delta_Si", []).append(delta_si_mean)
        hist.setdefault("B", []).append(B_mean)

        # Write back per-node caches and derivative attributes.
        for idx, node in enumerate(nodes):
            nd = G.nodes[node]
            curr_si = float(si_curr_arr[idx])
            delta_val = float(delta_si_arr[idx])
            nd["_prev_Si"] = curr_si
            set_attr(nd, ALIAS_DSI, delta_val)

            curr_vf = float(vf_curr_arr[idx])
            nd["_prev_vf"] = curr_vf

            dvf_dt_val = float(dvf_dt_arr[idx])
            nd["_prev_dvf"] = dvf_dt_val
            set_attr(nd, ALIAS_DVF, dvf_dt_val)
            set_attr(nd, ALIAS_D2VF, float(B_arr[idx]))

        return

    # NumPy not available: optionally parallel fallback or sequential computation.
    dnfr_list = list(dnfr_vals)
    depi_list = list(depi_vals)
    si_curr_list = list(si_curr_vals)
    vf_curr_list = list(vf_curr_vals)

    if n_jobs and n_jobs > 1:
        # --- Process-parallel fallback --------------------------------------
        approx_chunk = math.ceil(total_nodes / n_jobs) if n_jobs else None
        chunk_size = resolve_chunk_size(
            approx_chunk,
            total_nodes,
            minimum=1,
        )
        chunk_results: list[
            tuple[
                int,
                tuple[int, int, float, float, list[float], list[float], list[float]],
            ]
        ] = []
        with ProcessPoolExecutor(max_workers=n_jobs) as executor:
            futures: list[tuple[int, Any]] = []
            for start in range(0, total_nodes, chunk_size):
                end = min(start + chunk_size, total_nodes)
                chunk_args = (
                    dnfr_list[start:end],
                    depi_list[start:end],
                    si_curr_list[start:end],
                    prev_si_data[start:end],
                    vf_curr_list[start:end],
                    prev_vf_data[start:end],
                    prev_dvf_data[start:end],
                    dt,
                    eps_dnfr,
                    eps_depi,
                )
                futures.append(
                    (start, executor.submit(_stability_chunk_worker, chunk_args))
                )

            for start, fut in futures:
                chunk_results.append((start, fut.result()))

        # Restore node order before merging per-chunk series.
        chunk_results.sort(key=lambda item: item[0])

        stable_total = 0
        delta_sum = 0.0
        B_sum = 0.0
        delta_vals_all: list[float] = []
        dvf_dt_all: list[float] = []
        B_vals_all: list[float] = []

        for _, result in chunk_results:
            (
                stable_count,
                chunk_len,
                chunk_delta_sum,
                chunk_B_sum,
                delta_vals,
                dvf_vals,
                B_vals,
            ) = result
            stable_total += stable_count
            delta_sum += chunk_delta_sum
            B_sum += chunk_B_sum
            delta_vals_all.extend(delta_vals)
            dvf_dt_all.extend(dvf_vals)
            B_vals_all.extend(B_vals)

        total = len(delta_vals_all)
        stable_frac = stable_total / total if total else 0.0
        delta_si_mean = delta_sum / total if total else 0.0
        B_mean = B_sum / total if total else 0.0

    else:
        # --- Sequential fallback --------------------------------------------
        stable_total = 0
        delta_sum = 0.0
        B_sum = 0.0
        delta_vals_all = []
        dvf_dt_all = []
        B_vals_all = []

        for idx in range(total_nodes):
            # Same per-node math as _stability_chunk_worker, inlined.
            curr_si = float(si_curr_list[idx])
            prev_si_raw = prev_si_data[idx]
            prev_si = float(prev_si_raw) if prev_si_raw is not None else curr_si
            delta = curr_si - prev_si
            delta_vals_all.append(delta)
            delta_sum += delta

            curr_vf = float(vf_curr_list[idx])
            prev_vf_raw = prev_vf_data[idx]
            prev_vf = float(prev_vf_raw) if prev_vf_raw is not None else curr_vf
            dvf_dt_val = (curr_vf - prev_vf) * inv_dt if dt else 0.0
            prev_dvf_raw = prev_dvf_data[idx]
            prev_dvf = float(prev_dvf_raw) if prev_dvf_raw is not None else dvf_dt_val
            B_val = (dvf_dt_val - prev_dvf) * inv_dt if dt else 0.0
            dvf_dt_all.append(dvf_dt_val)
            B_vals_all.append(B_val)
            B_sum += B_val

            if (
                abs(float(dnfr_list[idx])) <= eps_dnfr
                and abs(float(depi_list[idx])) <= eps_depi
            ):
                stable_total += 1

        total = len(delta_vals_all)
        stable_frac = stable_total / total if total else 0.0
        delta_si_mean = delta_sum / total if total else 0.0
        B_mean = B_sum / total if total else 0.0

    hist.setdefault("stable_frac", []).append(stable_frac)
    hist.setdefault("delta_Si", []).append(delta_si_mean)
    hist.setdefault("B", []).append(B_mean)

    # Write back per-node caches and derivative attributes (fallback paths).
    for idx, node in enumerate(nodes):
        nd = G.nodes[node]
        curr_si = float(si_curr_list[idx])
        delta_val = float(delta_vals_all[idx])
        nd["_prev_Si"] = curr_si
        set_attr(nd, ALIAS_DSI, delta_val)

        curr_vf = float(vf_curr_list[idx])
        nd["_prev_vf"] = curr_vf

        dvf_dt_val = float(dvf_dt_all[idx])
        nd["_prev_dvf"] = dvf_dt_val
        set_attr(nd, ALIAS_DVF, dvf_dt_val)
        set_attr(nd, ALIAS_D2VF, float(B_vals_all[idx]))
|
|
1703
|
+
|
|
1704
|
+
|
|
1705
|
+
def _si_chunk_stats(
|
|
1706
|
+
values: Sequence[float], si_hi: float, si_lo: float
|
|
1707
|
+
) -> tuple[float, int, int, int]:
|
|
1708
|
+
"""Compute partial Si aggregates for ``values``.
|
|
1709
|
+
|
|
1710
|
+
The helper keeps the logic shared between the sequential and parallel
|
|
1711
|
+
fallbacks when NumPy is unavailable.
|
|
1712
|
+
"""
|
|
1713
|
+
|
|
1714
|
+
total = 0.0
|
|
1715
|
+
count = 0
|
|
1716
|
+
hi_count = 0
|
|
1717
|
+
lo_count = 0
|
|
1718
|
+
for s in values:
|
|
1719
|
+
if math.isnan(s):
|
|
1720
|
+
continue
|
|
1721
|
+
total += s
|
|
1722
|
+
count += 1
|
|
1723
|
+
if s >= si_hi:
|
|
1724
|
+
hi_count += 1
|
|
1725
|
+
if s <= si_lo:
|
|
1726
|
+
lo_count += 1
|
|
1727
|
+
return total, count, hi_count, lo_count
|
|
1728
|
+
|
|
1729
|
+
|
|
1730
|
+
def _aggregate_si(
    G: TNFRGraph,
    hist: MutableMapping[str, list[float]],
    *,
    n_jobs: int | None = None,
) -> None:
    """Aggregate Si statistics across nodes.

    Appends one sample each to ``hist["Si_mean"]``, ``hist["Si_hi_frac"]``
    and ``hist["Si_lo_frac"]``.  Thresholds come from the graph's
    ``SELECTOR_THRESHOLDS`` params, falling back to ``GLYPH_THRESHOLDS``
    and finally to the 0.66/0.33 defaults.  Uses NumPy when available,
    otherwise a pure-Python path that is optionally parallelized with
    ``n_jobs`` worker processes.  Failures are deliberately best-effort:
    they are logged at debug level and swallowed so a malformed history
    payload does not abort the simulation step.
    """

    try:
        thr_sel = get_param(G, "SELECTOR_THRESHOLDS")
        thr_def = get_param(G, "GLYPH_THRESHOLDS")
        # Selector thresholds win; glyph thresholds then hard-coded defaults.
        si_hi = float(thr_sel.get("si_hi", thr_def.get("hi", 0.66)))
        si_lo = float(thr_sel.get("si_lo", thr_def.get("lo", 0.33)))

        node_ids = list(G.nodes)
        if not node_ids:
            # Empty graph: record neutral samples to keep series aligned.
            hist["Si_mean"].append(0.0)
            hist["Si_hi_frac"].append(0.0)
            hist["Si_lo_frac"].append(0.0)
            return

        # Collect raw Si per node; unreadable values become NaN so they can
        # be excluded from the aggregates below.
        sis = []
        for node in node_ids:
            raw = get_attr(
                G.nodes[node],
                ALIAS_SI,
                None,
                conv=lambda value: value,  # Preserve NaN sentinels
            )
            try:
                sis.append(float(raw) if raw is not None else math.nan)
            except (TypeError, ValueError):
                sis.append(math.nan)

        np_mod = get_numpy()
        if np_mod is not None:
            # --- Vectorized path: mask out NaNs, then mean + band fractions.
            sis_array = np_mod.asarray(sis, dtype=float)
            valid = sis_array[~np_mod.isnan(sis_array)]
            n = int(valid.size)
            if n:
                hist["Si_mean"].append(float(valid.mean()))
                hi_frac = np_mod.count_nonzero(valid >= si_hi) / n
                lo_frac = np_mod.count_nonzero(valid <= si_lo) / n
                hist["Si_hi_frac"].append(float(hi_frac))
                hist["Si_lo_frac"].append(float(lo_frac))
            else:
                hist["Si_mean"].append(0.0)
                hist["Si_hi_frac"].append(0.0)
                hist["Si_lo_frac"].append(0.0)
            return

        if n_jobs is not None and n_jobs > 1:
            # --- Process-parallel fallback: fan chunks out to workers.
            approx_chunk = math.ceil(len(sis) / n_jobs) if n_jobs else None
            chunk_size = resolve_chunk_size(
                approx_chunk,
                len(sis),
                minimum=1,
            )
            futures = []
            with ProcessPoolExecutor(max_workers=n_jobs) as executor:
                for idx in range(0, len(sis), chunk_size):
                    chunk = sis[idx : idx + chunk_size]
                    futures.append(
                        executor.submit(_si_chunk_stats, chunk, si_hi, si_lo)
                    )
                totals = [future.result() for future in futures]
            # Partial aggregates are order-independent sums.
            total = sum(part[0] for part in totals)
            count = sum(part[1] for part in totals)
            hi_count = sum(part[2] for part in totals)
            lo_count = sum(part[3] for part in totals)
        else:
            total, count, hi_count, lo_count = _si_chunk_stats(sis, si_hi, si_lo)

        if count:
            hist["Si_mean"].append(total / count)
            hist["Si_hi_frac"].append(hi_count / count)
            hist["Si_lo_frac"].append(lo_count / count)
        else:
            # All samples were NaN: record neutral values.
            hist["Si_mean"].append(0.0)
            hist["Si_hi_frac"].append(0.0)
            hist["Si_lo_frac"].append(0.0)
    except (KeyError, AttributeError, TypeError) as exc:
        logger.debug("Si aggregation failed: %s", exc)
|
|
1813
|
+
|
|
1814
|
+
|
|
1815
|
+
def compute_global_coherence(G: TNFRGraph) -> float:
    """Compute global coherence C(t) for entire network.

    C(t) = 1 - (σ_ΔNFR / |ΔNFR|_max)

    This is the canonical TNFR coherence metric that measures global structural
    stability through the dispersion of reorganization pressure (ΔNFR) across
    the network.

    Parameters
    ----------
    G : TNFRGraph
        Network graph with nodes containing ΔNFR attributes

    Returns
    -------
    float
        Global coherence value in [0, 1] where:
        - 1.0 = perfect coherence (no reorganization pressure variance)
        - 0.0 = maximum incoherence (extreme ΔNFR dispersion)

    Notes
    -----
    **Mathematical Foundation:**

    - **σ_ΔNFR**: Standard deviation of ΔNFR values measures dispersion
    - **|ΔNFR|_max**: Largest absolute ΔNFR provides the normalization scale
    - **C(t)**: Higher values indicate more uniform structural state

    The normalization uses the maximum *absolute* ΔNFR.  A signed maximum
    would be negative (or misleadingly small) whenever pressures are
    negative, flipping the ratio's sign and — after clamping — reporting
    spurious perfect coherence for a highly dispersed network.

    **Special Cases:**

    - Empty network: Returns 1.0 (perfect coherence by definition)
    - All ΔNFR = 0: Returns 1.0 (no reorganization pressure)
    - |ΔNFR|_max = 0: Returns 1.0 (degenerate case, no pressure)

    **TNFR Context:**

    C(t) is the primary metric for measuring IL (Coherence) operator
    effectiveness. When IL is applied, C(t) should increase as ΔNFR
    becomes more uniformly distributed (ideally all approaching zero).

    See Also
    --------
    compute_local_coherence : Local coherence for node neighborhoods
    compute_coherence : Alternative coherence metric (legacy)

    Examples
    --------
    >>> import networkx as nx
    >>> from tnfr.metrics.coherence import compute_global_coherence
    >>> from tnfr.constants import DNFR_PRIMARY
    >>> G = nx.Graph()
    >>> G.add_nodes_from([1, 2, 3])
    >>> G.nodes[1][DNFR_PRIMARY] = 0.1
    >>> G.nodes[2][DNFR_PRIMARY] = 0.2
    >>> G.nodes[3][DNFR_PRIMARY] = 0.15
    >>> C_global = compute_global_coherence(G)
    >>> 0.0 <= C_global <= 1.0
    True
    """
    # Collect all ΔNFR values
    dnfr_values = [float(get_attr(G.nodes[n], ALIAS_DNFR, 0.0)) for n in G.nodes()]

    if not dnfr_values or all(v == 0 for v in dnfr_values):
        return 1.0  # Perfect coherence when no reorganization pressure

    np = get_numpy()
    if np is not None:
        dnfr_array = np.array(dnfr_values)
        sigma_dnfr = float(np.std(dnfr_array))
        # Normalize by the largest |ΔNFR| so negative pressures cannot flip
        # the sign of the ratio (which would clamp to a bogus C(t) = 1.0).
        dnfr_max = float(np.max(np.abs(dnfr_array)))
    else:
        # Pure Python fallback
        mean_dnfr = sum(dnfr_values) / len(dnfr_values)
        variance = sum((v - mean_dnfr) ** 2 for v in dnfr_values) / len(dnfr_values)
        sigma_dnfr = variance**0.5
        dnfr_max = max(abs(v) for v in dnfr_values)

    if dnfr_max == 0:
        return 1.0

    C_t = 1.0 - (sigma_dnfr / dnfr_max)

    # Clamp to [0, 1] to handle numerical edge cases
    if np is not None:
        return float(np.clip(C_t, 0.0, 1.0))
    return max(0.0, min(1.0, C_t))
|
|
1905
|
+
|
|
1906
|
+
|
|
1907
|
+
def compute_local_coherence(G: TNFRGraph, node: Any, radius: int = 1) -> float:
    """Compute local coherence for node and its neighborhood.

    Local coherence applies the same C(t) formula to a neighborhood subgraph:
    C_local(t) = 1 - (σ_ΔNFR_local / |ΔNFR|_max_local)

    This measures structural stability within a node's local vicinity, useful
    for identifying coherence gradients and structural weak points in networks.

    Parameters
    ----------
    G : TNFRGraph
        Network graph
    node : Any
        Central node for local coherence computation
    radius : int, default=1
        Neighborhood radius:
        - 1 = immediate neighbors (default)
        - 2 = neighbors + neighbors-of-neighbors
        - etc.

    Returns
    -------
    float
        Local coherence value in [0, 1] where:
        - 1.0 = perfect local coherence
        - 0.0 = maximum local incoherence

    Notes
    -----
    **Use Cases:**

    - **Hotspot Detection**: Identify regions of structural instability
    - **IL Targeting**: Prioritize nodes needing coherence stabilization
    - **Network Health**: Monitor local vs. global coherence balance
    - **Bifurcation Risk**: Low local C(t) may predict structural splits

    **Normalization:** Uses the maximum *absolute* ΔNFR of the neighborhood.
    A signed maximum would flip the ratio's sign when local pressures are
    negative and, after clamping, misreport an incoherent neighborhood as
    perfectly coherent.

    **Special Cases:**

    - Isolated node (no neighbors): Returns 1.0
    - All neighborhood ΔNFR = 0: Returns 1.0
    - Single-node neighborhood: Returns 1.0 (no variance)

    See Also
    --------
    compute_global_coherence : Global network coherence

    Examples
    --------
    >>> import networkx as nx
    >>> from tnfr.metrics.coherence import compute_local_coherence
    >>> from tnfr.constants import DNFR_PRIMARY
    >>> G = nx.Graph()
    >>> G.add_edges_from([(1, 2), (2, 3), (3, 4)])
    >>> for n in [1, 2, 3, 4]:
    ...     G.nodes[n][DNFR_PRIMARY] = 0.1 * n
    >>> C_local = compute_local_coherence(G, node=2, radius=1)
    >>> 0.0 <= C_local <= 1.0
    True
    """
    import networkx as nx

    # Get neighborhood
    if radius == 1:
        neighbors = set(G.neighbors(node)) | {node}
    else:
        neighbors = set(
            nx.single_source_shortest_path_length(G, node, cutoff=radius).keys()
        )

    # Collect ΔNFR for neighborhood
    dnfr_values = [float(get_attr(G.nodes[n], ALIAS_DNFR, 0.0)) for n in neighbors]

    if not dnfr_values or all(v == 0 for v in dnfr_values):
        return 1.0

    np = get_numpy()
    if np is not None:
        dnfr_array = np.array(dnfr_values)
        sigma_dnfr = float(np.std(dnfr_array))
        # Normalize by the largest |ΔNFR| so negative pressures cannot flip
        # the sign of the ratio (which would clamp to a bogus C_local = 1.0).
        dnfr_max = float(np.max(np.abs(dnfr_array)))
    else:
        # Pure Python fallback
        mean_dnfr = sum(dnfr_values) / len(dnfr_values)
        variance = sum((v - mean_dnfr) ** 2 for v in dnfr_values) / len(dnfr_values)
        sigma_dnfr = variance**0.5
        dnfr_max = max(abs(v) for v in dnfr_values)

    if dnfr_max == 0:
        return 1.0

    C_local = 1.0 - (sigma_dnfr / dnfr_max)

    # Clamp to [0, 1]
    if np is not None:
        return float(np.clip(C_local, 0.0, 1.0))
    return max(0.0, min(1.0, C_local))
|