tnfr 4.5.2__py3-none-any.whl → 8.5.0__py3-none-any.whl
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release: this version of tnfr might be problematic.
- tnfr/__init__.py +334 -50
- tnfr/__init__.pyi +33 -0
- tnfr/_compat.py +10 -0
- tnfr/_generated_version.py +34 -0
- tnfr/_version.py +49 -0
- tnfr/_version.pyi +7 -0
- tnfr/alias.py +214 -37
- tnfr/alias.pyi +108 -0
- tnfr/backends/__init__.py +354 -0
- tnfr/backends/jax_backend.py +173 -0
- tnfr/backends/numpy_backend.py +238 -0
- tnfr/backends/optimized_numpy.py +420 -0
- tnfr/backends/torch_backend.py +408 -0
- tnfr/cache.py +149 -556
- tnfr/cache.pyi +13 -0
- tnfr/cli/__init__.py +51 -16
- tnfr/cli/__init__.pyi +26 -0
- tnfr/cli/arguments.py +344 -32
- tnfr/cli/arguments.pyi +29 -0
- tnfr/cli/execution.py +676 -50
- tnfr/cli/execution.pyi +70 -0
- tnfr/cli/interactive_validator.py +614 -0
- tnfr/cli/utils.py +18 -3
- tnfr/cli/utils.pyi +7 -0
- tnfr/cli/validate.py +236 -0
- tnfr/compat/__init__.py +85 -0
- tnfr/compat/dataclass.py +136 -0
- tnfr/compat/jsonschema_stub.py +61 -0
- tnfr/compat/matplotlib_stub.py +73 -0
- tnfr/compat/numpy_stub.py +155 -0
- tnfr/config/__init__.py +224 -0
- tnfr/config/__init__.pyi +10 -0
- tnfr/{constants_glyphs.py → config/constants.py} +26 -20
- tnfr/config/constants.pyi +12 -0
- tnfr/config/defaults.py +54 -0
- tnfr/{constants/core.py → config/defaults_core.py} +59 -6
- tnfr/config/defaults_init.py +33 -0
- tnfr/config/defaults_metric.py +104 -0
- tnfr/config/feature_flags.py +81 -0
- tnfr/config/feature_flags.pyi +16 -0
- tnfr/config/glyph_constants.py +31 -0
- tnfr/config/init.py +77 -0
- tnfr/config/init.pyi +8 -0
- tnfr/config/operator_names.py +254 -0
- tnfr/config/operator_names.pyi +36 -0
- tnfr/config/physics_derivation.py +354 -0
- tnfr/config/presets.py +83 -0
- tnfr/config/presets.pyi +7 -0
- tnfr/config/security.py +927 -0
- tnfr/config/thresholds.py +114 -0
- tnfr/config/tnfr_config.py +498 -0
- tnfr/constants/__init__.py +51 -133
- tnfr/constants/__init__.pyi +92 -0
- tnfr/constants/aliases.py +33 -0
- tnfr/constants/aliases.pyi +27 -0
- tnfr/constants/init.py +3 -1
- tnfr/constants/init.pyi +12 -0
- tnfr/constants/metric.py +9 -15
- tnfr/constants/metric.pyi +19 -0
- tnfr/core/__init__.py +33 -0
- tnfr/core/container.py +226 -0
- tnfr/core/default_implementations.py +329 -0
- tnfr/core/interfaces.py +279 -0
- tnfr/dynamics/__init__.py +213 -633
- tnfr/dynamics/__init__.pyi +83 -0
- tnfr/dynamics/adaptation.py +267 -0
- tnfr/dynamics/adaptation.pyi +7 -0
- tnfr/dynamics/adaptive_sequences.py +189 -0
- tnfr/dynamics/adaptive_sequences.pyi +14 -0
- tnfr/dynamics/aliases.py +23 -0
- tnfr/dynamics/aliases.pyi +19 -0
- tnfr/dynamics/bifurcation.py +232 -0
- tnfr/dynamics/canonical.py +229 -0
- tnfr/dynamics/canonical.pyi +48 -0
- tnfr/dynamics/coordination.py +385 -0
- tnfr/dynamics/coordination.pyi +25 -0
- tnfr/dynamics/dnfr.py +2699 -398
- tnfr/dynamics/dnfr.pyi +26 -0
- tnfr/dynamics/dynamic_limits.py +225 -0
- tnfr/dynamics/feedback.py +252 -0
- tnfr/dynamics/feedback.pyi +24 -0
- tnfr/dynamics/fused_dnfr.py +454 -0
- tnfr/dynamics/homeostasis.py +157 -0
- tnfr/dynamics/homeostasis.pyi +14 -0
- tnfr/dynamics/integrators.py +496 -102
- tnfr/dynamics/integrators.pyi +36 -0
- tnfr/dynamics/learning.py +310 -0
- tnfr/dynamics/learning.pyi +33 -0
- tnfr/dynamics/metabolism.py +254 -0
- tnfr/dynamics/nbody.py +796 -0
- tnfr/dynamics/nbody_tnfr.py +783 -0
- tnfr/dynamics/propagation.py +326 -0
- tnfr/dynamics/runtime.py +908 -0
- tnfr/dynamics/runtime.pyi +77 -0
- tnfr/dynamics/sampling.py +10 -5
- tnfr/dynamics/sampling.pyi +7 -0
- tnfr/dynamics/selectors.py +711 -0
- tnfr/dynamics/selectors.pyi +85 -0
- tnfr/dynamics/structural_clip.py +207 -0
- tnfr/errors/__init__.py +37 -0
- tnfr/errors/contextual.py +492 -0
- tnfr/execution.py +77 -55
- tnfr/execution.pyi +45 -0
- tnfr/extensions/__init__.py +205 -0
- tnfr/extensions/__init__.pyi +18 -0
- tnfr/extensions/base.py +173 -0
- tnfr/extensions/base.pyi +35 -0
- tnfr/extensions/business/__init__.py +71 -0
- tnfr/extensions/business/__init__.pyi +11 -0
- tnfr/extensions/business/cookbook.py +88 -0
- tnfr/extensions/business/cookbook.pyi +8 -0
- tnfr/extensions/business/health_analyzers.py +202 -0
- tnfr/extensions/business/health_analyzers.pyi +9 -0
- tnfr/extensions/business/patterns.py +183 -0
- tnfr/extensions/business/patterns.pyi +8 -0
- tnfr/extensions/medical/__init__.py +73 -0
- tnfr/extensions/medical/__init__.pyi +11 -0
- tnfr/extensions/medical/cookbook.py +88 -0
- tnfr/extensions/medical/cookbook.pyi +8 -0
- tnfr/extensions/medical/health_analyzers.py +181 -0
- tnfr/extensions/medical/health_analyzers.pyi +9 -0
- tnfr/extensions/medical/patterns.py +163 -0
- tnfr/extensions/medical/patterns.pyi +8 -0
- tnfr/flatten.py +29 -50
- tnfr/flatten.pyi +21 -0
- tnfr/gamma.py +66 -53
- tnfr/gamma.pyi +36 -0
- tnfr/glyph_history.py +144 -57
- tnfr/glyph_history.pyi +35 -0
- tnfr/glyph_runtime.py +19 -0
- tnfr/glyph_runtime.pyi +8 -0
- tnfr/immutable.py +70 -30
- tnfr/immutable.pyi +36 -0
- tnfr/initialization.py +22 -16
- tnfr/initialization.pyi +65 -0
- tnfr/io.py +5 -241
- tnfr/io.pyi +13 -0
- tnfr/locking.pyi +7 -0
- tnfr/mathematics/__init__.py +79 -0
- tnfr/mathematics/backend.py +453 -0
- tnfr/mathematics/backend.pyi +99 -0
- tnfr/mathematics/dynamics.py +408 -0
- tnfr/mathematics/dynamics.pyi +90 -0
- tnfr/mathematics/epi.py +391 -0
- tnfr/mathematics/epi.pyi +65 -0
- tnfr/mathematics/generators.py +242 -0
- tnfr/mathematics/generators.pyi +29 -0
- tnfr/mathematics/metrics.py +119 -0
- tnfr/mathematics/metrics.pyi +16 -0
- tnfr/mathematics/operators.py +239 -0
- tnfr/mathematics/operators.pyi +59 -0
- tnfr/mathematics/operators_factory.py +124 -0
- tnfr/mathematics/operators_factory.pyi +11 -0
- tnfr/mathematics/projection.py +87 -0
- tnfr/mathematics/projection.pyi +33 -0
- tnfr/mathematics/runtime.py +182 -0
- tnfr/mathematics/runtime.pyi +64 -0
- tnfr/mathematics/spaces.py +256 -0
- tnfr/mathematics/spaces.pyi +83 -0
- tnfr/mathematics/transforms.py +305 -0
- tnfr/mathematics/transforms.pyi +62 -0
- tnfr/metrics/__init__.py +47 -9
- tnfr/metrics/__init__.pyi +20 -0
- tnfr/metrics/buffer_cache.py +163 -0
- tnfr/metrics/buffer_cache.pyi +24 -0
- tnfr/metrics/cache_utils.py +214 -0
- tnfr/metrics/coherence.py +1510 -330
- tnfr/metrics/coherence.pyi +129 -0
- tnfr/metrics/common.py +23 -16
- tnfr/metrics/common.pyi +35 -0
- tnfr/metrics/core.py +251 -36
- tnfr/metrics/core.pyi +13 -0
- tnfr/metrics/diagnosis.py +709 -110
- tnfr/metrics/diagnosis.pyi +86 -0
- tnfr/metrics/emergence.py +245 -0
- tnfr/metrics/export.py +60 -18
- tnfr/metrics/export.pyi +7 -0
- tnfr/metrics/glyph_timing.py +233 -43
- tnfr/metrics/glyph_timing.pyi +81 -0
- tnfr/metrics/learning_metrics.py +280 -0
- tnfr/metrics/learning_metrics.pyi +21 -0
- tnfr/metrics/phase_coherence.py +351 -0
- tnfr/metrics/phase_compatibility.py +349 -0
- tnfr/metrics/reporting.py +63 -28
- tnfr/metrics/reporting.pyi +25 -0
- tnfr/metrics/sense_index.py +1126 -43
- tnfr/metrics/sense_index.pyi +9 -0
- tnfr/metrics/trig.py +215 -23
- tnfr/metrics/trig.pyi +13 -0
- tnfr/metrics/trig_cache.py +148 -24
- tnfr/metrics/trig_cache.pyi +10 -0
- tnfr/multiscale/__init__.py +32 -0
- tnfr/multiscale/hierarchical.py +517 -0
- tnfr/node.py +646 -140
- tnfr/node.pyi +139 -0
- tnfr/observers.py +160 -45
- tnfr/observers.pyi +31 -0
- tnfr/ontosim.py +23 -19
- tnfr/ontosim.pyi +28 -0
- tnfr/operators/__init__.py +1358 -106
- tnfr/operators/__init__.pyi +31 -0
- tnfr/operators/algebra.py +277 -0
- tnfr/operators/canonical_patterns.py +420 -0
- tnfr/operators/cascade.py +267 -0
- tnfr/operators/cycle_detection.py +358 -0
- tnfr/operators/definitions.py +4108 -0
- tnfr/operators/definitions.pyi +78 -0
- tnfr/operators/grammar.py +1164 -0
- tnfr/operators/grammar.pyi +140 -0
- tnfr/operators/hamiltonian.py +710 -0
- tnfr/operators/health_analyzer.py +809 -0
- tnfr/operators/jitter.py +107 -38
- tnfr/operators/jitter.pyi +11 -0
- tnfr/operators/lifecycle.py +314 -0
- tnfr/operators/metabolism.py +618 -0
- tnfr/operators/metrics.py +2138 -0
- tnfr/operators/network_analysis/__init__.py +27 -0
- tnfr/operators/network_analysis/source_detection.py +186 -0
- tnfr/operators/nodal_equation.py +395 -0
- tnfr/operators/pattern_detection.py +660 -0
- tnfr/operators/patterns.py +669 -0
- tnfr/operators/postconditions/__init__.py +38 -0
- tnfr/operators/postconditions/mutation.py +236 -0
- tnfr/operators/preconditions/__init__.py +1226 -0
- tnfr/operators/preconditions/coherence.py +305 -0
- tnfr/operators/preconditions/dissonance.py +236 -0
- tnfr/operators/preconditions/emission.py +128 -0
- tnfr/operators/preconditions/mutation.py +580 -0
- tnfr/operators/preconditions/reception.py +125 -0
- tnfr/operators/preconditions/resonance.py +364 -0
- tnfr/operators/registry.py +74 -0
- tnfr/operators/registry.pyi +9 -0
- tnfr/operators/remesh.py +1415 -91
- tnfr/operators/remesh.pyi +26 -0
- tnfr/operators/structural_units.py +268 -0
- tnfr/operators/unified_grammar.py +105 -0
- tnfr/parallel/__init__.py +54 -0
- tnfr/parallel/auto_scaler.py +234 -0
- tnfr/parallel/distributed.py +384 -0
- tnfr/parallel/engine.py +238 -0
- tnfr/parallel/gpu_engine.py +420 -0
- tnfr/parallel/monitoring.py +248 -0
- tnfr/parallel/partitioner.py +459 -0
- tnfr/py.typed +0 -0
- tnfr/recipes/__init__.py +22 -0
- tnfr/recipes/cookbook.py +743 -0
- tnfr/rng.py +75 -151
- tnfr/rng.pyi +26 -0
- tnfr/schemas/__init__.py +8 -0
- tnfr/schemas/grammar.json +94 -0
- tnfr/sdk/__init__.py +107 -0
- tnfr/sdk/__init__.pyi +19 -0
- tnfr/sdk/adaptive_system.py +173 -0
- tnfr/sdk/adaptive_system.pyi +21 -0
- tnfr/sdk/builders.py +370 -0
- tnfr/sdk/builders.pyi +51 -0
- tnfr/sdk/fluent.py +1121 -0
- tnfr/sdk/fluent.pyi +74 -0
- tnfr/sdk/templates.py +342 -0
- tnfr/sdk/templates.pyi +41 -0
- tnfr/sdk/utils.py +341 -0
- tnfr/secure_config.py +46 -0
- tnfr/security/__init__.py +70 -0
- tnfr/security/database.py +514 -0
- tnfr/security/subprocess.py +503 -0
- tnfr/security/validation.py +290 -0
- tnfr/selector.py +59 -22
- tnfr/selector.pyi +19 -0
- tnfr/sense.py +92 -67
- tnfr/sense.pyi +23 -0
- tnfr/services/__init__.py +17 -0
- tnfr/services/orchestrator.py +325 -0
- tnfr/sparse/__init__.py +39 -0
- tnfr/sparse/representations.py +492 -0
- tnfr/structural.py +639 -263
- tnfr/structural.pyi +83 -0
- tnfr/telemetry/__init__.py +35 -0
- tnfr/telemetry/cache_metrics.py +226 -0
- tnfr/telemetry/cache_metrics.pyi +64 -0
- tnfr/telemetry/nu_f.py +422 -0
- tnfr/telemetry/nu_f.pyi +108 -0
- tnfr/telemetry/verbosity.py +36 -0
- tnfr/telemetry/verbosity.pyi +15 -0
- tnfr/tokens.py +2 -4
- tnfr/tokens.pyi +36 -0
- tnfr/tools/__init__.py +20 -0
- tnfr/tools/domain_templates.py +478 -0
- tnfr/tools/sequence_generator.py +846 -0
- tnfr/topology/__init__.py +13 -0
- tnfr/topology/asymmetry.py +151 -0
- tnfr/trace.py +300 -126
- tnfr/trace.pyi +42 -0
- tnfr/tutorials/__init__.py +38 -0
- tnfr/tutorials/autonomous_evolution.py +285 -0
- tnfr/tutorials/interactive.py +1576 -0
- tnfr/tutorials/structural_metabolism.py +238 -0
- tnfr/types.py +743 -12
- tnfr/types.pyi +357 -0
- tnfr/units.py +68 -0
- tnfr/units.pyi +13 -0
- tnfr/utils/__init__.py +282 -0
- tnfr/utils/__init__.pyi +215 -0
- tnfr/utils/cache.py +4223 -0
- tnfr/utils/cache.pyi +470 -0
- tnfr/{callback_utils.py → utils/callbacks.py} +26 -39
- tnfr/utils/callbacks.pyi +49 -0
- tnfr/utils/chunks.py +108 -0
- tnfr/utils/chunks.pyi +22 -0
- tnfr/utils/data.py +428 -0
- tnfr/utils/data.pyi +74 -0
- tnfr/utils/graph.py +85 -0
- tnfr/utils/graph.pyi +10 -0
- tnfr/utils/init.py +821 -0
- tnfr/utils/init.pyi +80 -0
- tnfr/utils/io.py +559 -0
- tnfr/utils/io.pyi +66 -0
- tnfr/{helpers → utils}/numeric.py +51 -24
- tnfr/utils/numeric.pyi +21 -0
- tnfr/validation/__init__.py +257 -0
- tnfr/validation/__init__.pyi +85 -0
- tnfr/validation/compatibility.py +460 -0
- tnfr/validation/compatibility.pyi +6 -0
- tnfr/validation/config.py +73 -0
- tnfr/validation/graph.py +139 -0
- tnfr/validation/graph.pyi +18 -0
- tnfr/validation/input_validation.py +755 -0
- tnfr/validation/invariants.py +712 -0
- tnfr/validation/rules.py +253 -0
- tnfr/validation/rules.pyi +44 -0
- tnfr/validation/runtime.py +279 -0
- tnfr/validation/runtime.pyi +28 -0
- tnfr/validation/sequence_validator.py +162 -0
- tnfr/validation/soft_filters.py +170 -0
- tnfr/validation/soft_filters.pyi +32 -0
- tnfr/validation/spectral.py +164 -0
- tnfr/validation/spectral.pyi +42 -0
- tnfr/validation/validator.py +1266 -0
- tnfr/validation/window.py +39 -0
- tnfr/validation/window.pyi +1 -0
- tnfr/visualization/__init__.py +98 -0
- tnfr/visualization/cascade_viz.py +256 -0
- tnfr/visualization/hierarchy.py +284 -0
- tnfr/visualization/sequence_plotter.py +784 -0
- tnfr/viz/__init__.py +60 -0
- tnfr/viz/matplotlib.py +278 -0
- tnfr/viz/matplotlib.pyi +35 -0
- tnfr-8.5.0.dist-info/METADATA +573 -0
- tnfr-8.5.0.dist-info/RECORD +353 -0
- {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/entry_points.txt +1 -0
- {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/licenses/LICENSE.md +1 -1
- tnfr/collections_utils.py +0 -300
- tnfr/config.py +0 -32
- tnfr/grammar.py +0 -344
- tnfr/graph_utils.py +0 -84
- tnfr/helpers/__init__.py +0 -71
- tnfr/import_utils.py +0 -228
- tnfr/json_utils.py +0 -162
- tnfr/logging_utils.py +0 -116
- tnfr/presets.py +0 -60
- tnfr/validators.py +0 -84
- tnfr/value_utils.py +0 -59
- tnfr-4.5.2.dist-info/METADATA +0 -379
- tnfr-4.5.2.dist-info/RECORD +0 -67
- {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/WHEEL +0 -0
- {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/top_level.txt +0 -0
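The moves in this listing (for example tnfr/callback_utils.py → tnfr/utils/callbacks.py and tnfr/constants_glyphs.py → tnfr/config/constants.py) imply that imports written against the 4.5.2 layout need updating. The following is a minimal, hypothetical compatibility sketch based only on the path moves listed above; the exact symbols each new module re-exports are an assumption, not something this diff confirms.

# Hypothetical shim based only on the file moves listed above.
# The module paths on the 8.x side are assumptions inferred from the new layout.
try:
    # tnfr >= 8.x layout (assumed)
    from tnfr.utils import callbacks as callback_utils      # was tnfr/callback_utils.py
    from tnfr.config import constants as constants_glyphs   # was tnfr/constants_glyphs.py
except ImportError:
    # tnfr 4.5.2 layout
    from tnfr import callback_utils, constants_glyphs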
tnfr/metrics/diagnosis.py
CHANGED

@@ -2,47 +2,307 @@
 
 from __future__ import annotations
 
- …
+import math
+from collections import deque
+from collections.abc import Mapping, MutableMapping, Sequence
+from concurrent.futures import ProcessPoolExecutor
+from dataclasses import dataclass
+from functools import partial
 from operator import ge, le
-from
+from statistics import StatisticsError, fmean
+from typing import Any, Callable, Iterable, cast
 
+from ..alias import get_attr
+from ..utils import CallbackEvent, callback_manager
 from ..constants import (
+    STATE_DISSONANT,
+    STATE_STABLE,
+    STATE_TRANSITION,
     VF_KEY,
-    get_aliases,
     get_param,
+    normalise_state_token,
 )
-from ..
-from ..glyph_history import
-from ..
-from ..
- …
- …
- …
- …
+from ..constants.aliases import ALIAS_DNFR, ALIAS_EPI, ALIAS_SI, ALIAS_VF
+from ..glyph_history import append_metric, ensure_history
+from ..utils import clamp01, resolve_chunk_size, similarity_abs
+from ..types import (
+    DiagnosisNodeData,
+    DiagnosisPayload,
+    DiagnosisPayloadChunk,
+    DiagnosisResult,
+    DiagnosisResultList,
+    DiagnosisSharedState,
+    NodeId,
+    TNFRGraph,
+)
+from ..utils import get_numpy
+from .coherence import CoherenceMatrixPayload, coherence_matrix, local_phase_sync
+from .common import (
+    _coerce_jobs,
+    compute_dnfr_accel_max,
+    min_max_range,
+    normalize_dnfr,
 )
+from .trig_cache import compute_theta_trig, get_trig_cache
+
+CoherenceSeries = Sequence[CoherenceMatrixPayload | None]
+CoherenceHistory = Mapping[str, CoherenceSeries]
+
 
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
+def _coherence_matrix_to_numpy(
+    weight_matrix: Any,
+    size: int,
+    np_mod: Any,
+) -> Any:
+    """Convert stored coherence weights into a dense NumPy array."""
+
+    if weight_matrix is None or np_mod is None or size <= 0:
+        return None
+
+    ndarray_type: Any = getattr(np_mod, "ndarray", tuple())
+    if ndarray_type and isinstance(weight_matrix, ndarray_type):
+        matrix = weight_matrix.astype(float, copy=True)
+    elif isinstance(weight_matrix, (list, tuple)):
+        weight_seq = list(weight_matrix)
+        if not weight_seq:
+            matrix = np_mod.zeros((size, size), dtype=float)
+        else:
+            first = weight_seq[0]
+            if isinstance(first, (list, tuple)) and len(first) == size:
+                matrix = np_mod.array(weight_seq, dtype=float)
+            elif (
+                isinstance(first, (list, tuple))
+                and len(first) == 3
+                and not isinstance(first[0], (list, tuple))
+            ):
+                matrix = np_mod.zeros((size, size), dtype=float)
+                for i, j, weight in weight_seq:
+                    matrix[int(i), int(j)] = float(weight)
+            else:
+                return None
+    else:
+        return None
+
+    if matrix.shape != (size, size):
+        return None
+    np_mod.fill_diagonal(matrix, 0.0)
+    return matrix
+
+
+def _weighted_phase_sync_vectorized(
+    matrix: Any,
+    cos_vals: Any,
+    sin_vals: Any,
+    np_mod: Any,
+) -> Any:
+    """Vectorised computation of weighted local phase synchrony."""
+
+    denom = np_mod.sum(matrix, axis=1)
+    if np_mod.all(denom == 0.0):
+        return np_mod.zeros_like(denom, dtype=float)
+    real = matrix @ cos_vals
+    imag = matrix @ sin_vals
+    magnitude = np_mod.hypot(real, imag)
+    safe_denom = np_mod.where(denom == 0.0, 1.0, denom)
+    return magnitude / safe_denom
+
+
+def _unweighted_phase_sync_vectorized(
+    nodes: Sequence[Any],
+    neighbors_map: Mapping[Any, tuple[Any, ...]],
+    cos_arr: Any,
+    sin_arr: Any,
+    index_map: Mapping[Any, int],
+    np_mod: Any,
+) -> list[float]:
+    """Compute unweighted phase synchrony using NumPy helpers."""
+
+    results: list[float] = []
+    for node in nodes:
+        neighbors = neighbors_map.get(node, ())
+        if not neighbors:
+            results.append(0.0)
+            continue
+        indices = [index_map[nb] for nb in neighbors if nb in index_map]
+        if not indices:
+            results.append(0.0)
+            continue
+        cos_vals = np_mod.take(cos_arr, indices)
+        sin_vals = np_mod.take(sin_arr, indices)
+        real = np_mod.sum(cos_vals)
+        imag = np_mod.sum(sin_vals)
+        denom = float(len(indices))
+        if denom == 0.0:
+            results.append(0.0)
+        else:
+            results.append(float(np_mod.hypot(real, imag) / denom))
+    return results
+
+
+def _neighbor_means_vectorized(
+    nodes: Sequence[Any],
+    neighbors_map: Mapping[Any, tuple[Any, ...]],
+    epi_arr: Any,
+    index_map: Mapping[Any, int],
+    np_mod: Any,
+) -> list[float | None]:
+    """Vectorized helper to compute neighbour EPI means."""
+
+    results: list[float | None] = []
+    for node in nodes:
+        neighbors = neighbors_map.get(node, ())
+        if not neighbors:
+            results.append(None)
+            continue
+        indices = [index_map[nb] for nb in neighbors if nb in index_map]
+        if not indices:
+            results.append(None)
+            continue
+        values = np_mod.take(epi_arr, indices)
+        results.append(float(np_mod.mean(values)))
+    return results
+
+
+@dataclass(frozen=True)
+class RLocalWorkerArgs:
+    """Typed payload passed to :func:`_rlocal_worker`."""
+
+    chunk: Sequence[Any]
+    coherence_nodes: Sequence[Any]
+    weight_matrix: Any
+    weight_index: Mapping[Any, int]
+    neighbors_map: Mapping[Any, tuple[Any, ...]]
+    cos_map: Mapping[Any, float]
+    sin_map: Mapping[Any, float]
+
+
+@dataclass(frozen=True)
+class NeighborMeanWorkerArgs:
+    """Typed payload passed to :func:`_neighbor_mean_worker`."""
+
+    chunk: Sequence[Any]
+    neighbors_map: Mapping[Any, tuple[Any, ...]]
+    epi_map: Mapping[Any, float]
+
+
+def _rlocal_worker(args: RLocalWorkerArgs) -> list[float]:
+    """Worker used to compute ``R_local`` in Python fallbacks."""
+
+    results: list[float] = []
+    for node in args.chunk:
+        if args.coherence_nodes and args.weight_matrix is not None:
+            idx = args.weight_index.get(node)
+            if idx is None:
+                rloc = 0.0
+            else:
+                rloc = _weighted_phase_sync_from_matrix(
+                    idx,
+                    node,
+                    args.coherence_nodes,
+                    args.weight_matrix,
+                    args.cos_map,
+                    args.sin_map,
+                )
+        else:
+            rloc = _local_phase_sync_unweighted(
+                args.neighbors_map.get(node, ()),
+                args.cos_map,
+                args.sin_map,
+            )
+        results.append(float(rloc))
+    return results
+
+
+def _neighbor_mean_worker(args: NeighborMeanWorkerArgs) -> list[float | None]:
+    """Worker used to compute neighbour EPI means in Python mode."""
+
+    results: list[float | None] = []
+    for node in args.chunk:
+        neighbors = args.neighbors_map.get(node, ())
+        if not neighbors:
+            results.append(None)
+            continue
+        try:
+            results.append(fmean(args.epi_map[nb] for nb in neighbors))
+        except StatisticsError:
+            results.append(None)
+    return results
+
+
+def _weighted_phase_sync_from_matrix(
+    node_index: int,
+    node: Any,
+    nodes_order: Sequence[Any],
+    matrix: Any,
+    cos_map: Mapping[Any, float],
+    sin_map: Mapping[Any, float],
+) -> float:
+    """Compute weighted phase synchrony using a cached matrix."""
+
+    if matrix is None or not nodes_order:
+        return 0.0
+
+    num = 0.0 + 0.0j
+    den = 0.0
+
+    if isinstance(matrix, list) and matrix and isinstance(matrix[0], list):
+        row = matrix[node_index]
+        for weight, neighbor in zip(row, nodes_order):
+            if neighbor == node:
+                continue
+            w = float(weight)
+            if w == 0.0:
+                continue
+            cos_j = cos_map.get(neighbor)
+            sin_j = sin_map.get(neighbor)
+            if cos_j is None or sin_j is None:
+                continue
+            den += w
+            num += w * complex(cos_j, sin_j)
+    else:
+        for ii, jj, weight in matrix:
+            if ii != node_index:
+                continue
+            neighbor = nodes_order[jj]
+            if neighbor == node:
+                continue
+            w = float(weight)
+            if w == 0.0:
+                continue
+            cos_j = cos_map.get(neighbor)
+            sin_j = sin_map.get(neighbor)
+            if cos_j is None or sin_j is None:
+                continue
+            den += w
+            num += w * complex(cos_j, sin_j)
+
+    return abs(num / den) if den else 0.0
+
+
+def _local_phase_sync_unweighted(
+    neighbors: Iterable[Any],
+    cos_map: Mapping[Any, float],
+    sin_map: Mapping[Any, float],
+) -> float:
+    """Fallback unweighted phase synchrony based on neighbours."""
+
+    num = 0.0 + 0.0j
+    den = 0.0
+    for neighbor in neighbors:
+        cos_j = cos_map.get(neighbor)
+        sin_j = sin_map.get(neighbor)
+        if cos_j is None or sin_j is None:
+            continue
+        num += complex(cos_j, sin_j)
+        den += 1.0
+    return abs(num / den) if den else 0.0
+
+
+def _state_from_thresholds(
+    Rloc: float,
+    dnfr_n: float,
+    cfg: Mapping[str, Any],
+) -> str:
     stb = cfg.get("stable", {"Rloc_hi": 0.8, "dnfr_lo": 0.2, "persist": 3})
     dsr = cfg.get("dissonance", {"Rloc_lo": 0.4, "dnfr_hi": 0.5, "persist": 3})
 
@@ -51,29 +311,28 @@ def _state_from_thresholds(Rloc, dnfr_n, cfg):
         "dnfr": (dnfr_n, float(stb["dnfr_lo"]), le),
     }
     if all(comp(val, thr) for val, thr, comp in stable_checks.values()):
-        return
+        return STATE_STABLE
 
     dissonant_checks = {
         "Rloc": (Rloc, float(dsr["Rloc_lo"]), le),
         "dnfr": (dnfr_n, float(dsr["dnfr_hi"]), ge),
     }
     if all(comp(val, thr) for val, thr, comp in dissonant_checks.values()):
-        return
+        return STATE_DISSONANT
 
-    return
+    return STATE_TRANSITION
 
 
-def _recommendation(state, cfg):
+def _recommendation(state: str, cfg: Mapping[str, Any]) -> list[Any]:
     adv = cfg.get("advice", {})
- …
- …
-        "transicion": "transition",
-        "disonante": "dissonant",
-    }[state]
-    return list(adv.get(key, []))
+    canonical_state = normalise_state_token(state)
+    return list(adv.get(canonical_state, []))
 
 
-def _get_last_weights(
+def _get_last_weights(
+    G: TNFRGraph,
+    hist: CoherenceHistory,
+) -> tuple[CoherenceMatrixPayload | None, CoherenceMatrixPayload | None]:
     """Return last Wi and Wm matrices from history."""
     CfgW = get_param(G, "COHERENCE")
     Wkey = CfgW.get("Wi_history_key", "W_i")
@@ -86,105 +345,440 @@ def _get_last_weights(G, hist):
 
 
 def _node_diagnostics(
- …
- …
- …
- …
- …
- …
- …
-    epi_min
-    epi_max
- …
- …
-)
- …
- …
- …
- …
- …
- …
- …
- …
- …
-    else:
-        row = Wm_last
-        Rloc = local_phase_sync_weighted(
-            G, n, nodes_order=nodes, W_row=row, node_to_index=node_to_index
+    node_data: DiagnosisNodeData,
+    shared: DiagnosisSharedState,
+) -> DiagnosisResult:
+    """Compute diagnostic payload for a single node."""
+
+    dcfg = shared["dcfg"]
+    compute_symmetry = shared["compute_symmetry"]
+    epi_min = shared["epi_min"]
+    epi_max = shared["epi_max"]
+
+    node = node_data["node"]
+    Si = clamp01(float(node_data["Si"]))
+    EPI = float(node_data["EPI"])
+    vf = float(node_data["VF"])
+    dnfr_n = clamp01(float(node_data["dnfr_norm"]))
+    Rloc = float(node_data["R_local"])
+
+    if compute_symmetry:
+        epi_bar = node_data.get("neighbor_epi_mean")
+        symm = (
+            1.0 if epi_bar is None else similarity_abs(EPI, epi_bar, epi_min, epi_max)
         )
     else:
- …
+        symm = None
 
-    symm = (
-        _symmetry_index(G, n, epi_min=epi_min, epi_max=epi_max)
-        if dcfg.get("compute_symmetry", True)
-        else None
-    )
     state = _state_from_thresholds(Rloc, dnfr_n, dcfg)
+    canonical_state = normalise_state_token(state)
 
     alerts = []
-    if
-        dcfg.get("dissonance", {}).get("dnfr_hi", 0.5)
-    ):
+    if canonical_state == STATE_DISSONANT and dnfr_n >= shared["dissonance_hi"]:
         alerts.append("high structural tension")
 
-    advice = _recommendation(
+    advice = _recommendation(canonical_state, dcfg)
 
- …
-        "node":
+    payload: DiagnosisPayload = {
+        "node": node,
         "Si": Si,
         "EPI": EPI,
         VF_KEY: vf,
         "dnfr_norm": dnfr_n,
-        "W_i": (
+        "W_i": node_data.get("W_i"),
         "R_local": Rloc,
         "symmetry": symm,
-        "state":
+        "state": canonical_state,
         "advice": advice,
         "alerts": alerts,
     }
 
+    return node, payload
+
+
+def _diagnosis_worker_chunk(
+    chunk: DiagnosisPayloadChunk,
+    shared: DiagnosisSharedState,
+) -> DiagnosisResultList:
+    """Evaluate diagnostics for a chunk of nodes."""
 
- …
+    return [_node_diagnostics(item, shared) for item in chunk]
+
+
+def _diagnosis_step(
+    G: TNFRGraph,
+    ctx: DiagnosisSharedState | None = None,
+    *,
+    n_jobs: int | None = None,
+) -> None:
     del ctx
 
+    if n_jobs is None:
+        n_jobs = _coerce_jobs(G.graph.get("DIAGNOSIS_N_JOBS"))
+    else:
+        n_jobs = _coerce_jobs(n_jobs)
+
     dcfg = get_param(G, "DIAGNOSIS")
     if not dcfg.get("enabled", True):
         return
 
     hist = ensure_history(G)
+    coherence_hist = cast(CoherenceHistory, hist)
     key = dcfg.get("history_key", "nodal_diag")
 
+    existing_diag_history = hist.get(key, [])
+    if isinstance(existing_diag_history, deque):
+        snapshots = list(existing_diag_history)
+    elif isinstance(existing_diag_history, list):
+        snapshots = existing_diag_history
+    else:
+        snapshots = []
+
+    for snapshot in snapshots:
+        if not isinstance(snapshot, Mapping):
+            continue
+        for node, payload in snapshot.items():
+            if not isinstance(payload, Mapping):
+                continue
+            state_value = payload.get("state")
+            if not isinstance(state_value, str):
+                continue
+            canonical = normalise_state_token(state_value)
+            if canonical == state_value:
+                continue
+            if isinstance(payload, MutableMapping):
+                payload["state"] = canonical
+            elif isinstance(snapshot, MutableMapping):
+                new_payload = dict(payload)
+                new_payload["state"] = canonical
+                snapshot[node] = new_payload
+
     norms = compute_dnfr_accel_max(G)
     G.graph["_sel_norms"] = norms
     dnfr_max = float(norms.get("dnfr_max", 1.0)) or 1.0
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
- …
+
+    nodes_data: list[tuple[NodeId, dict[str, Any]]] = list(G.nodes(data=True))
+    nodes: list[NodeId] = [n for n, _ in nodes_data]
+
+    Wi_last, Wm_last = _get_last_weights(G, coherence_hist)
+
+    np_mod = get_numpy()
+    supports_vector = bool(
+        np_mod is not None
+        and all(
+            hasattr(np_mod, attr)
+            for attr in (
+                "fromiter",
+                "clip",
+                "abs",
+                "maximum",
+                "minimum",
+                "array",
+                "zeros",
+                "zeros_like",
+                "sum",
+                "hypot",
+                "where",
+                "take",
+                "mean",
+                "fill_diagonal",
+                "all",
+            )
+        )
+    )
+
+    if not nodes:
+        append_metric(hist, key, {})
+        return
+
+    rloc_values: list[float]
+
+    if supports_vector:
+        epi_arr = np_mod.fromiter(
+            (cast(float, get_attr(nd, ALIAS_EPI, 0.0)) for _, nd in nodes_data),
+            dtype=float,
+            count=len(nodes_data),
+        )
+        epi_min = float(np_mod.min(epi_arr))
+        epi_max = float(np_mod.max(epi_arr))
+        epi_vals = epi_arr.tolist()
+
+        si_arr = np_mod.clip(
+            np_mod.fromiter(
+                (cast(float, get_attr(nd, ALIAS_SI, 0.0)) for _, nd in nodes_data),
+                dtype=float,
+                count=len(nodes_data),
+            ),
+            0.0,
+            1.0,
+        )
+        si_vals = si_arr.tolist()
+
+        vf_arr = np_mod.fromiter(
+            (cast(float, get_attr(nd, ALIAS_VF, 0.0)) for _, nd in nodes_data),
+            dtype=float,
+            count=len(nodes_data),
+        )
+        vf_vals = vf_arr.tolist()
+
+        if dnfr_max > 0:
+            dnfr_arr = np_mod.clip(
+                np_mod.fromiter(
+                    (
+                        abs(cast(float, get_attr(nd, ALIAS_DNFR, 0.0)))
+                        for _, nd in nodes_data
+                    ),
+                    dtype=float,
+                    count=len(nodes_data),
+                )
+                / dnfr_max,
+                0.0,
+                1.0,
+            )
+            dnfr_norms = dnfr_arr.tolist()
+        else:
+            dnfr_norms = [0.0] * len(nodes)
+    else:
+        epi_vals = [cast(float, get_attr(nd, ALIAS_EPI, 0.0)) for _, nd in nodes_data]
+        epi_min, epi_max = min_max_range(epi_vals, default=(0.0, 1.0))
+        si_vals = [clamp01(get_attr(nd, ALIAS_SI, 0.0)) for _, nd in nodes_data]
+        vf_vals = [cast(float, get_attr(nd, ALIAS_VF, 0.0)) for _, nd in nodes_data]
+        dnfr_norms = [
+            normalize_dnfr(nd, dnfr_max) if dnfr_max > 0 else 0.0
+            for _, nd in nodes_data
+        ]
+
+    epi_map = {node: epi_vals[idx] for idx, node in enumerate(nodes)}
+
+    trig_cache = get_trig_cache(G, np=np_mod)
+    trig_local = compute_theta_trig(nodes_data, np=np_mod)
+    cos_map = dict(trig_cache.cos)
+    sin_map = dict(trig_cache.sin)
+    cos_map.update(trig_local.cos)
+    sin_map.update(trig_local.sin)
+
+    neighbors_map = {n: tuple(G.neighbors(n)) for n in nodes}
+
+    if Wm_last is None:
+        coherence_nodes, weight_matrix = coherence_matrix(G)
+        if coherence_nodes is None:
+            coherence_nodes = []
+            weight_matrix = None
+    else:
+        coherence_nodes = list(nodes)
+        weight_matrix = Wm_last
+
+    coherence_nodes = list(coherence_nodes)
+    weight_index = {node: idx for idx, node in enumerate(coherence_nodes)}
+
+    node_index_map: dict[Any, int] | None = None
+
+    if supports_vector:
+        size = len(coherence_nodes)
+        matrix_np = (
+            _coherence_matrix_to_numpy(weight_matrix, size, np_mod) if size else None
+        )
+        if matrix_np is not None and size:
+            cos_weight = np_mod.fromiter(
+                (float(cos_map.get(node, 0.0)) for node in coherence_nodes),
+                dtype=float,
+                count=size,
+            )
+            sin_weight = np_mod.fromiter(
+                (float(sin_map.get(node, 0.0)) for node in coherence_nodes),
+                dtype=float,
+                count=size,
+            )
+            weighted_sync = _weighted_phase_sync_vectorized(
+                matrix_np,
+                cos_weight,
+                sin_weight,
+                np_mod,
+            )
+            rloc_map = {
+                coherence_nodes[idx]: float(weighted_sync[idx]) for idx in range(size)
+            }
+        else:
+            rloc_map = {}
+
+        node_index_map = {node: idx for idx, node in enumerate(nodes)}
+        if not rloc_map:
+            cos_arr = np_mod.fromiter(
+                (float(cos_map.get(node, 0.0)) for node in nodes),
+                dtype=float,
+                count=len(nodes),
+            )
+            sin_arr = np_mod.fromiter(
+                (float(sin_map.get(node, 0.0)) for node in nodes),
+                dtype=float,
+                count=len(nodes),
+            )
+            rloc_values = _unweighted_phase_sync_vectorized(
+                nodes,
+                neighbors_map,
+                cos_arr,
+                sin_arr,
+                node_index_map,
+                np_mod,
+            )
+        else:
+            rloc_values = [rloc_map.get(node, 0.0) for node in nodes]
+    else:
+        if n_jobs and n_jobs > 1 and len(nodes) > 1:
+            approx_chunk = math.ceil(len(nodes) / n_jobs) if n_jobs else None
+            chunk_size = resolve_chunk_size(
+                approx_chunk,
+                len(nodes),
+                minimum=1,
+            )
+            rloc_values = []
+            with ProcessPoolExecutor(max_workers=n_jobs) as executor:
+                futures = [
+                    executor.submit(
+                        _rlocal_worker,
+                        RLocalWorkerArgs(
+                            chunk=nodes[idx : idx + chunk_size],
+                            coherence_nodes=coherence_nodes,
+                            weight_matrix=weight_matrix,
+                            weight_index=weight_index,
+                            neighbors_map=neighbors_map,
+                            cos_map=cos_map,
+                            sin_map=sin_map,
+                        ),
+                    )
+                    for idx in range(0, len(nodes), chunk_size)
+                ]
+                for fut in futures:
+                    rloc_values.extend(fut.result())
+        else:
+            rloc_values = _rlocal_worker(
+                RLocalWorkerArgs(
+                    chunk=nodes,
+                    coherence_nodes=coherence_nodes,
+                    weight_matrix=weight_matrix,
+                    weight_index=weight_index,
+                    neighbors_map=neighbors_map,
+                    cos_map=cos_map,
+                    sin_map=sin_map,
+                )
+            )
+
+    if isinstance(Wi_last, (list, tuple)) and Wi_last:
+        wi_values = [
+            Wi_last[i] if i < len(Wi_last) else None for i in range(len(nodes))
+        ]
+    else:
+        wi_values = [None] * len(nodes)
+
+    compute_symmetry = bool(dcfg.get("compute_symmetry", True))
+    neighbor_means: list[float | None]
+    if compute_symmetry:
+        if supports_vector and node_index_map is not None and len(nodes):
+            neighbor_means = _neighbor_means_vectorized(
+                nodes,
+                neighbors_map,
+                epi_arr,
+                node_index_map,
+                np_mod,
+            )
+        elif n_jobs and n_jobs > 1 and len(nodes) > 1:
+            approx_chunk = math.ceil(len(nodes) / n_jobs) if n_jobs else None
+            chunk_size = resolve_chunk_size(
+                approx_chunk,
+                len(nodes),
+                minimum=1,
+            )
+            neighbor_means = cast(list[float | None], [])
+            with ProcessPoolExecutor(max_workers=n_jobs) as executor:
+                submit = cast(Callable[..., Any], executor.submit)
+                futures = [
+                    submit(
+                        cast(
+                            Callable[[NeighborMeanWorkerArgs], list[float | None]],
+                            _neighbor_mean_worker,
+                        ),
+                        NeighborMeanWorkerArgs(
+                            chunk=nodes[idx : idx + chunk_size],
+                            neighbors_map=neighbors_map,
+                            epi_map=epi_map,
+                        ),
+                    )
+                    for idx in range(0, len(nodes), chunk_size)
+                ]
+                for fut in futures:
+                    neighbor_means.extend(cast(list[float | None], fut.result()))
+        else:
+            neighbor_means = _neighbor_mean_worker(
+                NeighborMeanWorkerArgs(
+                    chunk=nodes,
+                    neighbors_map=neighbors_map,
+                    epi_map=epi_map,
+                )
+            )
+    else:
+        neighbor_means = [None] * len(nodes)
+
+    node_payload: DiagnosisPayloadChunk = []
+    for idx, node in enumerate(nodes):
+        node_payload.append(
+            {
+                "node": node,
+                "Si": si_vals[idx],
+                "EPI": epi_vals[idx],
+                "VF": vf_vals[idx],
+                "dnfr_norm": dnfr_norms[idx],
+                "R_local": rloc_values[idx],
+                "W_i": wi_values[idx],
+                "neighbor_epi_mean": neighbor_means[idx],
+            }
         )
 
+    shared = {
+        "dcfg": dcfg,
+        "compute_symmetry": compute_symmetry,
+        "epi_min": float(epi_min),
+        "epi_max": float(epi_max),
+        "dissonance_hi": float(dcfg.get("dissonance", {}).get("dnfr_hi", 0.5)),
+    }
+
+    if n_jobs and n_jobs > 1 and len(node_payload) > 1:
+        approx_chunk = math.ceil(len(node_payload) / n_jobs) if n_jobs else None
+        chunk_size = resolve_chunk_size(
+            approx_chunk,
+            len(node_payload),
+            minimum=1,
+        )
+        diag_pairs: DiagnosisResultList = []
+        with ProcessPoolExecutor(max_workers=n_jobs) as executor:
+            submit = cast(Callable[..., Any], executor.submit)
+            futures = [
+                submit(
+                    cast(
+                        Callable[
+                            [list[dict[str, Any]], dict[str, Any]],
+                            list[tuple[Any, dict[str, Any]]],
+                        ],
+                        _diagnosis_worker_chunk,
+                    ),
+                    node_payload[idx : idx + chunk_size],
+                    shared,
+                )
+                for idx in range(0, len(node_payload), chunk_size)
+            ]
+            for fut in futures:
+                diag_pairs.extend(cast(DiagnosisResultList, fut.result()))
+    else:
+        diag_pairs = [_node_diagnostics(item, shared) for item in node_payload]
+
+    diag_map = dict(diag_pairs)
+    diag: dict[NodeId, DiagnosisPayload] = {
+        node: diag_map.get(node, {}) for node in nodes
+    }
+
     append_metric(hist, key, diag)
 
 
-def dissonance_events(G, ctx:
+def dissonance_events(G: TNFRGraph, ctx: DiagnosisSharedState | None = None) -> None:
     """Emit per-node structural dissonance start/end events.
 
     Events are recorded as ``"dissonance_start"`` and ``"dissonance_end"``.
@@ -193,11 +787,11 @@ def dissonance_events(G, ctx: dict[str, Any] | None = None):
     del ctx
 
     hist = ensure_history(G)
-    #
+    # Dissonance events are recorded in ``history['events']``
     norms = G.graph.get("_sel_norms", {})
     dnfr_max = float(norms.get("dnfr_max", 1.0)) or 1.0
     step_idx = len(hist.get("C_steps", []))
-    nodes = list(G.nodes())
+    nodes: list[NodeId] = list(G.nodes())
     for n in nodes:
         nd = G.nodes[n]
         dn = normalize_dnfr(nd, dnfr_max)
@@ -219,11 +813,16 @@ def dissonance_events(G, ctx: dict[str, Any] | None = None):
     )
 
 
-def register_diagnosis_callbacks(G) -> None:
+def register_diagnosis_callbacks(G: TNFRGraph) -> None:
+    """Attach diagnosis observers (Si/dissonance tracking) to ``G``."""
+
+    raw_jobs = G.graph.get("DIAGNOSIS_N_JOBS")
+    n_jobs = _coerce_jobs(raw_jobs)
+
     callback_manager.register_callback(
         G,
         event=CallbackEvent.AFTER_STEP.value,
-        func=_diagnosis_step,
+        func=partial(_diagnosis_step, n_jobs=n_jobs),
         name="diagnosis_step",
     )
     callback_manager.register_callback(