tnfr 4.5.2__py3-none-any.whl → 7.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of tnfr might be problematic.
- tnfr/__init__.py +275 -51
- tnfr/__init__.pyi +33 -0
- tnfr/_compat.py +10 -0
- tnfr/_generated_version.py +34 -0
- tnfr/_version.py +49 -0
- tnfr/_version.pyi +7 -0
- tnfr/alias.py +117 -31
- tnfr/alias.pyi +108 -0
- tnfr/cache.py +6 -572
- tnfr/cache.pyi +16 -0
- tnfr/callback_utils.py +16 -38
- tnfr/callback_utils.pyi +79 -0
- tnfr/cli/__init__.py +34 -14
- tnfr/cli/__init__.pyi +26 -0
- tnfr/cli/arguments.py +211 -28
- tnfr/cli/arguments.pyi +27 -0
- tnfr/cli/execution.py +470 -50
- tnfr/cli/execution.pyi +70 -0
- tnfr/cli/utils.py +18 -3
- tnfr/cli/utils.pyi +8 -0
- tnfr/config/__init__.py +13 -0
- tnfr/config/__init__.pyi +10 -0
- tnfr/{constants_glyphs.py → config/constants.py} +26 -20
- tnfr/config/constants.pyi +12 -0
- tnfr/config/feature_flags.py +83 -0
- tnfr/{config.py → config/init.py} +11 -7
- tnfr/config/init.pyi +8 -0
- tnfr/config/operator_names.py +93 -0
- tnfr/config/operator_names.pyi +28 -0
- tnfr/config/presets.py +84 -0
- tnfr/config/presets.pyi +7 -0
- tnfr/constants/__init__.py +80 -29
- tnfr/constants/__init__.pyi +92 -0
- tnfr/constants/aliases.py +31 -0
- tnfr/constants/core.py +4 -4
- tnfr/constants/core.pyi +17 -0
- tnfr/constants/init.py +1 -1
- tnfr/constants/init.pyi +12 -0
- tnfr/constants/metric.py +7 -15
- tnfr/constants/metric.pyi +19 -0
- tnfr/dynamics/__init__.py +165 -633
- tnfr/dynamics/__init__.pyi +82 -0
- tnfr/dynamics/adaptation.py +267 -0
- tnfr/dynamics/aliases.py +23 -0
- tnfr/dynamics/coordination.py +385 -0
- tnfr/dynamics/dnfr.py +2283 -400
- tnfr/dynamics/dnfr.pyi +24 -0
- tnfr/dynamics/integrators.py +406 -98
- tnfr/dynamics/integrators.pyi +34 -0
- tnfr/dynamics/runtime.py +881 -0
- tnfr/dynamics/sampling.py +10 -5
- tnfr/dynamics/sampling.pyi +7 -0
- tnfr/dynamics/selectors.py +719 -0
- tnfr/execution.py +70 -48
- tnfr/execution.pyi +45 -0
- tnfr/flatten.py +13 -9
- tnfr/flatten.pyi +21 -0
- tnfr/gamma.py +66 -53
- tnfr/gamma.pyi +34 -0
- tnfr/glyph_history.py +110 -52
- tnfr/glyph_history.pyi +35 -0
- tnfr/glyph_runtime.py +16 -0
- tnfr/glyph_runtime.pyi +9 -0
- tnfr/immutable.py +69 -28
- tnfr/immutable.pyi +34 -0
- tnfr/initialization.py +16 -16
- tnfr/initialization.pyi +65 -0
- tnfr/io.py +6 -240
- tnfr/io.pyi +16 -0
- tnfr/locking.pyi +7 -0
- tnfr/mathematics/__init__.py +81 -0
- tnfr/mathematics/backend.py +426 -0
- tnfr/mathematics/dynamics.py +398 -0
- tnfr/mathematics/epi.py +254 -0
- tnfr/mathematics/generators.py +222 -0
- tnfr/mathematics/metrics.py +119 -0
- tnfr/mathematics/operators.py +233 -0
- tnfr/mathematics/operators_factory.py +71 -0
- tnfr/mathematics/projection.py +78 -0
- tnfr/mathematics/runtime.py +173 -0
- tnfr/mathematics/spaces.py +247 -0
- tnfr/mathematics/transforms.py +292 -0
- tnfr/metrics/__init__.py +10 -10
- tnfr/metrics/__init__.pyi +20 -0
- tnfr/metrics/coherence.py +993 -324
- tnfr/metrics/common.py +23 -16
- tnfr/metrics/common.pyi +46 -0
- tnfr/metrics/core.py +251 -35
- tnfr/metrics/core.pyi +13 -0
- tnfr/metrics/diagnosis.py +708 -111
- tnfr/metrics/diagnosis.pyi +85 -0
- tnfr/metrics/export.py +27 -15
- tnfr/metrics/glyph_timing.py +232 -42
- tnfr/metrics/reporting.py +33 -22
- tnfr/metrics/reporting.pyi +12 -0
- tnfr/metrics/sense_index.py +987 -43
- tnfr/metrics/sense_index.pyi +9 -0
- tnfr/metrics/trig.py +214 -23
- tnfr/metrics/trig.pyi +13 -0
- tnfr/metrics/trig_cache.py +115 -22
- tnfr/metrics/trig_cache.pyi +10 -0
- tnfr/node.py +542 -136
- tnfr/node.pyi +178 -0
- tnfr/observers.py +152 -35
- tnfr/observers.pyi +31 -0
- tnfr/ontosim.py +23 -19
- tnfr/ontosim.pyi +28 -0
- tnfr/operators/__init__.py +601 -82
- tnfr/operators/__init__.pyi +45 -0
- tnfr/operators/definitions.py +513 -0
- tnfr/operators/definitions.pyi +78 -0
- tnfr/operators/grammar.py +760 -0
- tnfr/operators/jitter.py +107 -38
- tnfr/operators/jitter.pyi +11 -0
- tnfr/operators/registry.py +75 -0
- tnfr/operators/registry.pyi +13 -0
- tnfr/operators/remesh.py +149 -88
- tnfr/py.typed +0 -0
- tnfr/rng.py +46 -143
- tnfr/rng.pyi +14 -0
- tnfr/schemas/__init__.py +8 -0
- tnfr/schemas/grammar.json +94 -0
- tnfr/selector.py +25 -19
- tnfr/selector.pyi +19 -0
- tnfr/sense.py +72 -62
- tnfr/sense.pyi +23 -0
- tnfr/structural.py +522 -262
- tnfr/structural.pyi +69 -0
- tnfr/telemetry/__init__.py +35 -0
- tnfr/telemetry/cache_metrics.py +226 -0
- tnfr/telemetry/nu_f.py +423 -0
- tnfr/telemetry/nu_f.pyi +123 -0
- tnfr/telemetry/verbosity.py +37 -0
- tnfr/tokens.py +1 -3
- tnfr/tokens.pyi +36 -0
- tnfr/trace.py +270 -113
- tnfr/trace.pyi +40 -0
- tnfr/types.py +574 -6
- tnfr/types.pyi +331 -0
- tnfr/units.py +69 -0
- tnfr/units.pyi +16 -0
- tnfr/utils/__init__.py +217 -0
- tnfr/utils/__init__.pyi +202 -0
- tnfr/utils/cache.py +2395 -0
- tnfr/utils/cache.pyi +468 -0
- tnfr/utils/chunks.py +104 -0
- tnfr/utils/chunks.pyi +21 -0
- tnfr/{collections_utils.py → utils/data.py} +147 -90
- tnfr/utils/data.pyi +64 -0
- tnfr/utils/graph.py +85 -0
- tnfr/utils/graph.pyi +10 -0
- tnfr/utils/init.py +770 -0
- tnfr/utils/init.pyi +78 -0
- tnfr/utils/io.py +456 -0
- tnfr/{helpers → utils}/numeric.py +51 -24
- tnfr/utils/numeric.pyi +21 -0
- tnfr/validation/__init__.py +113 -0
- tnfr/validation/__init__.pyi +77 -0
- tnfr/validation/compatibility.py +95 -0
- tnfr/validation/compatibility.pyi +6 -0
- tnfr/validation/grammar.py +71 -0
- tnfr/validation/grammar.pyi +40 -0
- tnfr/validation/graph.py +138 -0
- tnfr/validation/graph.pyi +17 -0
- tnfr/validation/rules.py +281 -0
- tnfr/validation/rules.pyi +55 -0
- tnfr/validation/runtime.py +263 -0
- tnfr/validation/runtime.pyi +31 -0
- tnfr/validation/soft_filters.py +170 -0
- tnfr/validation/soft_filters.pyi +37 -0
- tnfr/validation/spectral.py +159 -0
- tnfr/validation/spectral.pyi +46 -0
- tnfr/validation/syntax.py +40 -0
- tnfr/validation/syntax.pyi +10 -0
- tnfr/validation/window.py +39 -0
- tnfr/validation/window.pyi +1 -0
- tnfr/viz/__init__.py +9 -0
- tnfr/viz/matplotlib.py +246 -0
- tnfr-7.0.0.dist-info/METADATA +179 -0
- tnfr-7.0.0.dist-info/RECORD +185 -0
- {tnfr-4.5.2.dist-info → tnfr-7.0.0.dist-info}/licenses/LICENSE.md +1 -1
- tnfr/grammar.py +0 -344
- tnfr/graph_utils.py +0 -84
- tnfr/helpers/__init__.py +0 -71
- tnfr/import_utils.py +0 -228
- tnfr/json_utils.py +0 -162
- tnfr/logging_utils.py +0 -116
- tnfr/presets.py +0 -60
- tnfr/validators.py +0 -84
- tnfr/value_utils.py +0 -59
- tnfr-4.5.2.dist-info/METADATA +0 -379
- tnfr-4.5.2.dist-info/RECORD +0 -67
- {tnfr-4.5.2.dist-info → tnfr-7.0.0.dist-info}/WHEEL +0 -0
- {tnfr-4.5.2.dist-info → tnfr-7.0.0.dist-info}/entry_points.txt +0 -0
- {tnfr-4.5.2.dist-info → tnfr-7.0.0.dist-info}/top_level.txt +0 -0
tnfr/metrics/coherence.py
CHANGED
@@ -3,22 +3,28 @@
 from __future__ import annotations
 
 import math
+from collections.abc import Callable, Iterable, Mapping, Sequence
+from concurrent.futures import ProcessPoolExecutor
 from dataclasses import dataclass
-from
+from types import ModuleType
+from typing import Any, MutableMapping, cast
 
-…
-from ..
-    get_aliases,
-    get_param,
-)
+from .._compat import TypeAlias
+from ..alias import collect_attr, collect_theta_attr, get_attr, set_attr
 from ..callback_utils import CallbackEvent, callback_manager
-from ..
-from ..
-… (old lines 17-21 not captured in this view)
+from ..constants import get_param
+from ..constants.aliases import (
+    ALIAS_D2VF,
+    ALIAS_DNFR,
+    ALIAS_DSI,
+    ALIAS_DVF,
+    ALIAS_DEPI,
+    ALIAS_EPI,
+    ALIAS_SI,
+    ALIAS_VF,
+)
+from ..glyph_history import append_metric, ensure_history
+from ..utils import clamp01
 from ..observers import (
     DEFAULT_GLYPH_LOAD_SPAN,
     DEFAULT_WBAR_SPAN,
@@ -27,20 +33,30 @@ from ..observers import (
     phase_sync,
 )
 from ..sense import sigma_vector
-from ..
-…
+from ..types import (
+    CoherenceMetric,
+    FloatArray,
+    FloatMatrix,
+    GlyphLoadDistribution,
+    HistoryState,
+    NodeId,
+    ParallelWijPayload,
+    SigmaVector,
+    TNFRGraph,
+)
+from ..utils import (
+    ensure_node_index_map,
+    get_logger,
+    get_numpy,
+    normalize_weights,
+    resolve_chunk_size,
+)
+from .common import compute_coherence, min_max_range
+from .trig_cache import compute_theta_trig, get_trig_cache
 
 logger = get_logger(__name__)
 
-…
-ALIAS_EPI = get_aliases("EPI")
-ALIAS_VF = get_aliases("VF")
-ALIAS_SI = get_aliases("SI")
-ALIAS_DNFR = get_aliases("DNFR")
-ALIAS_DEPI = get_aliases("DEPI")
-ALIAS_DSI = get_aliases("DSI")
-ALIAS_DVF = get_aliases("DVF")
-ALIAS_D2VF = get_aliases("D2VF")
+GLYPH_LOAD_STABILIZERS_KEY = "glyph_load_stabilizers"
 
 
 @dataclass
@@ -55,16 +71,53 @@ class SimilarityInputs:
     sin_vals: Sequence[float] | None = None
 
 
+CoherenceMatrixDense = list[list[float]]
+CoherenceMatrixSparse = list[tuple[int, int, float]]
+CoherenceMatrixPayload = CoherenceMatrixDense | CoherenceMatrixSparse
+PhaseSyncWeights: TypeAlias = (
+    Sequence[float] | CoherenceMatrixSparse | CoherenceMatrixDense
+)
+
+SimilarityComponents = tuple[float, float, float, float]
+VectorizedComponents: TypeAlias = tuple[
+    FloatMatrix, FloatMatrix, FloatMatrix, FloatMatrix
+]
+ScalarOrArray: TypeAlias = float | FloatArray
+StabilityChunkArgs = tuple[
+    Sequence[float],
+    Sequence[float],
+    Sequence[float],
+    Sequence[float | None],
+    Sequence[float],
+    Sequence[float | None],
+    Sequence[float | None],
+    float,
+    float,
+    float,
+]
+StabilityChunkResult = tuple[
+    int,
+    int,
+    float,
+    float,
+    list[float],
+    list[float],
+    list[float],
+]
+
+MetricValue: TypeAlias = CoherenceMetric
+MetricProvider = Callable[[], MetricValue]
+MetricRecord: TypeAlias = tuple[MetricValue | MetricProvider, str]
 def _compute_wij_phase_epi_vf_si_vectorized(
-    epi,
-    vf,
-    si,
-    cos_th,
-    sin_th,
-    epi_range,
-    vf_range,
-    np,
-):
+    epi: FloatArray,
+    vf: FloatArray,
+    si: FloatArray,
+    cos_th: FloatArray,
+    sin_th: FloatArray,
+    epi_range: float,
+    vf_range: float,
+    np: ModuleType,
+) -> VectorizedComponents:
     """Vectorized computation of similarity components.
 
     All parameters are expected to be NumPy arrays already cast to ``float``
@@ -75,9 +128,7 @@ def _compute_wij_phase_epi_vf_si_vectorized(
     epi_range = epi_range if epi_range > 0 else 1.0
     vf_range = vf_range if vf_range > 0 else 1.0
     s_phase = 0.5 * (
-        1.0
-        + cos_th[:, None] * cos_th[None, :]
-        + sin_th[:, None] * sin_th[None, :]
+        1.0 + cos_th[:, None] * cos_th[None, :] + sin_th[:, None] * sin_th[None, :]
     )
     s_epi = 1.0 - np.abs(epi[:, None] - epi[None, :]) / epi_range
     s_vf = 1.0 - np.abs(vf[:, None] - vf[None, :]) / vf_range
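The collapsed `s_phase` expression relies on the identity cos(ti - tj) = cos(ti)cos(tj) + sin(ti)sin(tj), so 0.5 * (1 + cos dt) maps phase alignment onto [0, 1] without ever forming the pairwise angle differences. A standalone sketch of the same broadcast trick in plain NumPy, independent of tnfr's helpers:

```python
import numpy as np

theta = np.array([0.0, np.pi / 2, np.pi])  # toy phases
cos_th, sin_th = np.cos(theta), np.sin(theta)

# (n,) outer products broadcast to (n, n); the cosine addition identity
# replaces an explicit theta[:, None] - theta[None, :] subtraction.
s_phase = 0.5 * (
    1.0 + cos_th[:, None] * cos_th[None, :] + sin_th[:, None] * sin_th[None, :]
)

assert np.allclose(s_phase, 0.5 * (1.0 + np.cos(theta[:, None] - theta[None, :])))
print(s_phase)  # 1.0 on the diagonal, 0.0 where phases are opposite
```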
@@ -90,13 +141,13 @@ def compute_wij_phase_epi_vf_si(
     i: int | None = None,
     j: int | None = None,
     *,
-    trig=None,
-    G:
-    nodes: Sequence[
+    trig: Any | None = None,
+    G: TNFRGraph | None = None,
+    nodes: Sequence[NodeId] | None = None,
     epi_range: float = 1.0,
     vf_range: float = 1.0,
-    np=None,
-):
+    np: ModuleType | None = None,
+) -> SimilarityComponents | VectorizedComponents:
     """Return similarity components for nodes ``i`` and ``j``.
 
     When ``np`` is provided and ``i`` and ``j`` are ``None`` the computation is
@@ -126,11 +177,11 @@ def compute_wij_phase_epi_vf_si(
     si_vals = inputs.si_vals
 
     if np is not None and i is None and j is None:
-        epi = np.asarray(epi_vals)
-        vf = np.asarray(vf_vals)
-        si = np.asarray(si_vals)
-        cos_th = np.asarray(cos_vals, dtype=float)
-        sin_th = np.asarray(sin_vals, dtype=float)
+        epi = cast(FloatArray, np.asarray(epi_vals, dtype=float))
+        vf = cast(FloatArray, np.asarray(vf_vals, dtype=float))
+        si = cast(FloatArray, np.asarray(si_vals, dtype=float))
+        cos_th = cast(FloatArray, np.asarray(cos_vals, dtype=float))
+        sin_th = cast(FloatArray, np.asarray(sin_vals, dtype=float))
         return _compute_wij_phase_epi_vf_si_vectorized(
             epi,
             vf,
@@ -158,33 +209,42 @@ def compute_wij_phase_epi_vf_si(
 
 
 def _combine_similarity(
-    s_phase,
-    s_epi,
-    s_vf,
-    s_si,
-    phase_w,
-    epi_w,
-    vf_w,
-    si_w,
-    np=None,
-):
+    s_phase: ScalarOrArray,
+    s_epi: ScalarOrArray,
+    s_vf: ScalarOrArray,
+    s_si: ScalarOrArray,
+    phase_w: float,
+    epi_w: float,
+    vf_w: float,
+    si_w: float,
+    np: ModuleType | None = None,
+) -> ScalarOrArray:
     wij = phase_w * s_phase + epi_w * s_epi + vf_w * s_vf + si_w * s_si
     if np is not None:
-        return np.clip(wij, 0.0, 1.0)
+        return cast(FloatArray, np.clip(wij, 0.0, 1.0))
     return clamp01(wij)
 
 
 def _wij_components_weights(
-    G,
-    nodes,
+    G: TNFRGraph,
+    nodes: Sequence[NodeId] | None,
     inputs: SimilarityInputs,
-    wnorm,
+    wnorm: Mapping[str, float],
     i: int | None = None,
     j: int | None = None,
     epi_range: float = 1.0,
     vf_range: float = 1.0,
-    np=None,
-)
+    np: ModuleType | None = None,
+) -> tuple[
+    ScalarOrArray,
+    ScalarOrArray,
+    ScalarOrArray,
+    ScalarOrArray,
+    float,
+    float,
+    float,
+    float,
+]:
     """Return similarity components together with their weights.
 
     This consolidates repeated computations ensuring that both the
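`_combine_similarity` above is a plain weighted blend, wij = phase_w * s_phase + epi_w * s_epi + vf_w * s_vf + si_w * s_si, clamped to [0, 1]; the `wnorm` mapping supplies weights that appear to be pre-normalized via `normalize_weights`. A worked check of the scalar path, with invented illustration weights:

```python
def combine(s_phase, s_epi, s_vf, s_si, w=(0.4, 0.3, 0.2, 0.1)):
    # Weighted sum of the four similarity components, clamped to [0, 1]
    # like the scalar clamp01() branch in the diff.
    wij = w[0] * s_phase + w[1] * s_epi + w[2] * s_vf + w[3] * s_si
    return min(1.0, max(0.0, wij))

print(combine(1.0, 1.0, 1.0, 1.0))  # 1.0: identical nodes
print(combine(0.5, 0.8, 1.0, 0.9))  # 0.2 + 0.24 + 0.2 + 0.09 -> ~0.73
```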
@@ -210,17 +270,17 @@ def _wij_components_weights(
 
 
 def _wij_vectorized(
-    G,
-    nodes,
+    G: TNFRGraph,
+    nodes: Sequence[NodeId],
     inputs: SimilarityInputs,
-    wnorm,
-    epi_min,
-    epi_max,
-    vf_min,
-    vf_max,
-    self_diag,
-    np,
-):
+    wnorm: Mapping[str, float],
+    epi_min: float,
+    epi_max: float,
+    vf_min: float,
+    vf_max: float,
+    self_diag: bool,
+    np: ModuleType,
+) -> FloatMatrix:
     epi_range = epi_max - epi_min if epi_max > epi_min else 1.0
     vf_range = vf_max - vf_min if vf_max > vf_min else 1.0
     (
@@ -241,65 +301,110 @@ def _wij_vectorized(
         vf_range=vf_range,
         np=np,
     )
-…
-…
+    wij_matrix = cast(
+        FloatMatrix,
+        _combine_similarity(
+            s_phase, s_epi, s_vf, s_si, phase_w, epi_w, vf_w, si_w, np=np
+        ),
     )
     if self_diag:
-        np.fill_diagonal(
+        np.fill_diagonal(wij_matrix, 1.0)
     else:
-        np.fill_diagonal(
-        return
+        np.fill_diagonal(wij_matrix, 0.0)
+    return wij_matrix
 
 
-def
-    wij: list[list[float]],
+def _compute_wij_value_raw(
     i: int,
     j: int,
-… (old lines 258-260 not captured in this view)
+    epi_vals: Sequence[float],
+    vf_vals: Sequence[float],
+    si_vals: Sequence[float],
+    cos_vals: Sequence[float],
+    sin_vals: Sequence[float],
+    weights: tuple[float, float, float, float],
     epi_range: float,
     vf_range: float,
-… (old lines 263-287 not captured in this view)
+) -> float:
+    epi_range = epi_range if epi_range > 0 else 1.0
+    vf_range = vf_range if vf_range > 0 else 1.0
+    phase_w, epi_w, vf_w, si_w = weights
+    cos_i = cos_vals[i]
+    sin_i = sin_vals[i]
+    cos_j = cos_vals[j]
+    sin_j = sin_vals[j]
+    s_phase = 0.5 * (1.0 + (cos_i * cos_j + sin_i * sin_j))
+    s_epi = 1.0 - abs(epi_vals[i] - epi_vals[j]) / epi_range
+    s_vf = 1.0 - abs(vf_vals[i] - vf_vals[j]) / vf_range
+    s_si = 1.0 - abs(si_vals[i] - si_vals[j])
+    wij = phase_w * s_phase + epi_w * s_epi + vf_w * s_vf + si_w * s_si
+    return clamp01(wij)
+
+
+_PARALLEL_WIJ_DATA: ParallelWijPayload | None = None
+
+
+def _init_parallel_wij(data: ParallelWijPayload) -> None:
+    """Store immutable state for parallel ``wij`` computation."""
+
+    global _PARALLEL_WIJ_DATA
+    _PARALLEL_WIJ_DATA = data
+
+
+def _parallel_wij_worker(
+    pairs: Sequence[tuple[int, int]],
+) -> list[tuple[int, int, float]]:
+    """Compute coherence weights for ``pairs`` using shared state."""
+
+    if _PARALLEL_WIJ_DATA is None:
+        raise RuntimeError("Parallel coherence data not initialized")
+
+    data = _PARALLEL_WIJ_DATA
+    epi_vals: Sequence[float] = data["epi_vals"]
+    vf_vals: Sequence[float] = data["vf_vals"]
+    si_vals: Sequence[float] = data["si_vals"]
+    cos_vals: Sequence[float] = data["cos_vals"]
+    sin_vals: Sequence[float] = data["sin_vals"]
+    weights: tuple[float, float, float, float] = data["weights"]
+    epi_range: float = data["epi_range"]
+    vf_range: float = data["vf_range"]
+
+    compute = _compute_wij_value_raw
+    return [
+        (
+            i,
+            j,
+            compute(
+                i,
+                j,
+                epi_vals,
+                vf_vals,
+                si_vals,
+                cos_vals,
+                sin_vals,
+                weights,
+                epi_range,
+                vf_range,
+            ),
+        )
+        for i, j in pairs
+    ]
 
 
 def _wij_loops(
-    G,
-    nodes: Sequence[
-    node_to_index:
+    G: TNFRGraph,
+    nodes: Sequence[NodeId],
+    node_to_index: Mapping[NodeId, int],
     inputs: SimilarityInputs,
-    wnorm:
+    wnorm: Mapping[str, float],
     epi_min: float,
     epi_max: float,
     vf_min: float,
     vf_max: float,
     neighbors_only: bool,
     self_diag: bool,
-…
+    n_jobs: int | None = 1,
+) -> CoherenceMatrixDense:
     n = len(nodes)
     cos_vals = inputs.cos_vals
    sin_vals = inputs.sin_vals
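The `_PARALLEL_WIJ_DATA` module global plus `_init_parallel_wij` is the standard ProcessPoolExecutor initializer pattern: the read-only payload crosses the process boundary once per worker instead of once per submitted chunk. A generic, self-contained sketch of the pattern (hypothetical names, not tnfr API):

```python
from concurrent.futures import ProcessPoolExecutor

_SHARED = None  # populated once in each worker process


def _init(payload):
    global _SHARED
    _SHARED = payload


def _work(indices):
    # Tasks read the per-process copy instead of re-receiving the payload
    # in every submit() call.
    return [_SHARED["base"] + i for i in indices]


if __name__ == "__main__":
    with ProcessPoolExecutor(
        max_workers=2, initializer=_init, initargs=({"base": 100},)
    ) as ex:
        print(ex.submit(_work, [1, 2, 3]).result())  # [101, 102, 103]
```

Note that the diff itself passes a locally defined closure as the initializer, which transfers cleanly only under start methods that inherit parent memory (such as fork); `initargs` with a top-level function, as above, is the portable variant.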
@@ -310,47 +415,107 @@ def _wij_loops(
         sin_vals = [trig_local.sin[n] for n in nodes]
         inputs.cos_vals = cos_vals
         inputs.sin_vals = sin_vals
-… (old lines 313-316 not captured in this view)
+    assert cos_vals is not None
+    assert sin_vals is not None
+    epi_vals = list(inputs.epi_vals)
+    vf_vals = list(inputs.vf_vals)
+    si_vals = list(inputs.si_vals)
+    cos_vals_list = list(cos_vals)
+    sin_vals_list = list(sin_vals)
+    inputs.epi_vals = epi_vals
+    inputs.vf_vals = vf_vals
+    inputs.si_vals = si_vals
+    inputs.cos_vals = cos_vals_list
+    inputs.sin_vals = sin_vals_list
+    wij = [[1.0 if (self_diag and i == j) else 0.0 for j in range(n)] for i in range(n)]
     epi_range = epi_max - epi_min if epi_max > epi_min else 1.0
     vf_range = vf_max - vf_min if vf_max > vf_min else 1.0
+    weights = (
+        float(wnorm["phase"]),
+        float(wnorm["epi"]),
+        float(wnorm["vf"]),
+        float(wnorm["si"]),
+    )
+    pair_list: list[tuple[int, int]] = []
     if neighbors_only:
+        seen: set[tuple[int, int]] = set()
         for u, v in G.edges():
             i = node_to_index[u]
             j = node_to_index[v]
             if i == j:
                 continue
-…
-…
+            pair = (i, j) if i < j else (j, i)
+            if pair in seen:
+                continue
+            seen.add(pair)
+            pair_list.append(pair)
+    else:
+        for i in range(n):
+            for j in range(i + 1, n):
+                pair_list.append((i, j))
+
+    total_pairs = len(pair_list)
+    max_workers = 1
+    if n_jobs is not None:
+        try:
+            max_workers = int(n_jobs)
+        except (TypeError, ValueError):
+            max_workers = 1
+    if max_workers <= 1 or total_pairs == 0:
+        for i, j in pair_list:
+            wij_ij = _compute_wij_value_raw(
                 i,
                 j,
-… (old lines 329-331 not captured in this view)
+                epi_vals,
+                vf_vals,
+                si_vals,
+                cos_vals,
+                sin_vals,
+                weights,
                 epi_range,
                 vf_range,
-                wnorm,
             )
-… (old lines 336-349 not captured in this view)
+            wij[i][j] = wij[j][i] = wij_ij
+        return wij
+
+    approx_chunk = math.ceil(total_pairs / max_workers) if max_workers else None
+    chunk_size = resolve_chunk_size(
+        approx_chunk,
+        total_pairs,
+        minimum=1,
+    )
+    payload: ParallelWijPayload = {
+        "epi_vals": tuple(epi_vals),
+        "vf_vals": tuple(vf_vals),
+        "si_vals": tuple(si_vals),
+        "cos_vals": tuple(cos_vals),
+        "sin_vals": tuple(sin_vals),
+        "weights": weights,
+        "epi_range": float(epi_range),
+        "vf_range": float(vf_range),
+    }
+
+    def _init() -> None:
+        _init_parallel_wij(payload)
+
+    with ProcessPoolExecutor(max_workers=max_workers, initializer=_init) as executor:
+        futures = []
+        for start in range(0, total_pairs, chunk_size):
+            chunk = pair_list[start : start + chunk_size]
+            futures.append(executor.submit(_parallel_wij_worker, chunk))
+        for future in futures:
+            for i, j, value in future.result():
+                wij[i][j] = wij[j][i] = value
     return wij
 
 
-def _compute_stats(
+def _compute_stats(
+    values: Iterable[float] | Any,
+    row_sum: Iterable[float] | Any,
+    n: int,
+    self_diag: bool,
+    np: ModuleType | None = None,
+) -> tuple[float, float, float, list[float], int]:
     """Return aggregate statistics for ``values`` and normalized row sums.
 
     ``values`` and ``row_sum`` can be any iterables. They are normalized to
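Chunk sizing in `_wij_loops` is ceiling division: roughly total_pairs / max_workers pairs per chunk, then clamped through `resolve_chunk_size` (whose exact bounds live in tnfr.utils and are assumed here). The splitting arithmetic in isolation:

```python
import math


def split_chunks(items, max_workers):
    # One chunk per worker via ceiling division; the last chunk absorbs
    # the remainder. resolve_chunk_size's clamping is approximated by
    # the max(1, ...) floor.
    size = max(1, math.ceil(len(items) / max_workers))
    return [items[k:k + size] for k in range(0, len(items), size)]


pairs = [(i, j) for i in range(5) for j in range(i + 1, 5)]  # 10 pairs
print([len(c) for c in split_chunks(pairs, 3)])  # [4, 4, 2]
```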
@@ -360,62 +525,41 @@ def _compute_stats(values, row_sum, n, self_diag, np=None):
     """
 
     if np is not None:
-        # Normalize inputs to NumPy arrays
         if not isinstance(values, np.ndarray):
-…
+            values_arr = np.asarray(list(values), dtype=float)
         else:
-…
+            values_arr = cast(Any, values.astype(float))
         if not isinstance(row_sum, np.ndarray):
-…
+            row_arr = np.asarray(list(row_sum), dtype=float)
         else:
-… (old lines 371-375 not captured in this view)
-        def min_fn(v):
-            return float(v.min()) if v.size else 0.0
-
-        def max_fn(v):
-            return float(v.max()) if v.size else 0.0
-
-        def mean_fn(v):
-            return float(v.mean()) if v.size else 0.0
-
-        def wi_fn(r, d):
-            return (r / d).astype(float).tolist()
-
+            row_arr = cast(Any, row_sum.astype(float))
+        count_val = int(values_arr.size)
+        min_val = float(values_arr.min()) if values_arr.size else 0.0
+        max_val = float(values_arr.max()) if values_arr.size else 0.0
+        mean_val = float(values_arr.mean()) if values_arr.size else 0.0
     else:
-… (old lines 389-395 not captured in this view)
-        def min_fn(v):
-            return min(v) if v else 0.0
+        values_list = list(values)
+        row_arr = list(row_sum)
+        count_val = len(values_list)
+        min_val = min(values_list) if values_list else 0.0
+        max_val = max(values_list) if values_list else 0.0
+        mean_val = sum(values_list) / len(values_list) if values_list else 0.0
 
-        def max_fn(v):
-            return max(v) if v else 0.0
-
-        def mean_fn(v):
-            return sum(v) / len(v) if v else 0.0
-
-        def wi_fn(r, d):
-            return [float(r[i]) / d for i in range(n)]
-
-    count_val = size_fn(values)
-    min_val = min_fn(values)
-    max_val = max_fn(values)
-    mean_val = mean_fn(values)
     row_count = n if self_diag else n - 1
     denom = max(1, row_count)
-…
+    if np is not None:
+        Wi = (row_arr / denom).astype(float).tolist()  # type: ignore[operator]
+    else:
+        Wi = [float(row_arr[i]) / denom for i in range(n)]
     return min_val, max_val, mean_val, Wi, count_val
 
 
-def _coherence_numpy(
+def _coherence_numpy(
+    wij: Any,
+    mode: str,
+    thr: float,
+    np: ModuleType,
+) -> tuple[int, Any, Any, CoherenceMatrixPayload]:
     """Aggregate coherence weights using vectorized operations.
 
     Produces the structural weight matrix ``W`` along with the list of off
@@ -430,42 +574,125 @@ def _coherence_numpy(wij, mode, thr, np):
         W = wij.tolist()
     else:
         idx = np.where((wij >= thr) & mask)
-        W = [
-            (int(i), int(j), float(wij[i, j]))
-            for i, j in zip(idx[0], idx[1])
-        ]
+        W = [(int(i), int(j), float(wij[i, j])) for i, j in zip(idx[0], idx[1])]
     return n, values, row_sum, W
 
 
-def
+def _coherence_python_worker(
+    args: tuple[Sequence[Sequence[float]], int, str, float],
+) -> tuple[int, list[float], list[float], CoherenceMatrixSparse]:
+    rows, start, mode, thr = args
+    values: list[float] = []
+    row_sum: list[float] = []
+    sparse: list[tuple[int, int, float]] = []
+    dense_mode = mode == "dense"
+
+    for offset, row in enumerate(rows):
+        i = start + offset
+        total = 0.0
+        for j, w in enumerate(row):
+            total += w
+            if i != j:
+                values.append(w)
+                if not dense_mode and w >= thr:
+                    sparse.append((i, j, w))
+        row_sum.append(total)
+
+    return start, values, row_sum, sparse
+
+
+def _coherence_python(
+    wij: Sequence[Sequence[float]],
+    mode: str,
+    thr: float,
+    n_jobs: int | None = 1,
+) -> tuple[int, list[float], list[float], CoherenceMatrixPayload]:
     """Aggregate coherence weights using pure Python loops."""
 
     n = len(wij)
     values: list[float] = []
     row_sum = [0.0] * n
-… (old lines 446-451 not captured in this view)
-                values.append(w)
-                row_sum[i] += w
+
+    if n_jobs is not None:
+        try:
+            max_workers = int(n_jobs)
+        except (TypeError, ValueError):
+            max_workers = 1
     else:
-… (old lines 455-465 not captured in this view)
+        max_workers = 1
+
+    if max_workers <= 1:
+        if mode == "dense":
+            W: CoherenceMatrixDense = [list(row) for row in wij]
+            for i in range(n):
+                for j in range(n):
+                    w = W[i][j]
+                    if i != j:
+                        values.append(w)
+                    row_sum[i] += w
+        else:
+            W_sparse: CoherenceMatrixSparse = []
+            for i in range(n):
+                row_i = wij[i]
+                for j in range(n):
+                    w = row_i[j]
+                    if i != j:
+                        values.append(w)
+                        if w >= thr:
+                            W_sparse.append((i, j, w))
+                    row_sum[i] += w
+        return n, values, row_sum, W if mode == "dense" else W_sparse
+
+    approx_chunk = math.ceil(n / max_workers) if max_workers else None
+    chunk_size = resolve_chunk_size(
+        approx_chunk,
+        n,
+        minimum=1,
+    )
+    tasks = []
+    with ProcessPoolExecutor(max_workers=max_workers) as executor:
+        for start in range(0, n, chunk_size):
+            rows = wij[start : start + chunk_size]
+            tasks.append(
+                executor.submit(
+                    _coherence_python_worker,
+                    (tuple(tuple(row) for row in rows), start, mode, thr),
+                )
+            )
+        results = [task.result() for task in tasks]
 
+    results.sort(key=lambda item: item[0])
+    sparse_entries: list[tuple[int, int, float]] | None = (
+        [] if mode != "dense" else None
+    )
+    for start, chunk_values, chunk_row_sum, chunk_sparse in results:
+        values.extend(chunk_values)
+        for offset, total in enumerate(chunk_row_sum):
+            row_sum[start + offset] = total
+        if sparse_entries is not None:
+            sparse_entries.extend(chunk_sparse)
 
-…
+    if mode == "dense":
+        W_dense: CoherenceMatrixDense = [list(row) for row in wij]
+        return n, values, row_sum, W_dense
+    sparse_result: CoherenceMatrixSparse = (
+        sparse_entries if sparse_entries is not None else []
+    )
+    return n, values, row_sum, sparse_result
+
+
+def _finalize_wij(
+    G: TNFRGraph,
+    nodes: Sequence[NodeId],
+    wij: FloatMatrix | Sequence[Sequence[float]],
+    mode: str,
+    thr: float,
+    scope: str,
+    self_diag: bool,
+    np: ModuleType | None = None,
+    *,
+    n_jobs: int = 1,
+) -> tuple[list[NodeId], CoherenceMatrixPayload]:
     """Finalize the coherence matrix ``wij`` and store results in history.
 
     When ``np`` is provided and ``wij`` is a NumPy array, the computation is
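Both aggregation paths return the same scalar statistics but differ in the W payload: "dense" mode keeps the full row-major matrix (CoherenceMatrixDense), while any other mode keeps only the off-diagonal (i, j, weight) triples at or above `thr` (CoherenceMatrixSparse). A toy illustration of the two shapes, with invented values:

```python
wij = [
    [1.0, 0.8, 0.1],
    [0.8, 1.0, 0.6],
    [0.1, 0.6, 1.0],
]
thr = 0.5

dense = [list(row) for row in wij]  # CoherenceMatrixDense: full matrix
sparse = [  # CoherenceMatrixSparse: thresholded off-diagonal triples
    (i, j, w)
    for i, row in enumerate(wij)
    for j, w in enumerate(row)
    if i != j and w >= thr
]
print(sparse)  # [(0, 1, 0.8), (1, 0, 0.8), (1, 2, 0.6), (2, 1, 0.6)]
```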
@@ -474,11 +701,11 @@ def _finalize_wij(G, nodes, wij, mode, thr, scope, self_diag, np=None):
     """
 
     use_np = np is not None and isinstance(wij, np.ndarray)
-… (old lines 477-481 not captured in this view)
+    if use_np:
+        assert np is not None
+        n, values, row_sum, W = _coherence_numpy(wij, mode, thr, np)
+    else:
+        n, values, row_sum, W = _coherence_python(wij, mode, thr, n_jobs=n_jobs)
 
     min_val, max_val, mean_val, Wi, count_val = _compute_stats(
         values, row_sum, n, self_diag, np if use_np else None
@@ -497,37 +724,58 @@ def _finalize_wij(G, nodes, wij, mode, thr, scope, self_diag, np=None):
     append_metric(hist, cfg.get("history_key", "W_sparse"), W)
     append_metric(hist, cfg.get("Wi_history_key", "W_i"), Wi)
     append_metric(hist, cfg.get("stats_history_key", "W_stats"), stats)
-    return nodes, W
+    return list(nodes), W
+
 
+def coherence_matrix(
+    G: TNFRGraph,
+    use_numpy: bool | None = None,
+    *,
+    n_jobs: int | None = None,
+) -> tuple[list[NodeId] | None, CoherenceMatrixPayload | None]:
+    """Compute the coherence weight matrix for ``G``.
+
+    Parameters
+    ----------
+    G:
+        Graph whose nodes encode the structural attributes.
+    use_numpy:
+        When ``True`` the vectorised NumPy implementation is forced. When
+        ``False`` the pure Python fallback is used. ``None`` selects NumPy
+        automatically when available.
+    n_jobs:
+        Maximum worker processes to use for the Python fallback. ``None`` or
+        values less than or equal to one preserve the serial behaviour.
+    """
 
-def coherence_matrix(G, use_numpy: bool | None = None):
     cfg = get_param(G, "COHERENCE")
     if not cfg.get("enabled", True):
         return None, None
 
-    node_to_index = ensure_node_index_map(G)
-    nodes = list(node_to_index.keys())
+    node_to_index: Mapping[NodeId, int] = ensure_node_index_map(G)
+    nodes: list[NodeId] = list(node_to_index.keys())
    n = len(nodes)
     if n == 0:
         return nodes, []
 
     # NumPy handling for optional vectorized operations
     np = get_numpy()
-    use_np = (
-…
-    )
+    use_np = np is not None if use_numpy is None else (use_numpy and np is not None)
+
+    cfg_jobs = cfg.get("n_jobs")
+    parallel_jobs = n_jobs if n_jobs is not None else cfg_jobs
 
     # Precompute indices to avoid repeated list.index calls within loops
 
-    th_vals =
+    th_vals = collect_theta_attr(G, nodes, 0.0, np=np if use_np else None)
     epi_vals = collect_attr(G, nodes, ALIAS_EPI, 0.0, np=np if use_np else None)
     vf_vals = collect_attr(G, nodes, ALIAS_VF, 0.0, np=np if use_np else None)
     si_vals = collect_attr(G, nodes, ALIAS_SI, 0.0, np=np if use_np else None)
-…
-    np
-… (old lines 528-530 not captured in this view)
+    if use_np:
+        assert np is not None
+        si_vals = np.clip(si_vals, 0.0, 1.0)
+    else:
+        si_vals = [clamp01(v) for v in si_vals]
     epi_min, epi_max = min_max_range(epi_vals)
     vf_min, vf_max = min_max_range(vf_vals)
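Given the signature and docstring above, a typical call would look roughly like the sketch below. The graph preparation is elided and assumed to follow tnfr's node-attribute conventions (EPI, vf, Si, theta); only the `coherence_matrix` import path is taken from this diff's file layout.

```python
# Hedged usage sketch -- adapt the graph preparation to your tnfr version.
import networkx as nx

from tnfr.metrics.coherence import coherence_matrix

G = nx.erdos_renyi_graph(50, 0.2)
# ... populate EPI / vf / Si / theta node attributes here ...

nodes, W = coherence_matrix(G)  # NumPy path when available
nodes, W = coherence_matrix(G, use_numpy=False, n_jobs=4)  # parallel fallback
```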
@@ -557,7 +805,8 @@ def coherence_matrix(G, use_numpy: bool | None = None):
         sin_vals=sin_vals,
     )
     if use_np:
-…
+        assert np is not None
+        wij_matrix = _wij_vectorized(
             G,
             nodes,
             inputs,
@@ -576,7 +825,8 @@ def coherence_matrix(G, use_numpy: bool | None = None):
             j = node_to_index[v]
             adj[i, j] = True
             adj[j, i] = True
-…
+        wij_matrix = cast(FloatMatrix, np.where(adj, wij_matrix, 0.0))
+        wij: FloatMatrix | CoherenceMatrixDense = wij_matrix
     else:
         wij = _wij_loops(
             G,
@@ -590,14 +840,29 @@ def coherence_matrix(G, use_numpy: bool | None = None):
             vf_max,
             neighbors_only,
             self_diag,
+            n_jobs=parallel_jobs,
         )
 
-    return _finalize_wij(
+    return _finalize_wij(
+        G,
+        nodes,
+        wij,
+        mode,
+        thr,
+        scope,
+        self_diag,
+        np,
+        n_jobs=parallel_jobs if not use_np else 1,
+    )
 
 
 def local_phase_sync_weighted(
-    G
-…
+    G: TNFRGraph,
+    n: NodeId,
+    nodes_order: Sequence[NodeId] | None = None,
+    W_row: PhaseSyncWeights | None = None,
+    node_to_index: Mapping[NodeId, int] | None = None,
+) -> float:
     """Compute local phase synchrony using explicit weights.
 
     ``nodes_order`` is the node ordering used to build the coherence matrix
@@ -621,27 +886,52 @@ def local_phase_sync_weighted(
     trig = get_trig_cache(G)
     cos_map, sin_map = trig.cos, trig.sin
 
-    if (
-… (old lines 625-638 not captured in this view)
-        num
-… (old lines 640-644 not captured in this view)
+    if isinstance(W_row, Sequence) and W_row:
+        first = W_row[0]
+        if isinstance(first, (int, float)):
+            row_vals = cast(Sequence[float], W_row)
+            for w, nj in zip(row_vals, nodes_order):
+                if nj == n:
+                    continue
+                den += w
+                cos_j = cos_map.get(nj)
+                sin_j = sin_map.get(nj)
+                if cos_j is None or sin_j is None:
+                    trig_j = compute_theta_trig(((nj, G.nodes[nj]),))
+                    cos_j = trig_j.cos[nj]
+                    sin_j = trig_j.sin[nj]
+                num += w * complex(cos_j, sin_j)
+            return abs(num / den) if den else 0.0
+
+        if (
+            isinstance(first, Sequence)
+            and len(first) == 3
+            and isinstance(first[0], int)
+            and isinstance(first[1], int)
+            and isinstance(first[2], (int, float))
+        ):
+            sparse_entries = cast(CoherenceMatrixSparse, W_row)
+            for ii, jj, w in sparse_entries:
+                if ii != i:
+                    continue
+                nj = nodes_order[jj]
+                if nj == n:
+                    continue
+                den += w
+                cos_j = cos_map.get(nj)
+                sin_j = sin_map.get(nj)
+                if cos_j is None or sin_j is None:
+                    trig_j = compute_theta_trig(((nj, G.nodes[nj]),))
+                    cos_j = trig_j.cos[nj]
+                    sin_j = trig_j.sin[nj]
+                num += w * complex(cos_j, sin_j)
+            return abs(num / den) if den else 0.0
+
+        dense_matrix = cast(CoherenceMatrixDense, W_row)
+        if i is None:
+            raise ValueError("node index resolution failed for dense weights")
+        row_vals = cast(Sequence[float], dense_matrix[i])
+        for w, nj in zip(row_vals, nodes_order):
             if nj == n:
                 continue
             den += w
@@ -652,11 +942,28 @@ def local_phase_sync_weighted(
                 cos_j = trig_j.cos[nj]
                 sin_j = trig_j.sin[nj]
             num += w * complex(cos_j, sin_j)
+        return abs(num / den) if den else 0.0
+
+    sparse_entries = cast(CoherenceMatrixSparse, W_row)
+    for ii, jj, w in sparse_entries:
+        if ii != i:
+            continue
+        nj = nodes_order[jj]
+        if nj == n:
+            continue
+        den += w
+        cos_j = cos_map.get(nj)
+        sin_j = sin_map.get(nj)
+        if cos_j is None or sin_j is None:
+            trig_j = compute_theta_trig(((nj, G.nodes[nj]),))
+            cos_j = trig_j.cos[nj]
+            sin_j = trig_j.sin[nj]
+        num += w * complex(cos_j, sin_j)
 
     return abs(num / den) if den else 0.0
 
 
-def local_phase_sync(G, n):
+def local_phase_sync(G: TNFRGraph, n: NodeId) -> float:
     """Compute unweighted local phase synchronization for node ``n``."""
     nodes, W = coherence_matrix(G)
     if nodes is None:
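Every branch of `local_phase_sync_weighted` reduces to the same weighted Kuramoto-style order parameter around node n, R_n = |sum_j w_nj * e^(i*theta_j)| / sum_j w_nj over neighbours j != n; the branches only differ in how the weights are looked up (flat row, sparse triples, or dense matrix). The core reduction in isolation:

```python
import cmath


def weighted_phase_sync(weights, thetas):
    # R = |sum_j w_j * exp(i*theta_j)| / sum_j w_j: 1 when the weighted
    # neighbours share a phase, near 0 when they cancel out.
    den = sum(weights)
    num = sum(w * cmath.exp(1j * th) for w, th in zip(weights, thetas))
    return abs(num / den) if den else 0.0


print(weighted_phase_sync([1.0, 1.0], [0.3, 0.3]))       # ~1.0, aligned
print(weighted_phase_sync([1.0, 1.0], [0.0, cmath.pi]))  # ~0.0, opposed
```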
@@ -664,7 +971,7 @@ def local_phase_sync(G, n):
     return local_phase_sync_weighted(G, n, nodes_order=nodes, W_row=W)
 
 
-def _coherence_step(G, ctx: dict[str, Any] | None = None):
+def _coherence_step(G: TNFRGraph, ctx: dict[str, Any] | None = None) -> None:
     del ctx
 
     if not get_param(G, "COHERENCE").get("enabled", True):
@@ -672,7 +979,9 @@ def _coherence_step(G, ctx: dict[str, Any] | None = None):
     coherence_matrix(G)
 
 
-def register_coherence_callbacks(G) -> None:
+def register_coherence_callbacks(G: TNFRGraph) -> None:
+    """Attach coherence matrix maintenance to the ``AFTER_STEP`` event."""
+
     callback_manager.register_callback(
         G,
         event=CallbackEvent.AFTER_STEP.value,
@@ -687,18 +996,29 @@ def register_coherence_callbacks(G) -> None:
 
 
 def _record_metrics(
-    hist:
+    hist: HistoryState,
+    *pairs: MetricRecord,
+    evaluate: bool = False,
 ) -> None:
-    """
+    """Record metric values for the trace history."""
 
-…
-…
+    metrics = cast(MutableMapping[str, list[Any]], hist)
+    for payload, key in pairs:
+        if evaluate:
+            provider = cast(MetricProvider, payload)
+            append_metric(metrics, key, provider())
+        else:
+            append_metric(metrics, key, payload)
 
 
-def _update_coherence(G, hist) -> None:
+def _update_coherence(G: TNFRGraph, hist: HistoryState) -> None:
     """Update network coherence and related means."""
 
-…
+    coherence_payload = cast(
+        tuple[CoherenceMetric, float, float],
+        compute_coherence(G, return_means=True),
+    )
+    C, dnfr_mean, depi_mean = coherence_payload
     _record_metrics(
         hist,
         (C, "C_steps"),
@@ -714,7 +1034,7 @@ def _update_coherence(G, hist) -> None:
     _record_metrics(hist, (wbar, "W_bar"))
 
 
-def _update_phase_sync(G, hist) -> None:
+def _update_phase_sync(G: TNFRGraph, hist: HistoryState) -> None:
     """Capture phase synchrony and Kuramoto order."""
 
     ps = phase_sync(G)
@@ -726,18 +1046,35 @@ def _update_phase_sync(G, hist) -> None:
     )
 
 
-def _update_sigma(G, hist) -> None:
+def _update_sigma(G: TNFRGraph, hist: HistoryState) -> None:
     """Record glyph load and associated Σ⃗ vector."""
 
-…
+    metrics = cast(MutableMapping[str, list[Any]], hist)
+    if "glyph_load_estab" in metrics:
+        raise ValueError(
+            "History payloads using 'glyph_load_estab' are no longer supported. "
+            "Rename the series to 'glyph_load_stabilizers' before loading the graph."
+        )
+    stabilizer_series = metrics.get(GLYPH_LOAD_STABILIZERS_KEY)
+
+    if stabilizer_series is None:
+        stabilizer_series = cast(
+            list[Any], metrics.setdefault(GLYPH_LOAD_STABILIZERS_KEY, [])
+        )
+    else:
+        stabilizer_series = cast(list[Any], stabilizer_series)
+
+    gl: GlyphLoadDistribution = glyph_load(G, window=DEFAULT_GLYPH_LOAD_SPAN)
+    stabilizers = float(gl.get("_stabilizers", 0.0))
+    disruptors = float(gl.get("_disruptors", 0.0))
     _record_metrics(
         hist,
-        (
-        (
+        (stabilizers, GLYPH_LOAD_STABILIZERS_KEY),
+        (disruptors, "glyph_load_disr"),
     )
 
-    dist = {k: v for k, v in gl.items() if not k.startswith("_")}
-    sig = sigma_vector(dist)
+    dist: GlyphLoadDistribution = {k: v for k, v in gl.items() if not k.startswith("_")}
+    sig: SigmaVector = sigma_vector(dist)
     _record_metrics(
         hist,
         (sig.get("x", 0.0), "sense_sigma_x"),
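Since `_update_sigma` now raises on the pre-7.0 'glyph_load_estab' series, persisted histories need a one-time key rename before they are loaded. A minimal sketch for a plain dict-shaped history (hedged: adapt to however your history is serialized):

```python
def migrate_history(hist: dict) -> dict:
    # One-time rename required by tnfr >= 7.0; the old key now triggers
    # ValueError inside _update_sigma.
    if "glyph_load_estab" in hist:
        hist["glyph_load_stabilizers"] = hist.pop("glyph_load_estab")
    return hist


hist = {"glyph_load_estab": [0.4, 0.5], "glyph_load_disr": [0.1, 0.2]}
print(migrate_history(hist))
```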
@@ -747,51 +1084,341 @@
     )
 
 
-def
-    """
+def _stability_chunk_worker(args: StabilityChunkArgs) -> StabilityChunkResult:
+    """Compute stability aggregates for a chunk of nodes."""
 
-… (old lines 753-756 not captured in this view)
+    (
+        dnfr_vals,
+        depi_vals,
+        si_curr_vals,
+        si_prev_vals,
+        vf_curr_vals,
+        vf_prev_vals,
+        dvf_prev_vals,
+        dt,
+        eps_dnfr,
+        eps_depi,
+    ) = args
+
+    inv_dt = (1.0 / dt) if dt else 0.0
+    stable = 0
+    delta_sum = 0.0
     B_sum = 0.0
-…
+    delta_vals: list[float] = []
+    dvf_dt_vals: list[float] = []
+    B_vals: list[float] = []
+
+    for idx in range(len(si_curr_vals)):
+        curr_si = float(si_curr_vals[idx])
+        prev_si_raw = si_prev_vals[idx]
+        prev_si = float(prev_si_raw) if prev_si_raw is not None else curr_si
+        delta = curr_si - prev_si
+        delta_vals.append(delta)
+        delta_sum += delta
+
+        curr_vf = float(vf_curr_vals[idx])
+        prev_vf_raw = vf_prev_vals[idx]
+        prev_vf = float(prev_vf_raw) if prev_vf_raw is not None else curr_vf
+        dvf_dt = (curr_vf - prev_vf) * inv_dt if dt else 0.0
+        prev_dvf_raw = dvf_prev_vals[idx]
+        prev_dvf = float(prev_dvf_raw) if prev_dvf_raw is not None else dvf_dt
+        B = (dvf_dt - prev_dvf) * inv_dt if dt else 0.0
+        dvf_dt_vals.append(dvf_dt)
+        B_vals.append(B)
+        B_sum += B
 
-    for _, nd in G.nodes(data=True):
         if (
-            abs(
-            and abs(
+            abs(float(dnfr_vals[idx])) <= eps_dnfr
+            and abs(float(depi_vals[idx])) <= eps_depi
         ):
-… (old lines 765-775 not captured in this view)
-        vf_prev = nd.get("_prev_vf", vf_curr)
-        dvf_dt = (vf_curr - vf_prev) / dt
-        dvf_prev = nd.get("_prev_dvf", dvf_dt)
-        B = (dvf_dt - dvf_prev) / dt
-        nd["_prev_vf"] = vf_curr
-        nd["_prev_dvf"] = dvf_dt
-        set_attr(nd, ALIAS_DVF, dvf_dt)
-        set_attr(nd, ALIAS_D2VF, B)
-        B_sum += B
-        B_count += 1
-
-    hist["stable_frac"].append(stables / total)
-    hist["delta_Si"].append(
-        delta_si_sum / delta_si_count if delta_si_count else 0.0
+            stable += 1
+
+    chunk_len = len(si_curr_vals)
+    return (
+        stable,
+        chunk_len,
+        delta_sum,
+        B_sum,
+        delta_vals,
+        dvf_dt_vals,
+        B_vals,
     )
-    hist["B"].append(B_sum / B_count if B_count else 0.0)
 
 
-def
+def _track_stability(
+    G: TNFRGraph,
+    hist: MutableMapping[str, Any],
+    dt: float,
+    eps_dnfr: float,
+    eps_depi: float,
+    *,
+    n_jobs: int | None = None,
+) -> None:
+    """Track per-node stability and derivative metrics."""
+
+    nodes: tuple[NodeId, ...] = tuple(G.nodes)
+    total_nodes = len(nodes)
+    if not total_nodes:
+        hist.setdefault("stable_frac", []).append(0.0)
+        hist.setdefault("delta_Si", []).append(0.0)
+        hist.setdefault("B", []).append(0.0)
+        return
+
+    np_mod = get_numpy()
+
+    dnfr_vals = collect_attr(G, nodes, ALIAS_DNFR, 0.0, np=np_mod)
+    depi_vals = collect_attr(G, nodes, ALIAS_DEPI, 0.0, np=np_mod)
+    si_curr_vals = collect_attr(G, nodes, ALIAS_SI, 0.0, np=np_mod)
+    vf_curr_vals = collect_attr(G, nodes, ALIAS_VF, 0.0, np=np_mod)
+
+    prev_si_data = [G.nodes[n].get("_prev_Si") for n in nodes]
+    prev_vf_data = [G.nodes[n].get("_prev_vf") for n in nodes]
+    prev_dvf_data = [G.nodes[n].get("_prev_dvf") for n in nodes]
+
+    inv_dt = (1.0 / dt) if dt else 0.0
+
+    if np_mod is not None:
+        np = np_mod
+        dnfr_arr = dnfr_vals
+        depi_arr = depi_vals
+        si_curr_arr = si_curr_vals
+        vf_curr_arr = vf_curr_vals
+
+        si_prev_arr = np.asarray(
+            [
+                (
+                    float(prev_si_data[idx])
+                    if prev_si_data[idx] is not None
+                    else float(si_curr_arr[idx])
+                )
+                for idx in range(total_nodes)
+            ],
+            dtype=float,
+        )
+        vf_prev_arr = np.asarray(
+            [
+                (
+                    float(prev_vf_data[idx])
+                    if prev_vf_data[idx] is not None
+                    else float(vf_curr_arr[idx])
+                )
+                for idx in range(total_nodes)
+            ],
+            dtype=float,
+        )
+
+        if dt:
+            dvf_dt_arr = (vf_curr_arr - vf_prev_arr) * inv_dt
+        else:
+            dvf_dt_arr = np.zeros_like(vf_curr_arr, dtype=float)
+
+        dvf_prev_arr = np.asarray(
+            [
+                (
+                    float(prev_dvf_data[idx])
+                    if prev_dvf_data[idx] is not None
+                    else float(dvf_dt_arr[idx])
+                )
+                for idx in range(total_nodes)
+            ],
+            dtype=float,
+        )
+
+        if dt:
+            B_arr = (dvf_dt_arr - dvf_prev_arr) * inv_dt
+        else:
+            B_arr = np.zeros_like(dvf_dt_arr, dtype=float)
+
+        stable_mask = (np.abs(dnfr_arr) <= eps_dnfr) & (np.abs(depi_arr) <= eps_depi)
+        stable_frac = float(stable_mask.mean()) if total_nodes else 0.0
+
+        delta_si_arr = si_curr_arr - si_prev_arr
+        delta_si_mean = float(delta_si_arr.mean()) if total_nodes else 0.0
+        B_mean = float(B_arr.mean()) if total_nodes else 0.0
+
+        hist.setdefault("stable_frac", []).append(stable_frac)
+        hist.setdefault("delta_Si", []).append(delta_si_mean)
+        hist.setdefault("B", []).append(B_mean)
+
+        for idx, node in enumerate(nodes):
+            nd = G.nodes[node]
+            curr_si = float(si_curr_arr[idx])
+            delta_val = float(delta_si_arr[idx])
+            nd["_prev_Si"] = curr_si
+            set_attr(nd, ALIAS_DSI, delta_val)
+
+            curr_vf = float(vf_curr_arr[idx])
+            nd["_prev_vf"] = curr_vf
+
+            dvf_dt_val = float(dvf_dt_arr[idx])
+            nd["_prev_dvf"] = dvf_dt_val
+            set_attr(nd, ALIAS_DVF, dvf_dt_val)
+            set_attr(nd, ALIAS_D2VF, float(B_arr[idx]))
+
+        return
+
+    # NumPy not available: optionally parallel fallback or sequential computation.
+    dnfr_list = list(dnfr_vals)
+    depi_list = list(depi_vals)
+    si_curr_list = list(si_curr_vals)
+    vf_curr_list = list(vf_curr_vals)
+
+    if n_jobs and n_jobs > 1:
+        approx_chunk = math.ceil(total_nodes / n_jobs) if n_jobs else None
+        chunk_size = resolve_chunk_size(
+            approx_chunk,
+            total_nodes,
+            minimum=1,
+        )
+        chunk_results: list[
+            tuple[
+                int,
+                tuple[int, int, float, float, list[float], list[float], list[float]],
+            ]
+        ] = []
+        with ProcessPoolExecutor(max_workers=n_jobs) as executor:
+            futures: list[tuple[int, Any]] = []
+            for start in range(0, total_nodes, chunk_size):
+                end = min(start + chunk_size, total_nodes)
+                chunk_args = (
+                    dnfr_list[start:end],
+                    depi_list[start:end],
+                    si_curr_list[start:end],
+                    prev_si_data[start:end],
+                    vf_curr_list[start:end],
+                    prev_vf_data[start:end],
+                    prev_dvf_data[start:end],
+                    dt,
+                    eps_dnfr,
+                    eps_depi,
+                )
+                futures.append(
+                    (start, executor.submit(_stability_chunk_worker, chunk_args))
+                )
+
+            for start, fut in futures:
+                chunk_results.append((start, fut.result()))
+
+        chunk_results.sort(key=lambda item: item[0])
+
+        stable_total = 0
+        delta_sum = 0.0
+        B_sum = 0.0
+        delta_vals_all: list[float] = []
+        dvf_dt_all: list[float] = []
+        B_vals_all: list[float] = []
+
+        for _, result in chunk_results:
+            (
+                stable_count,
+                chunk_len,
+                chunk_delta_sum,
+                chunk_B_sum,
+                delta_vals,
+                dvf_vals,
+                B_vals,
+            ) = result
+            stable_total += stable_count
+            delta_sum += chunk_delta_sum
+            B_sum += chunk_B_sum
+            delta_vals_all.extend(delta_vals)
+            dvf_dt_all.extend(dvf_vals)
+            B_vals_all.extend(B_vals)
+
+        total = len(delta_vals_all)
+        stable_frac = stable_total / total if total else 0.0
+        delta_si_mean = delta_sum / total if total else 0.0
+        B_mean = B_sum / total if total else 0.0
+
+    else:
+        stable_total = 0
+        delta_sum = 0.0
+        B_sum = 0.0
+        delta_vals_all = []
+        dvf_dt_all = []
+        B_vals_all = []
+
+        for idx in range(total_nodes):
+            curr_si = float(si_curr_list[idx])
+            prev_si_raw = prev_si_data[idx]
+            prev_si = float(prev_si_raw) if prev_si_raw is not None else curr_si
+            delta = curr_si - prev_si
+            delta_vals_all.append(delta)
+            delta_sum += delta
+
+            curr_vf = float(vf_curr_list[idx])
+            prev_vf_raw = prev_vf_data[idx]
+            prev_vf = float(prev_vf_raw) if prev_vf_raw is not None else curr_vf
+            dvf_dt_val = (curr_vf - prev_vf) * inv_dt if dt else 0.0
+            prev_dvf_raw = prev_dvf_data[idx]
+            prev_dvf = float(prev_dvf_raw) if prev_dvf_raw is not None else dvf_dt_val
+            B_val = (dvf_dt_val - prev_dvf) * inv_dt if dt else 0.0
+            dvf_dt_all.append(dvf_dt_val)
+            B_vals_all.append(B_val)
+            B_sum += B_val
+
+            if (
+                abs(float(dnfr_list[idx])) <= eps_dnfr
+                and abs(float(depi_list[idx])) <= eps_depi
+            ):
+                stable_total += 1
+
+        total = len(delta_vals_all)
+        stable_frac = stable_total / total if total else 0.0
+        delta_si_mean = delta_sum / total if total else 0.0
+        B_mean = B_sum / total if total else 0.0
+
+    hist.setdefault("stable_frac", []).append(stable_frac)
+    hist.setdefault("delta_Si", []).append(delta_si_mean)
+    hist.setdefault("B", []).append(B_mean)
+
+    for idx, node in enumerate(nodes):
+        nd = G.nodes[node]
+        curr_si = float(si_curr_list[idx])
+        delta_val = float(delta_vals_all[idx])
+        nd["_prev_Si"] = curr_si
+        set_attr(nd, ALIAS_DSI, delta_val)
+
+        curr_vf = float(vf_curr_list[idx])
+        nd["_prev_vf"] = curr_vf
+
+        dvf_dt_val = float(dvf_dt_all[idx])
+        nd["_prev_dvf"] = dvf_dt_val
+        set_attr(nd, ALIAS_DVF, dvf_dt_val)
+        set_attr(nd, ALIAS_D2VF, float(B_vals_all[idx]))
+
+
+def _si_chunk_stats(
+    values: Sequence[float], si_hi: float, si_lo: float
+) -> tuple[float, int, int, int]:
+    """Compute partial Si aggregates for ``values``.
+
+    The helper keeps the logic shared between the sequential and parallel
+    fallbacks when NumPy is unavailable.
+    """
+
+    total = 0.0
+    count = 0
+    hi_count = 0
+    lo_count = 0
+    for s in values:
+        if math.isnan(s):
+            continue
+        total += s
+        count += 1
+        if s >= si_hi:
+            hi_count += 1
+        if s <= si_lo:
+            lo_count += 1
+    return total, count, hi_count, lo_count
+
+
+def _aggregate_si(
+    G: TNFRGraph,
+    hist: MutableMapping[str, list[float]],
+    *,
+    n_jobs: int | None = None,
+) -> None:
     """Aggregate Si statistics across nodes."""
 
     try:
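The derivative bookkeeping in `_track_stability` and `_stability_chunk_worker` is two stacked first-order finite differences: dvf/dt ~ (vf_t - vf_{t-1}) / dt and B ~ (dvf/dt|_t - dvf/dt|_{t-1}) / dt, so B approximates the second derivative of vf. A standalone sketch of the recurrence, seeded so the first step yields zero derivatives, mirroring the fallback to the current value when no `_prev_*` attribute exists:

```python
def stability_derivatives(vf_series, dt):
    # First and second finite-difference derivatives of a vf time series.
    inv_dt = 1.0 / dt if dt else 0.0
    prev_vf, prev_dvf = vf_series[0], 0.0
    out = []
    for vf in vf_series:
        dvf_dt = (vf - prev_vf) * inv_dt
        B = (dvf_dt - prev_dvf) * inv_dt
        out.append((dvf_dt, B))
        prev_vf, prev_dvf = vf, dvf_dt
    return out


print(stability_derivatives([1.0, 1.25, 1.75], dt=0.25))
# [(0.0, 0.0), (1.0, 4.0), (2.0, 4.0)]
```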
@@ -800,27 +1427,69 @@ def _aggregate_si(G, hist):
     si_hi = float(thr_sel.get("si_hi", thr_def.get("hi", 0.66)))
     si_lo = float(thr_sel.get("si_lo", thr_def.get("lo", 0.33)))
 
-… (old lines 803-807 not captured in this view)
+    node_ids = list(G.nodes)
+    if not node_ids:
+        hist["Si_mean"].append(0.0)
+        hist["Si_hi_frac"].append(0.0)
+        hist["Si_lo_frac"].append(0.0)
+        return
+
+    sis = []
+    for node in node_ids:
+        raw = get_attr(
+            G.nodes[node],
+            ALIAS_SI,
+            None,
+            conv=lambda value: value,  # Preserve NaN sentinels
+        )
+        try:
+            sis.append(float(raw) if raw is not None else math.nan)
+        except (TypeError, ValueError):
+            sis.append(math.nan)
+
+    np_mod = get_numpy()
+    if np_mod is not None:
+        sis_array = np_mod.asarray(sis, dtype=float)
+        valid = sis_array[~np_mod.isnan(sis_array)]
+        n = int(valid.size)
+        if n:
+            hist["Si_mean"].append(float(valid.mean()))
+            hi_frac = np_mod.count_nonzero(valid >= si_hi) / n
+            lo_frac = np_mod.count_nonzero(valid <= si_lo) / n
+            hist["Si_hi_frac"].append(float(hi_frac))
+            hist["Si_lo_frac"].append(float(lo_frac))
+        else:
+            hist["Si_mean"].append(0.0)
+            hist["Si_hi_frac"].append(0.0)
+            hist["Si_lo_frac"].append(0.0)
+        return
+
+    if n_jobs is not None and n_jobs > 1:
+        approx_chunk = math.ceil(len(sis) / n_jobs) if n_jobs else None
+        chunk_size = resolve_chunk_size(
+            approx_chunk,
+            len(sis),
+            minimum=1,
+        )
+        futures = []
+        with ProcessPoolExecutor(max_workers=n_jobs) as executor:
+            for idx in range(0, len(sis), chunk_size):
+                chunk = sis[idx : idx + chunk_size]
+                futures.append(
+                    executor.submit(_si_chunk_stats, chunk, si_hi, si_lo)
+                )
+            totals = [future.result() for future in futures]
+        total = sum(part[0] for part in totals)
+        count = sum(part[1] for part in totals)
+        hi_count = sum(part[2] for part in totals)
+        lo_count = sum(part[3] for part in totals)
+    else:
+        total, count, hi_count, lo_count = _si_chunk_stats(sis, si_hi, si_lo)
 
-… (old lines 809-812 not captured in this view)
-        total += s
-        if s >= si_hi:
-            hi_count += 1
-        if s <= si_lo:
-            lo_count += 1
-
-    n = len(sis)
-    if n:
-        hist["Si_mean"].append(total / n)
-        hist["Si_hi_frac"].append(hi_count / n)
-        hist["Si_lo_frac"].append(lo_count / n)
+    if count:
+        hist["Si_mean"].append(total / count)
+        hist["Si_hi_frac"].append(hi_count / count)
+        hist["Si_lo_frac"].append(lo_count / count)
     else:
         hist["Si_mean"].append(0.0)
         hist["Si_hi_frac"].append(0.0)