tnfr 4.5.2__py3-none-any.whl → 6.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tnfr/__init__.py +228 -49
- tnfr/__init__.pyi +40 -0
- tnfr/_compat.py +11 -0
- tnfr/_version.py +7 -0
- tnfr/_version.pyi +7 -0
- tnfr/alias.py +106 -21
- tnfr/alias.pyi +140 -0
- tnfr/cache.py +666 -512
- tnfr/cache.pyi +232 -0
- tnfr/callback_utils.py +2 -9
- tnfr/callback_utils.pyi +105 -0
- tnfr/cli/__init__.py +21 -7
- tnfr/cli/__init__.pyi +47 -0
- tnfr/cli/arguments.py +42 -20
- tnfr/cli/arguments.pyi +33 -0
- tnfr/cli/execution.py +54 -20
- tnfr/cli/execution.pyi +80 -0
- tnfr/cli/utils.py +0 -2
- tnfr/cli/utils.pyi +8 -0
- tnfr/config/__init__.py +12 -0
- tnfr/config/__init__.pyi +8 -0
- tnfr/config/constants.py +104 -0
- tnfr/config/constants.pyi +12 -0
- tnfr/{config.py → config/init.py} +11 -7
- tnfr/config/init.pyi +8 -0
- tnfr/config/operator_names.py +106 -0
- tnfr/config/operator_names.pyi +28 -0
- tnfr/config/presets.py +104 -0
- tnfr/config/presets.pyi +7 -0
- tnfr/constants/__init__.py +78 -24
- tnfr/constants/__init__.pyi +104 -0
- tnfr/constants/core.py +1 -2
- tnfr/constants/core.pyi +17 -0
- tnfr/constants/init.pyi +12 -0
- tnfr/constants/metric.py +4 -12
- tnfr/constants/metric.pyi +19 -0
- tnfr/constants_glyphs.py +9 -91
- tnfr/constants_glyphs.pyi +12 -0
- tnfr/dynamics/__init__.py +112 -634
- tnfr/dynamics/__init__.pyi +83 -0
- tnfr/dynamics/adaptation.py +201 -0
- tnfr/dynamics/aliases.py +22 -0
- tnfr/dynamics/coordination.py +343 -0
- tnfr/dynamics/dnfr.py +1936 -354
- tnfr/dynamics/dnfr.pyi +33 -0
- tnfr/dynamics/integrators.py +369 -75
- tnfr/dynamics/integrators.pyi +35 -0
- tnfr/dynamics/runtime.py +521 -0
- tnfr/dynamics/sampling.py +8 -5
- tnfr/dynamics/sampling.pyi +7 -0
- tnfr/dynamics/selectors.py +680 -0
- tnfr/execution.py +56 -41
- tnfr/execution.pyi +65 -0
- tnfr/flatten.py +7 -7
- tnfr/flatten.pyi +28 -0
- tnfr/gamma.py +54 -37
- tnfr/gamma.pyi +40 -0
- tnfr/glyph_history.py +85 -38
- tnfr/glyph_history.pyi +53 -0
- tnfr/grammar.py +19 -338
- tnfr/grammar.pyi +13 -0
- tnfr/helpers/__init__.py +110 -30
- tnfr/helpers/__init__.pyi +66 -0
- tnfr/helpers/numeric.py +1 -0
- tnfr/helpers/numeric.pyi +12 -0
- tnfr/immutable.py +55 -19
- tnfr/immutable.pyi +37 -0
- tnfr/initialization.py +12 -10
- tnfr/initialization.pyi +73 -0
- tnfr/io.py +99 -34
- tnfr/io.pyi +11 -0
- tnfr/locking.pyi +7 -0
- tnfr/metrics/__init__.pyi +20 -0
- tnfr/metrics/coherence.py +934 -294
- tnfr/metrics/common.py +1 -3
- tnfr/metrics/common.pyi +15 -0
- tnfr/metrics/core.py +192 -34
- tnfr/metrics/core.pyi +13 -0
- tnfr/metrics/diagnosis.py +707 -101
- tnfr/metrics/diagnosis.pyi +89 -0
- tnfr/metrics/export.py +27 -13
- tnfr/metrics/glyph_timing.py +218 -38
- tnfr/metrics/reporting.py +22 -18
- tnfr/metrics/reporting.pyi +12 -0
- tnfr/metrics/sense_index.py +199 -25
- tnfr/metrics/sense_index.pyi +9 -0
- tnfr/metrics/trig.py +53 -18
- tnfr/metrics/trig.pyi +12 -0
- tnfr/metrics/trig_cache.py +3 -7
- tnfr/metrics/trig_cache.pyi +10 -0
- tnfr/node.py +148 -125
- tnfr/node.pyi +161 -0
- tnfr/observers.py +44 -30
- tnfr/observers.pyi +46 -0
- tnfr/ontosim.py +14 -13
- tnfr/ontosim.pyi +33 -0
- tnfr/operators/__init__.py +84 -52
- tnfr/operators/__init__.pyi +31 -0
- tnfr/operators/definitions.py +181 -0
- tnfr/operators/definitions.pyi +92 -0
- tnfr/operators/jitter.py +86 -23
- tnfr/operators/jitter.pyi +11 -0
- tnfr/operators/registry.py +80 -0
- tnfr/operators/registry.pyi +15 -0
- tnfr/operators/remesh.py +141 -57
- tnfr/presets.py +9 -54
- tnfr/presets.pyi +7 -0
- tnfr/py.typed +0 -0
- tnfr/rng.py +259 -73
- tnfr/rng.pyi +14 -0
- tnfr/selector.py +24 -17
- tnfr/selector.pyi +19 -0
- tnfr/sense.py +55 -43
- tnfr/sense.pyi +30 -0
- tnfr/structural.py +44 -267
- tnfr/structural.pyi +46 -0
- tnfr/telemetry/__init__.py +13 -0
- tnfr/telemetry/verbosity.py +37 -0
- tnfr/tokens.py +3 -2
- tnfr/tokens.pyi +41 -0
- tnfr/trace.py +272 -82
- tnfr/trace.pyi +68 -0
- tnfr/types.py +345 -6
- tnfr/types.pyi +145 -0
- tnfr/utils/__init__.py +158 -0
- tnfr/utils/__init__.pyi +133 -0
- tnfr/utils/cache.py +755 -0
- tnfr/utils/cache.pyi +156 -0
- tnfr/{collections_utils.py → utils/data.py} +57 -90
- tnfr/utils/data.pyi +73 -0
- tnfr/utils/graph.py +87 -0
- tnfr/utils/graph.pyi +10 -0
- tnfr/utils/init.py +746 -0
- tnfr/utils/init.pyi +85 -0
- tnfr/{json_utils.py → utils/io.py} +13 -18
- tnfr/utils/io.pyi +10 -0
- tnfr/utils/validators.py +130 -0
- tnfr/utils/validators.pyi +19 -0
- tnfr/validation/__init__.py +25 -0
- tnfr/validation/__init__.pyi +17 -0
- tnfr/validation/compatibility.py +59 -0
- tnfr/validation/compatibility.pyi +8 -0
- tnfr/validation/grammar.py +149 -0
- tnfr/validation/grammar.pyi +11 -0
- tnfr/validation/rules.py +194 -0
- tnfr/validation/rules.pyi +18 -0
- tnfr/validation/syntax.py +151 -0
- tnfr/validation/syntax.pyi +7 -0
- tnfr-6.0.0.dist-info/METADATA +135 -0
- tnfr-6.0.0.dist-info/RECORD +157 -0
- tnfr/graph_utils.py +0 -84
- tnfr/import_utils.py +0 -228
- tnfr/logging_utils.py +0 -116
- tnfr/validators.py +0 -84
- tnfr/value_utils.py +0 -59
- tnfr-4.5.2.dist-info/METADATA +0 -379
- tnfr-4.5.2.dist-info/RECORD +0 -67
- {tnfr-4.5.2.dist-info → tnfr-6.0.0.dist-info}/WHEEL +0 -0
- {tnfr-4.5.2.dist-info → tnfr-6.0.0.dist-info}/entry_points.txt +0 -0
- {tnfr-4.5.2.dist-info → tnfr-6.0.0.dist-info}/licenses/LICENSE.md +0 -0
- {tnfr-4.5.2.dist-info → tnfr-6.0.0.dist-info}/top_level.txt +0 -0
tnfr/metrics/coherence.py
CHANGED
@@ -3,8 +3,13 @@
 from __future__ import annotations
 
 import math
+from collections.abc import Callable, Iterable, Mapping, Sequence
+from concurrent.futures import ProcessPoolExecutor
 from dataclasses import dataclass
-from
+from types import ModuleType
+from typing import Any, MutableMapping, TypedDict, cast
+
+from .._compat import TypeAlias
 
 
 from ..constants import (
@@ -12,11 +17,19 @@ from ..constants import (
     get_param,
 )
 from ..callback_utils import CallbackEvent, callback_manager
-from ..glyph_history import
-from ..alias import collect_attr,
-from ..collections_utils import normalize_weights
+from ..glyph_history import append_metric, ensure_history
+from ..alias import collect_attr, collect_theta_attr, set_attr
 from ..helpers.numeric import clamp01
-from ..
+from ..types import (
+    CoherenceMetric,
+    FloatArray,
+    FloatMatrix,
+    GlyphLoadDistribution,
+    HistoryState,
+    NodeId,
+    SigmaVector,
+    TNFRGraph,
+)
 from .common import compute_coherence, min_max_range
 from .trig_cache import compute_theta_trig, get_trig_cache
 from ..observers import (
@@ -27,12 +40,15 @@ from ..observers import (
     phase_sync,
 )
 from ..sense import sigma_vector
-from ..
-
+from ..utils import (
+    ensure_node_index_map,
+    get_logger,
+    get_numpy,
+    normalize_weights,
+)
 
 logger = get_logger(__name__)
 
-ALIAS_THETA = get_aliases("THETA")
 ALIAS_EPI = get_aliases("EPI")
 ALIAS_VF = get_aliases("VF")
 ALIAS_SI = get_aliases("SI")
@@ -42,6 +58,8 @@ ALIAS_DSI = get_aliases("DSI")
 ALIAS_DVF = get_aliases("DVF")
 ALIAS_D2VF = get_aliases("D2VF")
 
+GLYPH_LOAD_STABILIZERS_KEY = "glyph_load_stabilizers"
+
 
 @dataclass
 class SimilarityInputs:
@@ -55,16 +73,66 @@ class SimilarityInputs:
     sin_vals: Sequence[float] | None = None
 
 
+CoherenceMatrixDense = list[list[float]]
+CoherenceMatrixSparse = list[tuple[int, int, float]]
+CoherenceMatrixPayload = CoherenceMatrixDense | CoherenceMatrixSparse
+PhaseSyncWeights: TypeAlias = (
+    Sequence[float] | CoherenceMatrixSparse | CoherenceMatrixDense
+)
+
+SimilarityComponents = tuple[float, float, float, float]
+VectorizedComponents: TypeAlias = (
+    tuple[FloatMatrix, FloatMatrix, FloatMatrix, FloatMatrix]
+)
+ScalarOrArray: TypeAlias = float | FloatArray
+StabilityChunkArgs = tuple[
+    Sequence[float],
+    Sequence[float],
+    Sequence[float],
+    Sequence[float | None],
+    Sequence[float],
+    Sequence[float | None],
+    Sequence[float | None],
+    float,
+    float,
+    float,
+]
+StabilityChunkResult = tuple[
+    int,
+    int,
+    float,
+    float,
+    list[float],
+    list[float],
+    list[float],
+]
+
+MetricValue: TypeAlias = CoherenceMetric
+MetricProvider = Callable[[], MetricValue]
+MetricRecord: TypeAlias = tuple[MetricValue | MetricProvider, str]
+
+
+class ParallelWijPayload(TypedDict):
+    epi_vals: Sequence[float]
+    vf_vals: Sequence[float]
+    si_vals: Sequence[float]
+    cos_vals: Sequence[float]
+    sin_vals: Sequence[float]
+    weights: tuple[float, float, float, float]
+    epi_range: float
+    vf_range: float
+
+
 def _compute_wij_phase_epi_vf_si_vectorized(
-    epi,
-    vf,
-    si,
-    cos_th,
-    sin_th,
-    epi_range,
-    vf_range,
-    np,
-):
+    epi: FloatArray,
+    vf: FloatArray,
+    si: FloatArray,
+    cos_th: FloatArray,
+    sin_th: FloatArray,
+    epi_range: float,
+    vf_range: float,
+    np: ModuleType,
+) -> VectorizedComponents:
     """Vectorized computation of similarity components.
 
     All parameters are expected to be NumPy arrays already cast to ``float``
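The hunk above introduces module-level matrix aliases and a `ParallelWijPayload` TypedDict that pins down exactly what crosses the process boundary. A minimal, self-contained sketch of how such a typed payload is built (the two-node values below are illustrative, not taken from the package):

```python
from collections.abc import Sequence
from typing import TypedDict


class ParallelWijPayload(TypedDict):
    epi_vals: Sequence[float]
    vf_vals: Sequence[float]
    si_vals: Sequence[float]
    cos_vals: Sequence[float]
    sin_vals: Sequence[float]
    weights: tuple[float, float, float, float]
    epi_range: float
    vf_range: float


# Tuples keep the snapshot immutable, so every worker process reads the
# same data without copies drifting apart.
payload: ParallelWijPayload = {
    "epi_vals": (0.2, 0.8),
    "vf_vals": (1.0, 0.5),
    "si_vals": (0.9, 0.4),
    "cos_vals": (1.0, 0.0),
    "sin_vals": (0.0, 1.0),
    "weights": (0.25, 0.25, 0.25, 0.25),
    "epi_range": 1.0,
    "vf_range": 1.0,
}
```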
@@ -90,13 +158,13 @@ def compute_wij_phase_epi_vf_si(
     i: int | None = None,
     j: int | None = None,
     *,
-    trig=None,
-    G:
-    nodes: Sequence[
+    trig: Any | None = None,
+    G: TNFRGraph | None = None,
+    nodes: Sequence[NodeId] | None = None,
     epi_range: float = 1.0,
     vf_range: float = 1.0,
-    np=None,
-):
+    np: ModuleType | None = None,
+) -> SimilarityComponents | VectorizedComponents:
     """Return similarity components for nodes ``i`` and ``j``.
 
     When ``np`` is provided and ``i`` and ``j`` are ``None`` the computation is
@@ -126,11 +194,11 @@ def compute_wij_phase_epi_vf_si(
     si_vals = inputs.si_vals
 
     if np is not None and i is None and j is None:
-        epi = np.asarray(epi_vals)
-        vf = np.asarray(vf_vals)
-        si = np.asarray(si_vals)
-        cos_th = np.asarray(cos_vals, dtype=float)
-        sin_th = np.asarray(sin_vals, dtype=float)
+        epi = cast(FloatArray, np.asarray(epi_vals, dtype=float))
+        vf = cast(FloatArray, np.asarray(vf_vals, dtype=float))
+        si = cast(FloatArray, np.asarray(si_vals, dtype=float))
+        cos_th = cast(FloatArray, np.asarray(cos_vals, dtype=float))
+        sin_th = cast(FloatArray, np.asarray(sin_vals, dtype=float))
         return _compute_wij_phase_epi_vf_si_vectorized(
             epi,
             vf,
@@ -158,33 +226,42 @@ def compute_wij_phase_epi_vf_si(
 
 
 def _combine_similarity(
-    s_phase,
-    s_epi,
-    s_vf,
-    s_si,
-    phase_w,
-    epi_w,
-    vf_w,
-    si_w,
-    np=None,
-):
+    s_phase: ScalarOrArray,
+    s_epi: ScalarOrArray,
+    s_vf: ScalarOrArray,
+    s_si: ScalarOrArray,
+    phase_w: float,
+    epi_w: float,
+    vf_w: float,
+    si_w: float,
+    np: ModuleType | None = None,
+) -> ScalarOrArray:
     wij = phase_w * s_phase + epi_w * s_epi + vf_w * s_vf + si_w * s_si
     if np is not None:
-        return np.clip(wij, 0.0, 1.0)
+        return cast(FloatArray, np.clip(wij, 0.0, 1.0))
     return clamp01(wij)
 
 
 def _wij_components_weights(
-    G,
-    nodes,
+    G: TNFRGraph,
+    nodes: Sequence[NodeId] | None,
     inputs: SimilarityInputs,
-    wnorm,
+    wnorm: Mapping[str, float],
     i: int | None = None,
     j: int | None = None,
     epi_range: float = 1.0,
     vf_range: float = 1.0,
-    np=None,
-)
+    np: ModuleType | None = None,
+) -> tuple[
+    ScalarOrArray,
+    ScalarOrArray,
+    ScalarOrArray,
+    ScalarOrArray,
+    float,
+    float,
+    float,
+    float,
+]:
     """Return similarity components together with their weights.
 
     This consolidates repeated computations ensuring that both the
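`_combine_similarity` now advertises that it accepts either scalars or arrays (`ScalarOrArray`) and clamps the weighted sum into [0, 1] on both paths. A small parity check, assuming `clamp01` is the usual scalar clamp (an assumption, since its body is not shown in this diff):

```python
import numpy as np


def clamp01(x: float) -> float:
    # Assumed scalar counterpart of np.clip(x, 0.0, 1.0).
    return 0.0 if x < 0.0 else 1.0 if x > 1.0 else x


def combine(s_phase, s_epi, s_vf, s_si, w=(0.4, 0.3, 0.2, 0.1), np_mod=None):
    wij = w[0] * s_phase + w[1] * s_epi + w[2] * s_vf + w[3] * s_si
    return np_mod.clip(wij, 0.0, 1.0) if np_mod is not None else clamp01(wij)


scalar = combine(0.9, 0.8, 0.7, 1.2)  # plain-float path
vector = combine(np.array([0.9]), np.array([0.8]),
                 np.array([0.7]), np.array([1.2]), np_mod=np)
assert abs(scalar - float(vector[0])) < 1e-12  # both paths agree
```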
@@ -210,17 +287,17 @@ def _wij_components_weights(
 
 
 def _wij_vectorized(
-    G,
-    nodes,
+    G: TNFRGraph,
+    nodes: Sequence[NodeId],
     inputs: SimilarityInputs,
-    wnorm,
-    epi_min,
-    epi_max,
-    vf_min,
-    vf_max,
-    self_diag,
-    np,
-):
+    wnorm: Mapping[str, float],
+    epi_min: float,
+    epi_max: float,
+    vf_min: float,
+    vf_max: float,
+    self_diag: bool,
+    np: ModuleType,
+) -> FloatMatrix:
     epi_range = epi_max - epi_min if epi_max > epi_min else 1.0
     vf_range = vf_max - vf_min if vf_max > vf_min else 1.0
     (
@@ -241,65 +318,110 @@ def _wij_vectorized(
         vf_range=vf_range,
         np=np,
     )
-
-
+    wij_matrix = cast(
+        FloatMatrix,
+        _combine_similarity(
+            s_phase, s_epi, s_vf, s_si, phase_w, epi_w, vf_w, si_w, np=np
+        ),
     )
     if self_diag:
-        np.fill_diagonal(
+        np.fill_diagonal(wij_matrix, 1.0)
     else:
-        np.fill_diagonal(
-    return
+        np.fill_diagonal(wij_matrix, 0.0)
+    return wij_matrix
 
 
-def
-    wij: list[list[float]],
+def _compute_wij_value_raw(
     i: int,
     j: int,
-
-
-
+    epi_vals: Sequence[float],
+    vf_vals: Sequence[float],
+    si_vals: Sequence[float],
+    cos_vals: Sequence[float],
+    sin_vals: Sequence[float],
+    weights: tuple[float, float, float, float],
     epi_range: float,
     vf_range: float,
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+) -> float:
+    epi_range = epi_range if epi_range > 0 else 1.0
+    vf_range = vf_range if vf_range > 0 else 1.0
+    phase_w, epi_w, vf_w, si_w = weights
+    cos_i = cos_vals[i]
+    sin_i = sin_vals[i]
+    cos_j = cos_vals[j]
+    sin_j = sin_vals[j]
+    s_phase = 0.5 * (1.0 + (cos_i * cos_j + sin_i * sin_j))
+    s_epi = 1.0 - abs(epi_vals[i] - epi_vals[j]) / epi_range
+    s_vf = 1.0 - abs(vf_vals[i] - vf_vals[j]) / vf_range
+    s_si = 1.0 - abs(si_vals[i] - si_vals[j])
+    wij = phase_w * s_phase + epi_w * s_epi + vf_w * s_vf + si_w * s_si
+    return clamp01(wij)
+
+
+_PARALLEL_WIJ_DATA: ParallelWijPayload | None = None
+
+
+def _init_parallel_wij(data: ParallelWijPayload) -> None:
+    """Store immutable state for parallel ``wij`` computation."""
+
+    global _PARALLEL_WIJ_DATA
+    _PARALLEL_WIJ_DATA = data
+
+
+def _parallel_wij_worker(
+    pairs: Sequence[tuple[int, int]]
+) -> list[tuple[int, int, float]]:
+    """Compute coherence weights for ``pairs`` using shared state."""
+
+    if _PARALLEL_WIJ_DATA is None:
+        raise RuntimeError("Parallel coherence data not initialized")
+
+    data = _PARALLEL_WIJ_DATA
+    epi_vals: Sequence[float] = data["epi_vals"]
+    vf_vals: Sequence[float] = data["vf_vals"]
+    si_vals: Sequence[float] = data["si_vals"]
+    cos_vals: Sequence[float] = data["cos_vals"]
+    sin_vals: Sequence[float] = data["sin_vals"]
+    weights: tuple[float, float, float, float] = data["weights"]
+    epi_range: float = data["epi_range"]
+    vf_range: float = data["vf_range"]
+
+    compute = _compute_wij_value_raw
+    return [
+        (
+            i,
+            j,
+            compute(
+                i,
+                j,
+                epi_vals,
+                vf_vals,
+                si_vals,
+                cos_vals,
+                sin_vals,
+                weights,
+                epi_range,
+                vf_range,
+            ),
+        )
+        for i, j in pairs
+    ]
 
 
 def _wij_loops(
-    G,
-    nodes: Sequence[
-    node_to_index:
+    G: TNFRGraph,
+    nodes: Sequence[NodeId],
+    node_to_index: Mapping[NodeId, int],
     inputs: SimilarityInputs,
-    wnorm:
+    wnorm: Mapping[str, float],
     epi_min: float,
     epi_max: float,
     vf_min: float,
     vf_max: float,
     neighbors_only: bool,
     self_diag: bool,
-
+    n_jobs: int | None = 1,
+) -> CoherenceMatrixDense:
     n = len(nodes)
     cos_vals = inputs.cos_vals
     sin_vals = inputs.sin_vals
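`_init_parallel_wij` and `_parallel_wij_worker` are the two halves of the standard `ProcessPoolExecutor` initializer pattern: the payload is installed once per worker as a module-level global, so each submitted task only ships a small list of index pairs. A stripped-down sketch of the same pattern with hypothetical data (note the diff passes a closure as `initializer`, which relies on a fork start method, while `initargs` as used here also works under spawn):

```python
from concurrent.futures import ProcessPoolExecutor

_SHARED: dict | None = None


def _init(data: dict) -> None:
    # Runs once in each worker process; stores the read-only snapshot.
    global _SHARED
    _SHARED = data


def _work(pairs: list[tuple[int, int]]) -> list[tuple[int, int, float]]:
    assert _SHARED is not None, "initializer did not run"
    vals = _SHARED["vals"]
    return [(i, j, abs(vals[i] - vals[j])) for i, j in pairs]


if __name__ == "__main__":
    data = {"vals": (1.0, 2.0, 3.5)}
    with ProcessPoolExecutor(
        max_workers=2, initializer=_init, initargs=(data,)
    ) as ex:
        print(ex.submit(_work, [(0, 1), (1, 2)]).result())
        # [(0, 1, 1.0), (1, 2, 1.5)]
```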
@@ -310,47 +432,105 @@ def _wij_loops(
         sin_vals = [trig_local.sin[n] for n in nodes]
         inputs.cos_vals = cos_vals
         inputs.sin_vals = sin_vals
+    assert cos_vals is not None
+    assert sin_vals is not None
+    epi_vals = list(inputs.epi_vals)
+    vf_vals = list(inputs.vf_vals)
+    si_vals = list(inputs.si_vals)
+    cos_vals_list = list(cos_vals)
+    sin_vals_list = list(sin_vals)
+    inputs.epi_vals = epi_vals
+    inputs.vf_vals = vf_vals
+    inputs.si_vals = si_vals
+    inputs.cos_vals = cos_vals_list
+    inputs.sin_vals = sin_vals_list
     wij = [
         [1.0 if (self_diag and i == j) else 0.0 for j in range(n)]
         for i in range(n)
     ]
     epi_range = epi_max - epi_min if epi_max > epi_min else 1.0
     vf_range = vf_max - vf_min if vf_max > vf_min else 1.0
+    weights = (
+        float(wnorm["phase"]),
+        float(wnorm["epi"]),
+        float(wnorm["vf"]),
+        float(wnorm["si"]),
+    )
+    pair_list: list[tuple[int, int]] = []
     if neighbors_only:
+        seen: set[tuple[int, int]] = set()
         for u, v in G.edges():
             i = node_to_index[u]
             j = node_to_index[v]
             if i == j:
                 continue
-
-
+            pair = (i, j) if i < j else (j, i)
+            if pair in seen:
+                continue
+            seen.add(pair)
+            pair_list.append(pair)
+    else:
+        for i in range(n):
+            for j in range(i + 1, n):
+                pair_list.append((i, j))
+
+    total_pairs = len(pair_list)
+    max_workers = 1
+    if n_jobs is not None:
+        try:
+            max_workers = int(n_jobs)
+        except (TypeError, ValueError):
+            max_workers = 1
+    if max_workers <= 1 or total_pairs == 0:
+        for i, j in pair_list:
+            wij_ij = _compute_wij_value_raw(
                 i,
                 j,
-
-
-
+                epi_vals,
+                vf_vals,
+                si_vals,
+                cos_vals,
+                sin_vals,
+                weights,
                 epi_range,
                 vf_range,
-                wnorm,
             )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            wij[i][j] = wij[j][i] = wij_ij
+        return wij
+
+    chunk_size = max(1, math.ceil(total_pairs / max_workers))
+    payload: ParallelWijPayload = {
+        "epi_vals": tuple(epi_vals),
+        "vf_vals": tuple(vf_vals),
+        "si_vals": tuple(si_vals),
+        "cos_vals": tuple(cos_vals),
+        "sin_vals": tuple(sin_vals),
+        "weights": weights,
+        "epi_range": float(epi_range),
+        "vf_range": float(vf_range),
+    }
+
+    def _init() -> None:
+        _init_parallel_wij(payload)
+
+    with ProcessPoolExecutor(max_workers=max_workers, initializer=_init) as executor:
+        futures = []
+        for start in range(0, total_pairs, chunk_size):
+            chunk = pair_list[start:start + chunk_size]
+            futures.append(executor.submit(_parallel_wij_worker, chunk))
+        for future in futures:
+            for i, j, value in future.result():
+                wij[i][j] = wij[j][i] = value
     return wij
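The rewritten `_wij_loops` first materialises a deduplicated `pair_list` (each undirected edge canonicalised to its `(min, max)` form) and only then decides between the serial loop and the chunked pool. The normalisation step in isolation:

```python
import math


def undirected_pairs(edges, n, neighbors_only=True):
    # Canonicalise (u, v)/(v, u) duplicates to a single (min, max) pair,
    # mirroring the seen-set logic in _wij_loops; self-loops are skipped.
    if not neighbors_only:
        return [(i, j) for i in range(n) for j in range(i + 1, n)]
    seen: set[tuple[int, int]] = set()
    pairs: list[tuple[int, int]] = []
    for i, j in edges:
        if i == j:
            continue
        pair = (i, j) if i < j else (j, i)
        if pair not in seen:
            seen.add(pair)
            pairs.append(pair)
    return pairs


pairs = undirected_pairs([(0, 1), (1, 0), (2, 2), (1, 2)], n=3)
assert pairs == [(0, 1), (1, 2)]
chunk_size = max(1, math.ceil(len(pairs) / 4))  # same sizing rule as the diff
```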
@@ -360,62 +540,41 @@ def _compute_stats(values, row_sum, n, self_diag, np=None):
     """
 
     if np is not None:
-        # Normalize inputs to NumPy arrays
         if not isinstance(values, np.ndarray):
-
+            values_arr = np.asarray(list(values), dtype=float)
         else:
-
+            values_arr = cast(Any, values.astype(float))
         if not isinstance(row_sum, np.ndarray):
-
+            row_arr = np.asarray(list(row_sum), dtype=float)
         else:
-
-
-
-
-
-        def min_fn(v):
-            return float(v.min()) if v.size else 0.0
-
-        def max_fn(v):
-            return float(v.max()) if v.size else 0.0
-
-        def mean_fn(v):
-            return float(v.mean()) if v.size else 0.0
-
-        def wi_fn(r, d):
-            return (r / d).astype(float).tolist()
-
+            row_arr = cast(Any, row_sum.astype(float))
+        count_val = int(values_arr.size)
+        min_val = float(values_arr.min()) if values_arr.size else 0.0
+        max_val = float(values_arr.max()) if values_arr.size else 0.0
+        mean_val = float(values_arr.mean()) if values_arr.size else 0.0
     else:
-
-
-
-
-
-
-
-        def min_fn(v):
-            return min(v) if v else 0.0
-
-        def max_fn(v):
-            return max(v) if v else 0.0
+        values_list = list(values)
+        row_arr = list(row_sum)
+        count_val = len(values_list)
+        min_val = min(values_list) if values_list else 0.0
+        max_val = max(values_list) if values_list else 0.0
+        mean_val = sum(values_list) / len(values_list) if values_list else 0.0
 
-        def mean_fn(v):
-            return sum(v) / len(v) if v else 0.0
-
-        def wi_fn(r, d):
-            return [float(r[i]) / d for i in range(n)]
-
-    count_val = size_fn(values)
-    min_val = min_fn(values)
-    max_val = max_fn(values)
-    mean_val = mean_fn(values)
     row_count = n if self_diag else n - 1
     denom = max(1, row_count)
-
+    if np is not None:
+        Wi = (row_arr / denom).astype(float).tolist()  # type: ignore[operator]
+    else:
+        Wi = [float(row_arr[i]) / denom for i in range(n)]
     return min_val, max_val, mean_val, Wi, count_val
 
 
-def _coherence_numpy(wij, mode, thr, np):
+def _coherence_numpy(
+    wij: Any,
+    mode: str,
+    thr: float,
+    np: ModuleType,
+) -> tuple[int, Any, Any, CoherenceMatrixPayload]:
     """Aggregate coherence weights using vectorized operations.
 
     Produces the structural weight matrix ``W`` along with the list of off
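`_compute_stats` normalises each row sum by the number of possible neighbours, which depends on whether the diagonal counts (`self_diag`). The arithmetic in miniature:

```python
# Row sums of a 3x3 weight matrix with the diagonal excluded.
row_sum = [1.2, 0.9, 1.5]
n, self_diag = 3, False
denom = max(1, n if self_diag else n - 1)  # 2 off-diagonal entries per row
Wi = [r / denom for r in row_sum]
assert Wi == [0.6, 0.45, 0.75]
```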
@@ -437,35 +596,114 @@ def _coherence_numpy(wij, mode, thr, np):
     return n, values, row_sum, W
 
 
-def
+def _coherence_python_worker(
+    args: tuple[Sequence[Sequence[float]], int, str, float]
+) -> tuple[int, list[float], list[float], CoherenceMatrixSparse]:
+    rows, start, mode, thr = args
+    values: list[float] = []
+    row_sum: list[float] = []
+    sparse: list[tuple[int, int, float]] = []
+    dense_mode = mode == "dense"
+
+    for offset, row in enumerate(rows):
+        i = start + offset
+        total = 0.0
+        for j, w in enumerate(row):
+            total += w
+            if i != j:
+                values.append(w)
+                if not dense_mode and w >= thr:
+                    sparse.append((i, j, w))
+        row_sum.append(total)
+
+    return start, values, row_sum, sparse
+
+
+def _coherence_python(
+    wij: Sequence[Sequence[float]],
+    mode: str,
+    thr: float,
+    n_jobs: int | None = 1,
+) -> tuple[int, list[float], list[float], CoherenceMatrixPayload]:
     """Aggregate coherence weights using pure Python loops."""
 
     n = len(wij)
     values: list[float] = []
     row_sum = [0.0] * n
-
-
-
-
-
-
-            values.append(w)
-            row_sum[i] += w
+
+    if n_jobs is not None:
+        try:
+            max_workers = int(n_jobs)
+        except (TypeError, ValueError):
+            max_workers = 1
     else:
-
-
-
-
-
-
-
-
-
-
-
+        max_workers = 1
+
+    if max_workers <= 1:
+        if mode == "dense":
+            W: CoherenceMatrixDense = [list(row) for row in wij]
+            for i in range(n):
+                for j in range(n):
+                    w = W[i][j]
+                    if i != j:
+                        values.append(w)
+                    row_sum[i] += w
+        else:
+            W_sparse: CoherenceMatrixSparse = []
+            for i in range(n):
+                row_i = wij[i]
+                for j in range(n):
+                    w = row_i[j]
+                    if i != j:
+                        values.append(w)
+                        if w >= thr:
+                            W_sparse.append((i, j, w))
+                    row_sum[i] += w
+        return n, values, row_sum, W if mode == "dense" else W_sparse
+
+    chunk_size = max(1, math.ceil(n / max_workers))
+    tasks = []
+    with ProcessPoolExecutor(max_workers=max_workers) as executor:
+        for start in range(0, n, chunk_size):
+            rows = wij[start:start + chunk_size]
+            tasks.append(
+                executor.submit(
+                    _coherence_python_worker,
+                    (tuple(tuple(row) for row in rows), start, mode, thr),
+                )
+            )
+        results = [task.result() for task in tasks]
+
+    results.sort(key=lambda item: item[0])
+    sparse_entries: list[tuple[int, int, float]] | None = [] if mode != "dense" else None
+    for start, chunk_values, chunk_row_sum, chunk_sparse in results:
+        values.extend(chunk_values)
+        for offset, total in enumerate(chunk_row_sum):
+            row_sum[start + offset] = total
+        if sparse_entries is not None:
+            sparse_entries.extend(chunk_sparse)
+
+    if mode == "dense":
+        W_dense: CoherenceMatrixDense = [list(row) for row in wij]
+        return n, values, row_sum, W_dense
+    sparse_result: CoherenceMatrixSparse = (
+        sparse_entries if sparse_entries is not None else []
+    )
+    return n, values, row_sum, sparse_result
 
 
-def _finalize_wij(G, nodes, wij, mode, thr, scope, self_diag, np=None):
+def _finalize_wij(
+    G: TNFRGraph,
+    nodes: Sequence[NodeId],
+    wij: FloatMatrix | Sequence[Sequence[float]],
+    mode: str,
+    thr: float,
+    scope: str,
+    self_diag: bool,
+    np: ModuleType | None = None,
+    *,
+    n_jobs: int = 1,
+) -> tuple[list[NodeId], CoherenceMatrixPayload]:
     """Finalize the coherence matrix ``wij`` and store results in history.
 
     When ``np`` is provided and ``wij`` is a NumPy array, the computation is
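Each `_coherence_python_worker` call returns its chunk's `start` offset alongside the partial results, and the parent sorts on that offset before merging, so the aggregate is identical no matter which worker finishes first. The merge step in isolation:

```python
# (start, values, row_sum, sparse) tuples, received out of order.
results = [
    (2, [0.5], [0.5], []),        # chunk covering rows 2..2 finished first
    (0, [0.9, 0.7], [0.9, 0.7], []),
]
results.sort(key=lambda item: item[0])

row_sum = [0.0] * 3
for start, _values, chunk_row_sum, _sparse in results:
    for offset, total in enumerate(chunk_row_sum):
        row_sum[start + offset] = total
assert row_sum == [0.9, 0.7, 0.5]
```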
@@ -474,11 +712,11 @@ def _finalize_wij(G, nodes, wij, mode, thr, scope, self_diag, np=None):
     """
 
     use_np = np is not None and isinstance(wij, np.ndarray)
-
-
-
-
-
+    if use_np:
+        assert np is not None
+        n, values, row_sum, W = _coherence_numpy(wij, mode, thr, np)
+    else:
+        n, values, row_sum, W = _coherence_python(wij, mode, thr, n_jobs=n_jobs)
 
     min_val, max_val, mean_val, Wi, count_val = _compute_stats(
         values, row_sum, n, self_diag, np if use_np else None
@@ -497,16 +735,36 @@ def _finalize_wij(G, nodes, wij, mode, thr, scope, self_diag, np=None):
     append_metric(hist, cfg.get("history_key", "W_sparse"), W)
     append_metric(hist, cfg.get("Wi_history_key", "W_i"), Wi)
     append_metric(hist, cfg.get("stats_history_key", "W_stats"), stats)
-    return nodes, W
+    return list(nodes), W
+
 
+def coherence_matrix(
+    G: TNFRGraph,
+    use_numpy: bool | None = None,
+    *,
+    n_jobs: int | None = None,
+) -> tuple[list[NodeId] | None, CoherenceMatrixPayload | None]:
+    """Compute the coherence weight matrix for ``G``.
+
+    Parameters
+    ----------
+    G:
+        Graph whose nodes encode the structural attributes.
+    use_numpy:
+        When ``True`` the vectorised NumPy implementation is forced. When
+        ``False`` the pure Python fallback is used. ``None`` selects NumPy
+        automatically when available.
+    n_jobs:
+        Maximum worker processes to use for the Python fallback. ``None`` or
+        values less than or equal to one preserve the serial behaviour.
+    """
 
-def coherence_matrix(G, use_numpy: bool | None = None):
     cfg = get_param(G, "COHERENCE")
     if not cfg.get("enabled", True):
         return None, None
 
-    node_to_index = ensure_node_index_map(G)
-    nodes = list(node_to_index.keys())
+    node_to_index: Mapping[NodeId, int] = ensure_node_index_map(G)
+    nodes: list[NodeId] = list(node_to_index.keys())
     n = len(nodes)
     if n == 0:
         return nodes, []
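The new `coherence_matrix` keyword surface keeps backend selection and parallelism orthogonal: `use_numpy` resolves as in the expression a few lines below (`np is not None if use_numpy is None else ...`), and `n_jobs` only matters on the pure-Python path. The resolution rule on its own:

```python
def resolve_use_np(use_numpy: bool | None, np_mod: object | None) -> bool:
    # Same rule as the diff: None auto-detects, True/False force a backend
    # (a forced True still degrades gracefully when NumPy is absent).
    return np_mod is not None if use_numpy is None else (use_numpy and np_mod is not None)


assert resolve_use_np(None, object()) is True    # auto: NumPy importable
assert resolve_use_np(None, None) is False       # auto: NumPy missing
assert resolve_use_np(True, None) is False       # forced but unavailable
assert resolve_use_np(False, object()) is False  # explicit Python fallback
```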
@@ -517,17 +775,20 @@ def coherence_matrix(G, use_numpy: bool | None = None):
         np is not None if use_numpy is None else (use_numpy and np is not None)
     )
 
+    cfg_jobs = cfg.get("n_jobs")
+    parallel_jobs = n_jobs if n_jobs is not None else cfg_jobs
+
     # Precompute indices to avoid repeated list.index calls within loops
 
-    th_vals =
+    th_vals = collect_theta_attr(G, nodes, 0.0, np=np if use_np else None)
     epi_vals = collect_attr(G, nodes, ALIAS_EPI, 0.0, np=np if use_np else None)
     vf_vals = collect_attr(G, nodes, ALIAS_VF, 0.0, np=np if use_np else None)
     si_vals = collect_attr(G, nodes, ALIAS_SI, 0.0, np=np if use_np else None)
-
-    np
-
-
-
+    if use_np:
+        assert np is not None
+        si_vals = np.clip(si_vals, 0.0, 1.0)
+    else:
+        si_vals = [clamp01(v) for v in si_vals]
     epi_min, epi_max = min_max_range(epi_vals)
     vf_min, vf_max = min_max_range(vf_vals)
 
@@ -557,7 +818,8 @@ def coherence_matrix(G, use_numpy: bool | None = None):
         sin_vals=sin_vals,
     )
     if use_np:
-
+        assert np is not None
+        wij_matrix = _wij_vectorized(
             G,
             nodes,
             inputs,
@@ -576,7 +838,8 @@ def coherence_matrix(G, use_numpy: bool | None = None):
             j = node_to_index[v]
             adj[i, j] = True
             adj[j, i] = True
-
+        wij_matrix = cast(FloatMatrix, np.where(adj, wij_matrix, 0.0))
+        wij: FloatMatrix | CoherenceMatrixDense = wij_matrix
     else:
         wij = _wij_loops(
             G,
@@ -590,14 +853,29 @@ def coherence_matrix(G, use_numpy: bool | None = None):
             vf_max,
             neighbors_only,
             self_diag,
+            n_jobs=parallel_jobs,
         )
 
-    return _finalize_wij(
+    return _finalize_wij(
+        G,
+        nodes,
+        wij,
+        mode,
+        thr,
+        scope,
+        self_diag,
+        np,
+        n_jobs=parallel_jobs if not use_np else 1,
+    )
 
 
 def local_phase_sync_weighted(
-    G
-
+    G: TNFRGraph,
+    n: NodeId,
+    nodes_order: Sequence[NodeId] | None = None,
+    W_row: PhaseSyncWeights | None = None,
+    node_to_index: Mapping[NodeId, int] | None = None,
+) -> float:
     """Compute local phase synchrony using explicit weights.
 
     ``nodes_order`` is the node ordering used to build the coherence matrix
@@ -621,27 +899,52 @@ def local_phase_sync_weighted(
     trig = get_trig_cache(G)
     cos_map, sin_map = trig.cos, trig.sin
 
-    if (
-
-
-
-
-
-
-
-
-
-
-
-
-
-    num
-
-
-
-
-
+    if isinstance(W_row, Sequence) and W_row:
+        first = W_row[0]
+        if isinstance(first, (int, float)):
+            row_vals = cast(Sequence[float], W_row)
+            for w, nj in zip(row_vals, nodes_order):
+                if nj == n:
+                    continue
+                den += w
+                cos_j = cos_map.get(nj)
+                sin_j = sin_map.get(nj)
+                if cos_j is None or sin_j is None:
+                    trig_j = compute_theta_trig(((nj, G.nodes[nj]),))
+                    cos_j = trig_j.cos[nj]
+                    sin_j = trig_j.sin[nj]
+                num += w * complex(cos_j, sin_j)
+            return abs(num / den) if den else 0.0
+
+        if (
+            isinstance(first, Sequence)
+            and len(first) == 3
+            and isinstance(first[0], int)
+            and isinstance(first[1], int)
+            and isinstance(first[2], (int, float))
+        ):
+            sparse_entries = cast(CoherenceMatrixSparse, W_row)
+            for ii, jj, w in sparse_entries:
+                if ii != i:
+                    continue
+                nj = nodes_order[jj]
+                if nj == n:
+                    continue
+                den += w
+                cos_j = cos_map.get(nj)
+                sin_j = sin_map.get(nj)
+                if cos_j is None or sin_j is None:
+                    trig_j = compute_theta_trig(((nj, G.nodes[nj]),))
+                    cos_j = trig_j.cos[nj]
+                    sin_j = trig_j.sin[nj]
+                num += w * complex(cos_j, sin_j)
+            return abs(num / den) if den else 0.0
+
+        dense_matrix = cast(CoherenceMatrixDense, W_row)
+        if i is None:
+            raise ValueError("node index resolution failed for dense weights")
+        row_vals = cast(Sequence[float], dense_matrix[i])
+        for w, nj in zip(row_vals, nodes_order):
            if nj == n:
                continue
            den += w
@@ -652,11 +955,28 @@ def local_phase_sync_weighted(
             cos_j = trig_j.cos[nj]
             sin_j = trig_j.sin[nj]
         num += w * complex(cos_j, sin_j)
+        return abs(num / den) if den else 0.0
+
+    sparse_entries = cast(CoherenceMatrixSparse, W_row)
+    for ii, jj, w in sparse_entries:
+        if ii != i:
+            continue
+        nj = nodes_order[jj]
+        if nj == n:
+            continue
+        den += w
+        cos_j = cos_map.get(nj)
+        sin_j = sin_map.get(nj)
+        if cos_j is None or sin_j is None:
+            trig_j = compute_theta_trig(((nj, G.nodes[nj]),))
+            cos_j = trig_j.cos[nj]
+            sin_j = trig_j.sin[nj]
+        num += w * complex(cos_j, sin_j)
 
     return abs(num / den) if den else 0.0
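All three weight layouts accepted by `local_phase_sync_weighted` (dense row, sparse triples, plain sequence) feed the same weighted Kuramoto-style order parameter, R = |Σⱼ wⱼ·e^(iθⱼ)| / Σⱼ wⱼ, over the neighbours of `n`. A free-standing version of that reduction:

```python
import cmath


def weighted_phase_sync(thetas: list[float], weights: list[float]) -> float:
    # R = |sum_j w_j * exp(i*theta_j)| / sum_j w_j
    num = sum(w * cmath.exp(1j * th) for th, w in zip(thetas, weights))
    den = sum(weights)
    return abs(num / den) if den else 0.0


assert weighted_phase_sync([0.0, 0.0], [1.0, 2.0]) == 1.0               # aligned
assert weighted_phase_sync([0.0, 3.14159265358979], [1.0, 1.0]) < 1e-9  # opposed
```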
@@ -664,7 +984,7 @@ def local_phase_sync(G, n):
     return local_phase_sync_weighted(G, n, nodes_order=nodes, W_row=W)
 
 
-def _coherence_step(G, ctx: dict[str, Any] | None = None):
+def _coherence_step(G: TNFRGraph, ctx: dict[str, Any] | None = None) -> None:
     del ctx
 
     if not get_param(G, "COHERENCE").get("enabled", True):
@@ -672,7 +992,7 @@ def _coherence_step(G, ctx: dict[str, Any] | None = None):
     coherence_matrix(G)
 
 
-def register_coherence_callbacks(G) -> None:
+def register_coherence_callbacks(G: TNFRGraph) -> None:
     callback_manager.register_callback(
         G,
         event=CallbackEvent.AFTER_STEP.value,
@@ -687,18 +1007,29 @@ def register_coherence_callbacks(G) -> None:
 
 
 def _record_metrics(
-    hist:
+    hist: HistoryState,
+    *pairs: MetricRecord,
+    evaluate: bool = False,
 ) -> None:
     """Generic recorder for metric values."""
 
-
-
+    metrics = cast(MutableMapping[str, list[Any]], hist)
+    for payload, key in pairs:
+        if evaluate:
+            provider = cast(MetricProvider, payload)
+            append_metric(metrics, key, provider())
+        else:
+            append_metric(metrics, key, payload)
 
 
-def _update_coherence(G, hist) -> None:
+def _update_coherence(G: TNFRGraph, hist: HistoryState) -> None:
     """Update network coherence and related means."""
 
-
+    coherence_payload = cast(
+        tuple[CoherenceMetric, float, float],
+        compute_coherence(G, return_means=True),
+    )
+    C, dnfr_mean, depi_mean = coherence_payload
     _record_metrics(
         hist,
         (C, "C_steps"),
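`_record_metrics` now takes `(value, key)` pairs where the value slot may also hold a zero-argument provider, evaluated only when `evaluate=True`. The shape of that contract, reduced to a dict-backed sketch:

```python
from collections.abc import Callable

MetricProvider = Callable[[], float]


def record(hist: dict[str, list], *pairs, evaluate: bool = False) -> None:
    # Each pair is (value_or_provider, history_key); providers defer the
    # computation until the metric is actually recorded.
    for payload, key in pairs:
        hist.setdefault(key, []).append(payload() if evaluate else payload)


hist: dict[str, list] = {}
record(hist, (0.75, "C_steps"))                          # eager value
record(hist, (lambda: 0.5 * 2, "W_bar"), evaluate=True)  # lazy provider
assert hist == {"C_steps": [0.75], "W_bar": [1.0]}
```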
@@ -714,7 +1045,7 @@ def _update_coherence(G, hist) -> None:
     _record_metrics(hist, (wbar, "W_bar"))
 
 
-def _update_phase_sync(G, hist) -> None:
+def _update_phase_sync(G: TNFRGraph, hist: HistoryState) -> None:
     """Capture phase synchrony and Kuramoto order."""
 
     ps = phase_sync(G)
@@ -726,18 +1057,37 @@ def _update_phase_sync(G, hist) -> None:
     )
 
 
-def _update_sigma(G, hist) -> None:
+def _update_sigma(G: TNFRGraph, hist: HistoryState) -> None:
     """Record glyph load and associated Σ⃗ vector."""
 
-
+    metrics = cast(MutableMapping[str, list[Any]], hist)
+    if "glyph_load_estab" in metrics:
+        raise ValueError(
+            "History payloads using 'glyph_load_estab' are no longer supported. "
+            "Rename the series to 'glyph_load_stabilizers' before loading the graph."
+        )
+    stabilizer_series = metrics.get(GLYPH_LOAD_STABILIZERS_KEY)
+
+    if stabilizer_series is None:
+        stabilizer_series = cast(
+            list[Any], metrics.setdefault(GLYPH_LOAD_STABILIZERS_KEY, [])
+        )
+    else:
+        stabilizer_series = cast(list[Any], stabilizer_series)
+
+    gl: GlyphLoadDistribution = glyph_load(G, window=DEFAULT_GLYPH_LOAD_SPAN)
+    stabilizers = float(gl.get("_stabilizers", 0.0))
+    disruptors = float(gl.get("_disruptors", 0.0))
     _record_metrics(
         hist,
-        (
-        (
+        (stabilizers, GLYPH_LOAD_STABILIZERS_KEY),
+        (disruptors, "glyph_load_disr"),
     )
 
-    dist = {
-
+    dist: GlyphLoadDistribution = {
+        k: v for k, v in gl.items() if not k.startswith("_")
+    }
+    sig: SigmaVector = sigma_vector(dist)
     _record_metrics(
         hist,
         (sig.get("x", 0.0), "sense_sigma_x"),
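`_update_sigma` now raises on histories that still carry the retired `glyph_load_estab` series, per the error text above. A one-off migration for payloads saved under 4.x could look like this (hypothetical helper, not part of the package):

```python
def migrate_history(hist: dict) -> dict:
    # Rename the retired series in place before handing the graph to 6.0.0.
    if "glyph_load_estab" in hist:
        hist["glyph_load_stabilizers"] = hist.pop("glyph_load_estab")
    return hist


old = {"glyph_load_estab": [0.4, 0.6], "glyph_load_disr": [0.1, 0.2]}
assert migrate_history(old) == {
    "glyph_load_stabilizers": [0.4, 0.6],
    "glyph_load_disr": [0.1, 0.2],
}
```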
@@ -747,51 +1097,317 @@
     )
 
 
-def
-    """
+def _stability_chunk_worker(args: StabilityChunkArgs) -> StabilityChunkResult:
+    """Compute stability aggregates for a chunk of nodes."""
 
-
-
-
-
+    (
+        dnfr_vals,
+        depi_vals,
+        si_curr_vals,
+        si_prev_vals,
+        vf_curr_vals,
+        vf_prev_vals,
+        dvf_prev_vals,
+        dt,
+        eps_dnfr,
+        eps_depi,
+    ) = args
+
+    inv_dt = (1.0 / dt) if dt else 0.0
+    stable = 0
+    delta_sum = 0.0
     B_sum = 0.0
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    dvf_dt
-
-        B = (dvf_dt - dvf_prev) / dt
-        nd["_prev_vf"] = vf_curr
-        nd["_prev_dvf"] = dvf_dt
-        set_attr(nd, ALIAS_DVF, dvf_dt)
-        set_attr(nd, ALIAS_D2VF, B)
+    delta_vals: list[float] = []
+    dvf_dt_vals: list[float] = []
+    B_vals: list[float] = []
+
+    for idx in range(len(si_curr_vals)):
+        curr_si = float(si_curr_vals[idx])
+        prev_si_raw = si_prev_vals[idx]
+        prev_si = float(prev_si_raw) if prev_si_raw is not None else curr_si
+        delta = curr_si - prev_si
+        delta_vals.append(delta)
+        delta_sum += delta
+
+        curr_vf = float(vf_curr_vals[idx])
+        prev_vf_raw = vf_prev_vals[idx]
+        prev_vf = float(prev_vf_raw) if prev_vf_raw is not None else curr_vf
+        dvf_dt = (curr_vf - prev_vf) * inv_dt if dt else 0.0
+        prev_dvf_raw = dvf_prev_vals[idx]
+        prev_dvf = float(prev_dvf_raw) if prev_dvf_raw is not None else dvf_dt
+        B = (dvf_dt - prev_dvf) * inv_dt if dt else 0.0
+        dvf_dt_vals.append(dvf_dt)
+        B_vals.append(B)
         B_sum += B
-        B_count += 1
 
-
-
-
+        if abs(float(dnfr_vals[idx])) <= eps_dnfr and abs(float(depi_vals[idx])) <= eps_depi:
+            stable += 1
+
+    chunk_len = len(si_curr_vals)
+    return (
+        stable,
+        chunk_len,
+        delta_sum,
+        B_sum,
+        delta_vals,
+        dvf_dt_vals,
+        B_vals,
     )
-    hist["B"].append(B_sum / B_count if B_count else 0.0)
 
 
-def _aggregate_si(G, hist):
+def _track_stability(
+    G: TNFRGraph,
+    hist: MutableMapping[str, Any],
+    dt: float,
+    eps_dnfr: float,
+    eps_depi: float,
+    *,
+    n_jobs: int | None = None,
+) -> None:
+    """Track per-node stability and derivative metrics."""
+
+    nodes: tuple[NodeId, ...] = tuple(G.nodes)
+    total_nodes = len(nodes)
+    if not total_nodes:
+        hist.setdefault("stable_frac", []).append(0.0)
+        hist.setdefault("delta_Si", []).append(0.0)
+        hist.setdefault("B", []).append(0.0)
+        return
+
+    np_mod = get_numpy()
+
+    dnfr_vals = collect_attr(G, nodes, ALIAS_DNFR, 0.0, np=np_mod)
+    depi_vals = collect_attr(G, nodes, ALIAS_DEPI, 0.0, np=np_mod)
+    si_curr_vals = collect_attr(G, nodes, ALIAS_SI, 0.0, np=np_mod)
+    vf_curr_vals = collect_attr(G, nodes, ALIAS_VF, 0.0, np=np_mod)
+
+    prev_si_data = [G.nodes[n].get("_prev_Si") for n in nodes]
+    prev_vf_data = [G.nodes[n].get("_prev_vf") for n in nodes]
+    prev_dvf_data = [G.nodes[n].get("_prev_dvf") for n in nodes]
+
+    inv_dt = (1.0 / dt) if dt else 0.0
+
+    if np_mod is not None:
+        np = np_mod
+        dnfr_arr = dnfr_vals
+        depi_arr = depi_vals
+        si_curr_arr = si_curr_vals
+        vf_curr_arr = vf_curr_vals
+
+        si_prev_arr = np.asarray(
+            [
+                float(prev_si_data[idx])
+                if prev_si_data[idx] is not None
+                else float(si_curr_arr[idx])
+                for idx in range(total_nodes)
+            ],
+            dtype=float,
+        )
+        vf_prev_arr = np.asarray(
+            [
+                float(prev_vf_data[idx])
+                if prev_vf_data[idx] is not None
+                else float(vf_curr_arr[idx])
+                for idx in range(total_nodes)
+            ],
+            dtype=float,
+        )
+
+        if dt:
+            dvf_dt_arr = (vf_curr_arr - vf_prev_arr) * inv_dt
+        else:
+            dvf_dt_arr = np.zeros_like(vf_curr_arr, dtype=float)
+
+        dvf_prev_arr = np.asarray(
+            [
+                float(prev_dvf_data[idx])
+                if prev_dvf_data[idx] is not None
+                else float(dvf_dt_arr[idx])
+                for idx in range(total_nodes)
+            ],
+            dtype=float,
+        )
+
+        if dt:
+            B_arr = (dvf_dt_arr - dvf_prev_arr) * inv_dt
+        else:
+            B_arr = np.zeros_like(dvf_dt_arr, dtype=float)
+
+        stable_mask = (np.abs(dnfr_arr) <= eps_dnfr) & (np.abs(depi_arr) <= eps_depi)
+        stable_frac = float(stable_mask.mean()) if total_nodes else 0.0
+
+        delta_si_arr = si_curr_arr - si_prev_arr
+        delta_si_mean = float(delta_si_arr.mean()) if total_nodes else 0.0
+        B_mean = float(B_arr.mean()) if total_nodes else 0.0
+
+        hist.setdefault("stable_frac", []).append(stable_frac)
+        hist.setdefault("delta_Si", []).append(delta_si_mean)
+        hist.setdefault("B", []).append(B_mean)
+
+        for idx, node in enumerate(nodes):
+            nd = G.nodes[node]
+            curr_si = float(si_curr_arr[idx])
+            delta_val = float(delta_si_arr[idx])
+            nd["_prev_Si"] = curr_si
+            set_attr(nd, ALIAS_DSI, delta_val)
+
+            curr_vf = float(vf_curr_arr[idx])
+            nd["_prev_vf"] = curr_vf
+
+            dvf_dt_val = float(dvf_dt_arr[idx])
+            nd["_prev_dvf"] = dvf_dt_val
+            set_attr(nd, ALIAS_DVF, dvf_dt_val)
+            set_attr(nd, ALIAS_D2VF, float(B_arr[idx]))
+
+        return
+
+    # NumPy not available: optionally parallel fallback or sequential computation.
+    dnfr_list = list(dnfr_vals)
+    depi_list = list(depi_vals)
+    si_curr_list = list(si_curr_vals)
+    vf_curr_list = list(vf_curr_vals)
+
+    if n_jobs and n_jobs > 1:
+        chunk_size = max(1, math.ceil(total_nodes / n_jobs))
+        chunk_results: list[tuple[int, tuple[int, int, float, float, list[float], list[float], list[float]]]] = []
+        with ProcessPoolExecutor(max_workers=n_jobs) as executor:
+            futures: list[tuple[int, Any]] = []
+            for start in range(0, total_nodes, chunk_size):
+                end = min(start + chunk_size, total_nodes)
+                chunk_args = (
+                    dnfr_list[start:end],
+                    depi_list[start:end],
+                    si_curr_list[start:end],
+                    prev_si_data[start:end],
+                    vf_curr_list[start:end],
+                    prev_vf_data[start:end],
+                    prev_dvf_data[start:end],
+                    dt,
+                    eps_dnfr,
+                    eps_depi,
+                )
+                futures.append((start, executor.submit(_stability_chunk_worker, chunk_args)))
+
+            for start, fut in futures:
+                chunk_results.append((start, fut.result()))
+
+        chunk_results.sort(key=lambda item: item[0])
+
+        stable_total = 0
+        delta_sum = 0.0
+        B_sum = 0.0
+        delta_vals_all: list[float] = []
+        dvf_dt_all: list[float] = []
+        B_vals_all: list[float] = []
+
+        for _, result in chunk_results:
+            (
+                stable_count,
+                chunk_len,
+                chunk_delta_sum,
+                chunk_B_sum,
+                delta_vals,
+                dvf_vals,
+                B_vals,
+            ) = result
+            stable_total += stable_count
+            delta_sum += chunk_delta_sum
+            B_sum += chunk_B_sum
+            delta_vals_all.extend(delta_vals)
+            dvf_dt_all.extend(dvf_vals)
+            B_vals_all.extend(B_vals)
+
+        total = len(delta_vals_all)
+        stable_frac = stable_total / total if total else 0.0
+        delta_si_mean = delta_sum / total if total else 0.0
+        B_mean = B_sum / total if total else 0.0
+
+    else:
+        stable_total = 0
+        delta_sum = 0.0
+        B_sum = 0.0
+        delta_vals_all = []
+        dvf_dt_all = []
+        B_vals_all = []
+
+        for idx in range(total_nodes):
+            curr_si = float(si_curr_list[idx])
+            prev_si_raw = prev_si_data[idx]
+            prev_si = float(prev_si_raw) if prev_si_raw is not None else curr_si
+            delta = curr_si - prev_si
+            delta_vals_all.append(delta)
+            delta_sum += delta
+
+            curr_vf = float(vf_curr_list[idx])
+            prev_vf_raw = prev_vf_data[idx]
+            prev_vf = float(prev_vf_raw) if prev_vf_raw is not None else curr_vf
+            dvf_dt_val = (curr_vf - prev_vf) * inv_dt if dt else 0.0
+            prev_dvf_raw = prev_dvf_data[idx]
+            prev_dvf = float(prev_dvf_raw) if prev_dvf_raw is not None else dvf_dt_val
+            B_val = (dvf_dt_val - prev_dvf) * inv_dt if dt else 0.0
+            dvf_dt_all.append(dvf_dt_val)
+            B_vals_all.append(B_val)
+            B_sum += B_val
+
+            if abs(float(dnfr_list[idx])) <= eps_dnfr and abs(float(depi_list[idx])) <= eps_depi:
+                stable_total += 1
+
+        total = len(delta_vals_all)
+        stable_frac = stable_total / total if total else 0.0
+        delta_si_mean = delta_sum / total if total else 0.0
+        B_mean = B_sum / total if total else 0.0
+
+    hist.setdefault("stable_frac", []).append(stable_frac)
+    hist.setdefault("delta_Si", []).append(delta_si_mean)
+    hist.setdefault("B", []).append(B_mean)
+
+    for idx, node in enumerate(nodes):
+        nd = G.nodes[node]
+        curr_si = float(si_curr_list[idx])
+        delta_val = float(delta_vals_all[idx])
+        nd["_prev_Si"] = curr_si
+        set_attr(nd, ALIAS_DSI, delta_val)
+
+        curr_vf = float(vf_curr_list[idx])
+        nd["_prev_vf"] = curr_vf
+
+        dvf_dt_val = float(dvf_dt_all[idx])
+        nd["_prev_dvf"] = dvf_dt_val
+        set_attr(nd, ALIAS_DVF, dvf_dt_val)
+        set_attr(nd, ALIAS_D2VF, float(B_vals_all[idx]))
+
+
+def _si_chunk_stats(
+    values: Sequence[float], si_hi: float, si_lo: float
+) -> tuple[float, int, int, int]:
+    """Compute partial Si aggregates for ``values``.
+
+    The helper keeps the logic shared between the sequential and parallel
+    fallbacks when NumPy is unavailable.
+    """
+
+    total = 0.0
+    count = 0
+    hi_count = 0
+    lo_count = 0
+    for s in values:
+        if math.isnan(s):
+            continue
+        total += s
+        count += 1
+        if s >= si_hi:
+            hi_count += 1
+        if s <= si_lo:
+            lo_count += 1
+    return total, count, hi_count, lo_count
+
+
+def _aggregate_si(
+    G: TNFRGraph,
+    hist: MutableMapping[str, list[float]],
+    *,
+    n_jobs: int | None = None,
+) -> None:
     """Aggregate Si statistics across nodes."""
 
     try:
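Both the NumPy and the chunked fallback paths of `_track_stability` compute the same two finite differences: `dvf_dt` as a first difference of `vf` scaled by `1/dt`, and `B` as a first difference of successive `dvf_dt` values, i.e. a discrete second derivative. The arithmetic for one node and one step:

```python
dt = 0.5
inv_dt = 1.0 / dt

vf_prev, vf_curr = 1.0, 1.5
dvf_prev = 0.25  # dvf_dt remembered from the previous step

dvf_dt = (vf_curr - vf_prev) * inv_dt  # first difference: 1.0
B = (dvf_dt - dvf_prev) * inv_dt       # second difference: 1.5
assert (dvf_dt, B) == (1.0, 1.5)
```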
@@ -800,27 +1416,51 @@ def _aggregate_si(G, hist):
     si_hi = float(thr_sel.get("si_hi", thr_def.get("hi", 0.66)))
     si_lo = float(thr_sel.get("si_lo", thr_def.get("lo", 0.33)))
 
-
-
-
-
-
+    np_mod = get_numpy()
+    if np_mod is not None:
+        sis = collect_attr(G, G.nodes, ALIAS_SI, float("nan"), np=np_mod)
+        valid = sis[~np_mod.isnan(sis)]
+        n = int(valid.size)
+        if n:
+            hist["Si_mean"].append(float(valid.mean()))
+            hi_frac = np_mod.count_nonzero(valid >= si_hi) / n
+            lo_frac = np_mod.count_nonzero(valid <= si_lo) / n
+            hist["Si_hi_frac"].append(float(hi_frac))
+            hist["Si_lo_frac"].append(float(lo_frac))
+        else:
+            hist["Si_mean"].append(0.0)
+            hist["Si_hi_frac"].append(0.0)
+            hist["Si_lo_frac"].append(0.0)
+        return
+
+    sis = collect_attr(G, G.nodes, ALIAS_SI, float("nan"))
+    if not sis:
+        hist["Si_mean"].append(0.0)
+        hist["Si_hi_frac"].append(0.0)
+        hist["Si_lo_frac"].append(0.0)
+        return
+
+    if n_jobs is not None and n_jobs > 1:
+        chunk_size = max(1, math.ceil(len(sis) / n_jobs))
+        futures = []
+        with ProcessPoolExecutor(max_workers=n_jobs) as executor:
+            for idx in range(0, len(sis), chunk_size):
+                chunk = sis[idx:idx + chunk_size]
+                futures.append(
+                    executor.submit(_si_chunk_stats, chunk, si_hi, si_lo)
+                )
+        totals = [future.result() for future in futures]
+        total = sum(part[0] for part in totals)
+        count = sum(part[1] for part in totals)
+        hi_count = sum(part[2] for part in totals)
+        lo_count = sum(part[3] for part in totals)
+    else:
+        total, count, hi_count, lo_count = _si_chunk_stats(sis, si_hi, si_lo)
 
-
-
-
-
-        total += s
-        if s >= si_hi:
-            hi_count += 1
-        if s <= si_lo:
-            lo_count += 1
-
-    n = len(sis)
-    if n:
-        hist["Si_mean"].append(total / n)
-        hist["Si_hi_frac"].append(hi_count / n)
-        hist["Si_lo_frac"].append(lo_count / n)
+    if count:
+        hist["Si_mean"].append(total / count)
+        hist["Si_hi_frac"].append(hi_count / count)
+        hist["Si_lo_frac"].append(lo_count / count)
     else:
         hist["Si_mean"].append(0.0)
         hist["Si_hi_frac"].append(0.0)