tnfr 6.0.0-py3-none-any.whl → 7.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tnfr might be problematic.
- tnfr/__init__.py +50 -5
- tnfr/__init__.pyi +0 -7
- tnfr/_compat.py +0 -1
- tnfr/_generated_version.py +34 -0
- tnfr/_version.py +44 -2
- tnfr/alias.py +14 -13
- tnfr/alias.pyi +5 -37
- tnfr/cache.py +9 -729
- tnfr/cache.pyi +8 -224
- tnfr/callback_utils.py +16 -31
- tnfr/callback_utils.pyi +3 -29
- tnfr/cli/__init__.py +17 -11
- tnfr/cli/__init__.pyi +0 -21
- tnfr/cli/arguments.py +175 -14
- tnfr/cli/arguments.pyi +5 -11
- tnfr/cli/execution.py +434 -48
- tnfr/cli/execution.pyi +14 -24
- tnfr/cli/utils.py +20 -3
- tnfr/cli/utils.pyi +5 -5
- tnfr/config/__init__.py +2 -1
- tnfr/config/__init__.pyi +2 -0
- tnfr/config/feature_flags.py +83 -0
- tnfr/config/init.py +1 -1
- tnfr/config/operator_names.py +1 -14
- tnfr/config/presets.py +6 -26
- tnfr/constants/__init__.py +10 -13
- tnfr/constants/__init__.pyi +10 -22
- tnfr/constants/aliases.py +31 -0
- tnfr/constants/core.py +4 -3
- tnfr/constants/init.py +1 -1
- tnfr/constants/metric.py +3 -3
- tnfr/dynamics/__init__.py +64 -10
- tnfr/dynamics/__init__.pyi +3 -4
- tnfr/dynamics/adaptation.py +79 -13
- tnfr/dynamics/aliases.py +10 -9
- tnfr/dynamics/coordination.py +77 -35
- tnfr/dynamics/dnfr.py +575 -274
- tnfr/dynamics/dnfr.pyi +1 -10
- tnfr/dynamics/integrators.py +47 -33
- tnfr/dynamics/integrators.pyi +0 -1
- tnfr/dynamics/runtime.py +489 -129
- tnfr/dynamics/sampling.py +2 -0
- tnfr/dynamics/selectors.py +101 -62
- tnfr/execution.py +15 -8
- tnfr/execution.pyi +5 -25
- tnfr/flatten.py +7 -3
- tnfr/flatten.pyi +1 -8
- tnfr/gamma.py +22 -26
- tnfr/gamma.pyi +0 -6
- tnfr/glyph_history.py +37 -26
- tnfr/glyph_history.pyi +1 -19
- tnfr/glyph_runtime.py +16 -0
- tnfr/glyph_runtime.pyi +9 -0
- tnfr/immutable.py +20 -15
- tnfr/immutable.pyi +4 -7
- tnfr/initialization.py +5 -7
- tnfr/initialization.pyi +1 -9
- tnfr/io.py +6 -305
- tnfr/io.pyi +13 -8
- tnfr/mathematics/__init__.py +81 -0
- tnfr/mathematics/backend.py +426 -0
- tnfr/mathematics/dynamics.py +398 -0
- tnfr/mathematics/epi.py +254 -0
- tnfr/mathematics/generators.py +222 -0
- tnfr/mathematics/metrics.py +119 -0
- tnfr/mathematics/operators.py +233 -0
- tnfr/mathematics/operators_factory.py +71 -0
- tnfr/mathematics/projection.py +78 -0
- tnfr/mathematics/runtime.py +173 -0
- tnfr/mathematics/spaces.py +247 -0
- tnfr/mathematics/transforms.py +292 -0
- tnfr/metrics/__init__.py +10 -10
- tnfr/metrics/coherence.py +123 -94
- tnfr/metrics/common.py +22 -13
- tnfr/metrics/common.pyi +42 -11
- tnfr/metrics/core.py +72 -14
- tnfr/metrics/diagnosis.py +48 -57
- tnfr/metrics/diagnosis.pyi +3 -7
- tnfr/metrics/export.py +3 -5
- tnfr/metrics/glyph_timing.py +41 -31
- tnfr/metrics/reporting.py +13 -6
- tnfr/metrics/sense_index.py +884 -114
- tnfr/metrics/trig.py +167 -11
- tnfr/metrics/trig.pyi +1 -0
- tnfr/metrics/trig_cache.py +112 -15
- tnfr/node.py +400 -17
- tnfr/node.pyi +55 -38
- tnfr/observers.py +111 -8
- tnfr/observers.pyi +0 -15
- tnfr/ontosim.py +9 -6
- tnfr/ontosim.pyi +0 -5
- tnfr/operators/__init__.py +529 -42
- tnfr/operators/__init__.pyi +14 -0
- tnfr/operators/definitions.py +350 -18
- tnfr/operators/definitions.pyi +0 -14
- tnfr/operators/grammar.py +760 -0
- tnfr/operators/jitter.py +28 -22
- tnfr/operators/registry.py +7 -12
- tnfr/operators/registry.pyi +0 -2
- tnfr/operators/remesh.py +38 -61
- tnfr/rng.py +17 -300
- tnfr/schemas/__init__.py +8 -0
- tnfr/schemas/grammar.json +94 -0
- tnfr/selector.py +3 -4
- tnfr/selector.pyi +1 -1
- tnfr/sense.py +22 -24
- tnfr/sense.pyi +0 -7
- tnfr/structural.py +504 -21
- tnfr/structural.pyi +41 -18
- tnfr/telemetry/__init__.py +23 -1
- tnfr/telemetry/cache_metrics.py +226 -0
- tnfr/telemetry/nu_f.py +423 -0
- tnfr/telemetry/nu_f.pyi +123 -0
- tnfr/tokens.py +1 -4
- tnfr/tokens.pyi +1 -6
- tnfr/trace.py +20 -53
- tnfr/trace.pyi +9 -37
- tnfr/types.py +244 -15
- tnfr/types.pyi +200 -14
- tnfr/units.py +69 -0
- tnfr/units.pyi +16 -0
- tnfr/utils/__init__.py +107 -48
- tnfr/utils/__init__.pyi +80 -11
- tnfr/utils/cache.py +1705 -65
- tnfr/utils/cache.pyi +370 -58
- tnfr/utils/chunks.py +104 -0
- tnfr/utils/chunks.pyi +21 -0
- tnfr/utils/data.py +95 -5
- tnfr/utils/data.pyi +8 -17
- tnfr/utils/graph.py +2 -4
- tnfr/utils/init.py +31 -7
- tnfr/utils/init.pyi +4 -11
- tnfr/utils/io.py +313 -14
- tnfr/{helpers → utils}/numeric.py +50 -24
- tnfr/utils/numeric.pyi +21 -0
- tnfr/validation/__init__.py +92 -4
- tnfr/validation/__init__.pyi +77 -17
- tnfr/validation/compatibility.py +79 -43
- tnfr/validation/compatibility.pyi +4 -6
- tnfr/validation/grammar.py +55 -133
- tnfr/validation/grammar.pyi +37 -8
- tnfr/validation/graph.py +138 -0
- tnfr/validation/graph.pyi +17 -0
- tnfr/validation/rules.py +161 -74
- tnfr/validation/rules.pyi +55 -18
- tnfr/validation/runtime.py +263 -0
- tnfr/validation/runtime.pyi +31 -0
- tnfr/validation/soft_filters.py +170 -0
- tnfr/validation/soft_filters.pyi +37 -0
- tnfr/validation/spectral.py +159 -0
- tnfr/validation/spectral.pyi +46 -0
- tnfr/validation/syntax.py +28 -139
- tnfr/validation/syntax.pyi +7 -4
- tnfr/validation/window.py +39 -0
- tnfr/validation/window.pyi +1 -0
- tnfr/viz/__init__.py +9 -0
- tnfr/viz/matplotlib.py +246 -0
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/METADATA +63 -19
- tnfr-7.0.0.dist-info/RECORD +185 -0
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/licenses/LICENSE.md +1 -1
- tnfr/constants_glyphs.py +0 -16
- tnfr/constants_glyphs.pyi +0 -12
- tnfr/grammar.py +0 -25
- tnfr/grammar.pyi +0 -13
- tnfr/helpers/__init__.py +0 -151
- tnfr/helpers/__init__.pyi +0 -66
- tnfr/helpers/numeric.pyi +0 -12
- tnfr/presets.py +0 -15
- tnfr/presets.pyi +0 -7
- tnfr/utils/io.pyi +0 -10
- tnfr/utils/validators.py +0 -130
- tnfr/utils/validators.pyi +0 -19
- tnfr-6.0.0.dist-info/RECORD +0 -157
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/WHEEL +0 -0
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/entry_points.txt +0 -0
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/top_level.txt +0 -0
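
The largest rewrite in this release is tnfr/metrics/sense_index.py, shown below. Its new compute_Si_node docstring spells out the blend Si = alpha * vf_norm + beta * (1 - phase_dispersion) + gamma * (1 - dnfr_norm), clamped to [0, 1]. The standalone sketch below reproduces that arithmetic outside the package; the helper name si_blend and the literal inputs are illustrative only and are not part of the tnfr API.

def clamp01(x: float) -> float:
    # Clamp a value to the closed interval [0, 1].
    return min(1.0, max(0.0, x))

def si_blend(vf, dnfr, phase_dispersion, *, alpha, beta, gamma, vfmax=1.0, dnfrmax=1.0):
    # Mirror of the weighted blend documented in the new compute_Si_node docstring.
    vf_norm = clamp01(abs(vf) / vfmax)        # structural frequency term
    dnfr_norm = clamp01(abs(dnfr) / dnfrmax)  # reorganisation-pressure term
    si = alpha * vf_norm + beta * (1.0 - phase_dispersion) + gamma * (1.0 - dnfr_norm)
    return clamp01(si)

# Matches the 0.91 value shown in the compute_Si_node doctest in the diff below.
print(round(si_blend(1.0, 0.1, 0.2, alpha=0.4, beta=0.3, gamma=0.3), 2))  # 0.91
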
tnfr/metrics/sense_index.py
CHANGED
@@ -1,46 +1,271 @@
-"""Sense index helpers.
+"""Sense index helpers for coherence monitoring.
+
+This module implements the Sense index (Si) as a structural telemetry channel
+that blends three signals: the node's structural frequency ``νf`` (how quickly
+it reorganises), its phase coupling with neighbours (whether it stays locked to
+their resonance), and the damping imposed by ``ΔNFR`` (internal reorganisation
+pressure). By combining these contributions we can monitor how each node
+maintains coherence inside a TNFR graph and surface whether the network is
+favouring rapid reorganisation, synchrony, or ΔNFR attenuation.
+
+Examples
+--------
+Build a minimal resonance graph where Si highlights how the structural weights
+steer the interpretation of coherence.
+
+>>> import networkx as nx
+>>> from tnfr.metrics.sense_index import compute_Si
+>>> G = nx.Graph()
+>>> G.add_edge("sensor", "relay")
+>>> G.nodes["sensor"].update({"nu_f": 0.9, "delta_nfr": 0.3, "phase": 0.0})
+>>> G.nodes["relay"].update({"nu_f": 0.4, "delta_nfr": 0.05, "phase": 0.1})
+>>> G.graph["SI_WEIGHTS"] = {"alpha": 0.5, "beta": 0.3, "gamma": 0.2}
+>>> result = compute_Si(G, inplace=False)
+>>> round(result["sensor"], 3), round(result["relay"], 3)
+(0.767, 0.857)
+
+The heavier ``alpha`` weight privileges the ``sensor`` node's fast ``νf`` even
+though it suffers a larger ``ΔNFR``. Conversely, the ``relay`` keeps Si high
+thanks to a calmer ``ΔNFR`` profile despite slower frequency, illustrating how
+Si exposes the trade-off between structural cadence, phase alignment, and
+internal reorganisation pressure.
+"""

 from __future__ import annotations

 import math
 from concurrent.futures import ProcessPoolExecutor
 from functools import partial
-from
+from time import perf_counter
+from typing import Any, Callable, Iterable, Iterator, Mapping, MutableMapping, cast

 from ..alias import get_attr, set_attr
-from ..constants import
-from ..
-from ..types import GraphLike
+from ..constants.aliases import ALIAS_DNFR, ALIAS_SI, ALIAS_VF
+from ..utils import angle_diff, angle_diff_array, clamp01
+from ..types import GraphLike, NodeAttrMap
 from ..utils import (
     edge_version_cache,
     get_numpy,
     normalize_weights,
+    resolve_chunk_size,
     stable_json,
 )
-from .trig import neighbor_phase_mean_list
-
 from .common import (
+    _coerce_jobs,
+    _get_vf_dnfr_max,
     ensure_neighbors_map,
     merge_graph_weights,
-    _get_vf_dnfr_max,
 )
+from .trig import neighbor_phase_mean_bulk, neighbor_phase_mean_list
 from .trig_cache import get_trig_cache

-ALIAS_VF = get_aliases("VF")
-ALIAS_DNFR = get_aliases("DNFR")
-ALIAS_SI = get_aliases("SI")
-
 PHASE_DISPERSION_KEY = "dSi_dphase_disp"
+_SI_APPROX_BYTES_PER_NODE = 64
 _VALID_SENSITIVITY_KEYS = frozenset(
     {"dSi_dvf_norm", PHASE_DISPERSION_KEY, "dSi_ddnfr_norm"}
 )
 __all__ = ("get_Si_weights", "compute_Si_node", "compute_Si")


+class _SiStructuralCache:
+    """Cache aligned ``νf`` and ``ΔNFR`` arrays for vectorised Si."""
+
+    __slots__ = ("node_ids", "vf_values", "dnfr_values", "vf_snapshot", "dnfr_snapshot")
+
+    def __init__(self, node_ids: tuple[Any, ...]):
+        self.node_ids = node_ids
+        self.vf_values: Any | None = None
+        self.dnfr_values: Any | None = None
+        self.vf_snapshot: list[float] = []
+        self.dnfr_snapshot: list[float] = []
+
+    def rebuild(
+        self,
+        node_ids: Iterable[Any],
+        node_data: Mapping[Any, NodeAttrMap],
+        *,
+        np: Any,
+    ) -> tuple[Any, Any]:
+        node_tuple = tuple(node_ids)
+        count = len(node_tuple)
+        if count == 0:
+            self.node_ids = node_tuple
+            self.vf_values = np.zeros(0, dtype=float)
+            self.dnfr_values = np.zeros(0, dtype=float)
+            self.vf_snapshot = []
+            self.dnfr_snapshot = []
+            return self.vf_values, self.dnfr_values
+
+        vf_arr = np.fromiter(
+            (float(get_attr(node_data[n], ALIAS_VF, 0.0)) for n in node_tuple),
+            dtype=float,
+            count=count,
+        )
+        dnfr_arr = np.fromiter(
+            (float(get_attr(node_data[n], ALIAS_DNFR, 0.0)) for n in node_tuple),
+            dtype=float,
+            count=count,
+        )
+
+        self.node_ids = node_tuple
+        self.vf_values = vf_arr
+        self.dnfr_values = dnfr_arr
+        self.vf_snapshot = [float(value) for value in vf_arr]
+        self.dnfr_snapshot = [float(value) for value in dnfr_arr]
+        return self.vf_values, self.dnfr_values
+
+    def ensure_current(
+        self,
+        node_ids: Iterable[Any],
+        node_data: Mapping[Any, NodeAttrMap],
+        *,
+        np: Any,
+    ) -> tuple[Any, Any]:
+        node_tuple = tuple(node_ids)
+        if node_tuple != self.node_ids:
+            return self.rebuild(node_tuple, node_data, np=np)
+
+        for idx, node in enumerate(node_tuple):
+            nd = node_data[node]
+            vf = float(get_attr(nd, ALIAS_VF, 0.0))
+            if vf != self.vf_snapshot[idx]:
+                return self.rebuild(node_tuple, node_data, np=np)
+            dnfr = float(get_attr(nd, ALIAS_DNFR, 0.0))
+            if dnfr != self.dnfr_snapshot[idx]:
+                return self.rebuild(node_tuple, node_data, np=np)
+
+        return self.vf_values, self.dnfr_values
+
+
+def _build_structural_cache(
+    node_ids: Iterable[Any],
+    node_data: Mapping[Any, NodeAttrMap],
+    *,
+    np: Any,
+) -> _SiStructuralCache:
+    cache = _SiStructuralCache(tuple(node_ids))
+    cache.rebuild(node_ids, node_data, np=np)
+    return cache
+
+
+def _ensure_structural_arrays(
+    G: GraphLike,
+    node_ids: Iterable[Any],
+    node_data: Mapping[Any, NodeAttrMap],
+    *,
+    np: Any,
+) -> tuple[Any, Any]:
+    node_key = tuple(node_ids)
+
+    def builder() -> _SiStructuralCache:
+        return _build_structural_cache(node_key, node_data, np=np)
+
+    cache = edge_version_cache(G, ("_si_structural", node_key), builder)
+    return cache.ensure_current(node_key, node_data, np=np)
+
+
+def _ensure_si_buffers(
+    G: GraphLike,
+    *,
+    count: int,
+    np: Any,
+) -> tuple[Any, Any, Any]:
+    """Return reusable NumPy buffers sized for ``count`` nodes."""
+
+    def builder() -> tuple[Any, Any, Any]:
+        return (
+            np.empty(count, dtype=float),
+            np.empty(count, dtype=float),
+            np.empty(count, dtype=float),
+        )
+
+    return edge_version_cache(
+        G,
+        ("_si_buffers", count),
+        builder,
+    )
+
+
+def _ensure_chunk_workspace(
+    G: GraphLike,
+    *,
+    mask_count: int,
+    np: Any,
+) -> tuple[Any, Any]:
+    """Return reusable scratch buffers sized to the masked neighbours."""
+
+    if mask_count <= 0:
+        mask_count = 1
+
+    def builder() -> tuple[Any, Any]:
+        return (
+            np.empty(mask_count, dtype=float),
+            np.empty(mask_count, dtype=float),
+        )
+
+    return edge_version_cache(
+        G,
+        ("_si_chunk_workspace", mask_count),
+        builder,
+    )
+
+
+def _ensure_neighbor_bulk_buffers(
+    G: GraphLike,
+    *,
+    count: int,
+    np: Any,
+) -> tuple[Any, Any, Any, Any, Any]:
+    """Return reusable buffers for bulk neighbour phase aggregation."""
+
+    def builder() -> tuple[Any, Any, Any, Any, Any]:
+        return (
+            np.empty(count, dtype=float),
+            np.empty(count, dtype=float),
+            np.empty(count, dtype=float),
+            np.empty(count, dtype=float),
+            np.empty(count, dtype=float),
+        )
+
+    return edge_version_cache(
+        G,
+        ("_si_neighbor_buffers", count),
+        builder,
+    )
+
+
 def _normalise_si_sensitivity_mapping(
     mapping: Mapping[str, float], *, warn: bool
 ) -> dict[str, float]:
-    """
+    """Preserve structural sensitivities compatible with the Si operator.
+
+    Parameters
+    ----------
+    mapping : Mapping[str, float]
+        Mapping of raw sensitivity weights keyed by structural derivatives.
+    warn : bool
+        Compatibility flag kept for trace helpers. It is not used directly but
+        retained so upstream logging keeps a consistent signature.
+
+    Returns
+    -------
+    dict[str, float]
+        Sanitised mapping containing only the supported sensitivity keys.
+
+    Raises
+    ------
+    ValueError
+        If the mapping defines keys outside of the supported sensitivity set.
+
+    Examples
+    --------
+    >>> _normalise_si_sensitivity_mapping({"dSi_dvf_norm": 1.0}, warn=False)
+    {'dSi_dvf_norm': 1.0}
+    >>> _normalise_si_sensitivity_mapping({"unknown": 1.0}, warn=False)
+    Traceback (most recent call last):
+    ...
+    ValueError: Si sensitivity mappings accept only {dSi_ddnfr_norm, dSi_dphase_disp, dSi_dvf_norm}; unexpected key(s): unknown
+    """

     normalised = dict(mapping)
     _ = warn  # kept for API compatibility with trace helpers
@@ -56,7 +281,31 @@ def _normalise_si_sensitivity_mapping(


 def _cache_weights(G: GraphLike) -> tuple[float, float, float]:
-    """Normalise and
+    """Normalise and persist Si weights attached to the graph coherence.
+
+    Parameters
+    ----------
+    G : GraphLike
+        Graph structure whose global Si sensitivities must be harmonised.
+
+    Returns
+    -------
+    tuple[float, float, float]
+        Ordered tuple ``(alpha, beta, gamma)`` with normalised Si weights.
+
+    Raises
+    ------
+    ValueError
+        Propagated if the graph stores unsupported sensitivity keys.
+
+    Examples
+    --------
+    >>> import networkx as nx
+    >>> G = nx.Graph()
+    >>> G.graph["SI_WEIGHTS"] = {"alpha": 0.2, "beta": 0.5, "gamma": 0.3}
+    >>> tuple(round(v, 2) for v in _cache_weights(G))
+    (0.2, 0.5, 0.3)
+    """

     w = merge_graph_weights(G, "SI_WEIGHTS")
     cfg_key = stable_json(w)
@@ -85,7 +334,25 @@ def _cache_weights(G: GraphLike) -> tuple[float, float, float]:


 def get_Si_weights(G: GraphLike) -> tuple[float, float, float]:
-    """
+    """Expose the normalised Si weights associated with ``G``.
+
+    Parameters
+    ----------
+    G : GraphLike
+        Graph that carries optional ``SI_WEIGHTS`` metadata.
+
+    Returns
+    -------
+    tuple[float, float, float]
+        The ``(alpha, beta, gamma)`` weights after normalisation.
+
+    Examples
+    --------
+    >>> import networkx as nx
+    >>> G = nx.Graph()
+    >>> get_Si_weights(G)
+    (0.0, 0.0, 0.0)
+    """

     return _cache_weights(G)

@@ -103,7 +370,62 @@ def compute_Si_node(
     inplace: bool,
     **kwargs: Any,
 ) -> float:
-    """
+    """Evaluate how a node's structure tilts Si within its local resonance.
+
+    Parameters
+    ----------
+    n : Any
+        Node identifier whose structural perception is computed.
+    nd : dict[str, Any]
+        Mutable node attributes containing cached structural magnitudes.
+    alpha : float
+        Normalised weight applied to the node's structural frequency, boosting
+        Si when the node reorganises faster than the network baseline.
+    beta : float
+        Normalised weight applied to the phase alignment term so that tighter
+        synchrony raises the index.
+    gamma : float
+        Normalised weight applied to the ΔNFR attenuation term, rewarding nodes
+        that keep internal turbulence under control.
+    vfmax : float
+        Maximum structural frequency used for normalisation.
+    dnfrmax : float
+        Maximum |ΔNFR| used for normalisation.
+    phase_dispersion : float, optional
+        Phase dispersion ratio in ``[0, 1]`` for the node against its
+        neighbours. The value must be supplied by the caller.
+    inplace : bool
+        Whether to write the resulting Si back to ``nd``.
+    **kwargs : Any
+        Additional keyword arguments are not accepted and will raise.
+
+    Returns
+    -------
+    float
+        The clamped Si value in ``[0, 1]``.
+
+    Raises
+    ------
+    TypeError
+        If ``phase_dispersion`` is missing or unsupported keyword arguments
+        are provided.
+
+    Examples
+    --------
+    >>> nd = {"nu_f": 1.0, "delta_nfr": 0.1}
+    >>> compute_Si_node(
+    ...     "n0",
+    ...     nd,
+    ...     alpha=0.4,
+    ...     beta=0.3,
+    ...     gamma=0.3,
+    ...     vfmax=1.0,
+    ...     dnfrmax=1.0,
+    ...     phase_dispersion=0.2,
+    ...     inplace=False,
+    ... )
+    0.91
+    """

     if kwargs:
         unexpected = ", ".join(sorted(kwargs))
@@ -118,29 +440,13 @@ def compute_Si_node(
     dnfr = get_attr(nd, ALIAS_DNFR, 0.0)
     dnfr_norm = clamp01(abs(dnfr) / dnfrmax)

-    Si = (
-        alpha * vf_norm
-        + beta * (1.0 - phase_dispersion)
-        + gamma * (1.0 - dnfr_norm)
-    )
+    Si = alpha * vf_norm + beta * (1.0 - phase_dispersion) + gamma * (1.0 - dnfr_norm)
     Si = clamp01(Si)
     if inplace:
         set_attr(nd, ALIAS_SI, Si)
     return Si


-def _coerce_jobs(raw_jobs: Any | None) -> int | None:
-    """Normalise ``n_jobs`` values coming from user configuration."""
-
-    try:
-        jobs = None if raw_jobs is None else int(raw_jobs)
-    except (TypeError, ValueError):
-        return None
-    if jobs is not None and jobs <= 0:
-        return None
-    return jobs
-
-
 def _compute_si_python_chunk(
     chunk: Iterable[tuple[Any, tuple[Any, ...], float, float, float]],
     *,
@@ -152,7 +458,49 @@ def _compute_si_python_chunk(
     vfmax: float,
     dnfrmax: float,
 ) -> dict[Any, float]:
-    """
+    """Propagate Si contributions for a node chunk using pure Python.
+
+    The fallback keeps the νf/phase/ΔNFR balance explicit so that structural
+    effects remain traceable even without vectorised support.
+
+    Parameters
+    ----------
+    chunk : Iterable[tuple[Any, tuple[Any, ...], float, float, float]]
+        Iterable of node payloads ``(node, neighbors, theta, vf, dnfr)``.
+    cos_th : dict[Any, float]
+        Cached cosine values keyed by node identifiers.
+    sin_th : dict[Any, float]
+        Cached sine values keyed by node identifiers.
+    alpha : float
+        Normalised weight for structural frequency.
+    beta : float
+        Normalised weight for phase dispersion.
+    gamma : float
+        Normalised weight for ΔNFR dispersion.
+    vfmax : float
+        Maximum |νf| reference for normalisation.
+    dnfrmax : float
+        Maximum |ΔNFR| reference for normalisation.
+
+    Returns
+    -------
+    dict[Any, float]
+        Mapping of node identifiers to their clamped Si values.
+
+    Examples
+    --------
+    >>> _compute_si_python_chunk(
+    ...     [("n0", ("n1",), 0.0, 0.5, 0.1)],
+    ...     cos_th={"n1": 1.0},
+    ...     sin_th={"n1": 0.0},
+    ...     alpha=0.5,
+    ...     beta=0.3,
+    ...     gamma=0.2,
+    ...     vfmax=1.0,
+    ...     dnfrmax=1.0,
+    ... )
+    {'n0': 0.73}
+    """

     results: dict[Any, float] = {}
     for n, neigh, theta, vf, dnfr in chunk:
@@ -171,25 +519,156 @@ def _compute_si_python_chunk(
     return results


+def _iter_python_payload_chunks(
+    nodes_data: Iterable[tuple[Any, NodeAttrMap]],
+    *,
+    neighbors: Mapping[Any, Iterable[Any]],
+    thetas: Mapping[Any, float],
+    chunk_size: int,
+) -> Iterator[tuple[tuple[Any, tuple[Any, ...], float, float, float], ...]]:
+    """Yield lazily constructed Si payload chunks for the Python fallback.
+
+    Each batch keeps the structural triad explicit—θ, νf, and ΔNFR—so that the
+    downstream worker preserves the coherence balance enforced by the Si
+    operator. Streaming prevents a single monolithic buffer that would skew
+    memory pressure on dense graphs while still producing deterministic ΔNFR
+    sampling. The iterator is consumed lazily by :func:`compute_Si` so that the
+    Python fallback can submit and harvest chunk results incrementally, keeping
+    both memory usage and profiling telemetry representative of the streamed
+    execution.
+    """
+
+    if chunk_size <= 0:
+        return
+
+    buffer: list[tuple[Any, tuple[Any, ...], float, float, float]] = []
+    for node, data in nodes_data:
+        theta = thetas.get(node, 0.0)
+        vf = float(get_attr(data, ALIAS_VF, 0.0))
+        dnfr = float(get_attr(data, ALIAS_DNFR, 0.0))
+        neigh = tuple(neighbors[node])
+        buffer.append((node, neigh, theta, vf, dnfr))
+        if len(buffer) >= chunk_size:
+            yield tuple(buffer)
+            buffer.clear()
+
+    if buffer:
+        yield tuple(buffer)
+
+
 def compute_Si(
     G: GraphLike,
     *,
     inplace: bool = True,
     n_jobs: int | None = None,
-
-
+    chunk_size: int | None = None,
+    profile: MutableMapping[str, Any] | None = None,
+) -> dict[Any, float] | Any:
+    """Compute the Si metric for each node by integrating structural drivers.
+
+    Si (sense index) quantifies how effectively a node sustains coherent
+    reorganisation within the TNFR triad. The metric aggregates three
+    structural contributions: the node's structural frequency (weighted by
+    ``alpha``), its phase alignment with neighbours (weighted by ``beta``),
+    and the attenuation of disruptive ΔNFR (weighted by ``gamma``). The
+    weights therefore bias Si towards faster reorganisation, tighter phase
+    coupling, or reduced dissonance respectively, depending on the scenario.
+
+    Parameters
+    ----------
+    G : GraphLike
+        Graph that exposes ``νf`` (structural frequency), ``ΔNFR`` and phase
+        attributes for each node.
+    inplace : bool, default: True
+        If ``True`` the resulting Si values are written back to ``G``.
+    n_jobs : int or None, optional
+        Maximum number of worker processes for the pure-Python fallback. Use
+        ``None`` to auto-detect the configuration.
+    chunk_size : int or None, optional
+        Maximum number of nodes processed per batch when building the Si
+        mapping. ``None`` derives a safe value from the node count, the
+        available CPUs, and conservative memory heuristics. Non-positive values
+        fall back to the automatic mode. Graphs may also provide a default via
+        ``G.graph["SI_CHUNK_SIZE"]``.
+    profile : MutableMapping[str, Any] or None, optional
+        Mutable mapping that aggregates wall-clock durations for the internal
+        stages of the computation. The mapping receives the keys
+        ``"cache_rebuild"``, ``"neighbor_phase_mean_bulk"``,
+        ``"normalize_clamp"`` and ``"inplace_write"`` accumulating seconds for
+        each step, plus ``"path"`` describing whether the vectorised (NumPy)
+        or fallback implementation executed the call. When the Python fallback
+        streams chunk execution, ``"fallback_chunks"`` records how many payload
+        batches completed. Reusing the mapping across invocations accumulates
+        the timings and chunk counts.
+
+    Returns
+    -------
+    dict[Any, float] | numpy.ndarray
+        Mapping from node identifiers to their Si scores when ``inplace`` is
+        ``False``. When ``inplace`` is ``True`` and the NumPy accelerated path
+        is available the function updates the graph in place and returns the
+        vector of Si values as a :class:`numpy.ndarray`. The pure-Python
+        fallback always returns a mapping for compatibility.
+
+    Raises
+    ------
+    ValueError
+        Propagated if graph-level sensitivity settings include unsupported
+        keys or invalid weights.
+
+    Examples
+    --------
+    Build a minimal resonance graph with two nodes sharing a phase-locked
+    edge. The structural weights bias the result towards phase coherence.
+
+    >>> import networkx as nx
+    >>> from tnfr.metrics.sense_index import compute_Si
+    >>> G = nx.Graph()
+    >>> G.add_edge("a", "b")
+    >>> G.nodes["a"].update({"nu_f": 0.8, "delta_nfr": 0.2, "phase": 0.0})
+    >>> G.nodes["b"].update({"nu_f": 0.6, "delta_nfr": 0.1, "phase": 0.1})
+    >>> G.graph["SI_WEIGHTS"] = {"alpha": 0.3, "beta": 0.5, "gamma": 0.2}
+    >>> {k: round(v, 3) for k, v in compute_Si(G, inplace=False).items()}
+    {'a': 0.784, 'b': 0.809}
+    """
+
+    if profile is not None:
+        for key in (
+            "cache_rebuild",
+            "neighbor_phase_mean_bulk",
+            "normalize_clamp",
+            "inplace_write",
+            "fallback_chunks",
+        ):
+            profile.setdefault(key, 0.0)
+
+        def _profile_start() -> float:
+            return perf_counter()
+
+        def _profile_stop(key: str, start: float) -> None:
+            profile[key] = float(profile.get(key, 0.0)) + (perf_counter() - start)
+
+        def _profile_mark_path(path: str) -> None:
+            profile["path"] = path
+
+    else:
+
+        def _profile_start() -> float:
+            return 0.0
+
+        def _profile_stop(key: str, start: float) -> None:
+            return None
+
+        def _profile_mark_path(path: str) -> None:
+            return None

     neighbors = ensure_neighbors_map(G)
     alpha, beta, gamma = get_Si_weights(G)
-    vfmax, dnfrmax = _get_vf_dnfr_max(G)
-
     np = get_numpy()
     trig = get_trig_cache(G, np=np)
     cos_th, sin_th, thetas = trig.cos, trig.sin, trig.theta

-    pm_fn = partial(
-        neighbor_phase_mean_list, cos_th=cos_th, sin_th=sin_th, np=np
-    )
+    pm_fn = partial(neighbor_phase_mean_list, cos_th=cos_th, sin_th=sin_th, np=np)

     if n_jobs is None:
         n_jobs = _coerce_jobs(G.graph.get("SI_N_JOBS"))
@@ -199,96 +678,387 @@ def compute_Si(
     supports_vector = (
         np is not None
         and hasattr(np, "ndarray")
-        and all(
+        and all(
+            hasattr(np, attr)
+            for attr in (
+                "fromiter",
+                "abs",
+                "clip",
+                "remainder",
+                "zeros",
+                "add",
+                "bincount",
+                "arctan2",
+                "where",
+                "divide",
+                "errstate",
+                "max",
+            )
+        )
     )

-
+    nodes_view = G.nodes
+    nodes_data = list(nodes_view(data=True))
     if not nodes_data:
         return {}

+    node_mapping = cast(Mapping[Any, NodeAttrMap], nodes_view)
+    node_count = len(nodes_data)
+
+    trig_order = list(getattr(trig, "order", ()))
+    node_ids: list[Any]
+    node_idx: dict[Any, int]
+    using_cache_order = False
+    if trig_order and len(trig_order) == node_count:
+        node_ids = trig_order
+        node_idx = dict(getattr(trig, "index", {}))
+        using_cache_order = len(node_idx) == len(node_ids)
+        if not using_cache_order:
+            node_idx = {n: i for i, n in enumerate(node_ids)}
+    else:
+        node_ids = [n for n, _ in nodes_data]
+        node_idx = {n: i for i, n in enumerate(node_ids)}
+
+    chunk_pref = chunk_size if chunk_size is not None else G.graph.get("SI_CHUNK_SIZE")
+
     if supports_vector:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        _profile_mark_path("vectorized")
+        node_key = tuple(node_ids)
+        count = len(node_key)
+
+        cache_theta = getattr(trig, "theta_values", None)
+        cache_cos = getattr(trig, "cos_values", None)
+        cache_sin = getattr(trig, "sin_values", None)
+
+        trig_index_map = dict(getattr(trig, "index", {}) or {})
+        index_arr: Any | None = None
+        cached_mask = None
+        if trig_index_map and count:
+            index_values: list[int] = []
+            mask_values: list[bool] = []
+            for node in node_ids:
+                cached_idx = trig_index_map.get(node)
+                if cached_idx is None:
+                    index_values.append(-1)
+                    mask_values.append(False)
+                else:
+                    index_values.append(int(cached_idx))
+                    mask_values.append(True)
+            cached_mask = np.asarray(mask_values, dtype=bool)
+            if cached_mask.any():
+                index_arr = np.asarray(index_values, dtype=np.intp)
+        if cached_mask is None:
+            cached_mask = np.zeros(count, dtype=bool)
+
+        def _gather_values(cache_values: Any | None, fallback_getter: Callable[[Any], float]) -> Any:
+            if (
+                index_arr is not None
+                and cache_values is not None
+                and cached_mask.size
+                and cached_mask.any()
+            ):
+                out = np.empty(count, dtype=float)
+                cached_indices = np.nonzero(cached_mask)[0]
+                if cached_indices.size:
+                    out[cached_indices] = np.take(
+                        np.asarray(cache_values, dtype=float), index_arr[cached_indices]
+                    )
+                missing_indices = np.nonzero(~cached_mask)[0]
+                if missing_indices.size:
+                    missing_nodes = [node_ids[i] for i in missing_indices]
+                    out[missing_indices] = np.fromiter(
+                        (fallback_getter(node) for node in missing_nodes),
+                        dtype=float,
+                        count=missing_indices.size,
+                    )
+                return out
+            return np.fromiter(
+                (fallback_getter(node) for node in node_ids),
+                dtype=float,
+                count=count,
+            )
+
+        cache_timer = _profile_start()
+
+        if using_cache_order and cache_theta is not None:
+            theta_arr = np.asarray(cache_theta, dtype=float)
+        else:
+            theta_arr = _gather_values(cache_theta, lambda node: thetas.get(node, 0.0))
+
+        if using_cache_order and cache_cos is not None:
+            cos_arr = np.asarray(cache_cos, dtype=float)
+        else:
+            cos_arr = _gather_values(
+                cache_cos,
+                lambda node: cos_th.get(node, math.cos(thetas.get(node, 0.0))),
+            )
+
+        if using_cache_order and cache_sin is not None:
+            sin_arr = np.asarray(cache_sin, dtype=float)
+        else:
+            sin_arr = _gather_values(
+                cache_sin,
+                lambda node: sin_th.get(node, math.sin(thetas.get(node, 0.0))),
+            )
+
+        cached_edge_src = None
+        cached_edge_dst = None
+        if using_cache_order:
+            cached_edge_src = getattr(trig, "edge_src", None)
+            cached_edge_dst = getattr(trig, "edge_dst", None)
+            if cached_edge_src is not None and cached_edge_dst is not None:
+                cached_edge_src = np.asarray(cached_edge_src, dtype=np.intp)
+                cached_edge_dst = np.asarray(cached_edge_dst, dtype=np.intp)
+                if cached_edge_src.shape != cached_edge_dst.shape:
+                    cached_edge_src = None
+                    cached_edge_dst = None
+
+        if cached_edge_src is not None and cached_edge_dst is not None:
+            edge_src = cached_edge_src
+            edge_dst = cached_edge_dst
+        else:
+
+            def _build_edge_arrays() -> tuple[Any, Any]:
+                edge_src_list: list[int] = []
+                edge_dst_list: list[int] = []
+                for node in node_ids:
+                    dst_idx = node_idx[node]
+                    for neighbor in neighbors[node]:
+                        src_idx = node_idx.get(neighbor)
+                        if src_idx is None:
+                            continue
+                        edge_src_list.append(src_idx)
+                        edge_dst_list.append(dst_idx)
+                src_arr = np.asarray(edge_src_list, dtype=np.intp)
+                dst_arr = np.asarray(edge_dst_list, dtype=np.intp)
+                return src_arr, dst_arr
+
+            edge_src, edge_dst = edge_version_cache(
+                G,
+                ("_si_edges", node_key),
+                _build_edge_arrays,
+            )
+            if using_cache_order:
+                trig.edge_src = edge_src
+                trig.edge_dst = edge_dst
+
+        (
+            neighbor_cos_sum,
+            neighbor_sin_sum,
+            neighbor_counts,
+            mean_cos_buf,
+            mean_sin_buf,
+        ) = _ensure_neighbor_bulk_buffers(
+            G,
+            count=count,
+            np=np,
         )

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        vf_arr, dnfr_arr = _ensure_structural_arrays(
+            G,
+            node_ids,
+            node_mapping,
+            np=np,
+        )
+        raw_vfmax = float(np.max(np.abs(vf_arr))) if getattr(vf_arr, "size", 0) else 0.0
+        raw_dnfrmax = (
+            float(np.max(np.abs(dnfr_arr))) if getattr(dnfr_arr, "size", 0) else 0.0
+        )
+        G.graph["_vfmax"] = raw_vfmax
+        G.graph["_dnfrmax"] = raw_dnfrmax
+        vfmax = 1.0 if raw_vfmax == 0.0 else raw_vfmax
+        dnfrmax = 1.0 if raw_dnfrmax == 0.0 else raw_dnfrmax
+
+        (
+            phase_dispersion,
+            raw_si,
+            si_values,
+        ) = _ensure_si_buffers(
+            G,
+            count=count,
+            np=np,
+        )
+
+        _profile_stop("cache_rebuild", cache_timer)
+
+        neighbor_timer = _profile_start()
+        mean_theta, has_neighbors = neighbor_phase_mean_bulk(
+            edge_src,
+            edge_dst,
+            cos_values=cos_arr,
+            sin_values=sin_arr,
+            theta_values=theta_arr,
+            node_count=count,
+            np=np,
+            neighbor_cos_sum=neighbor_cos_sum,
+            neighbor_sin_sum=neighbor_sin_sum,
+            neighbor_counts=neighbor_counts,
+            mean_cos=mean_cos_buf,
+            mean_sin=mean_sin_buf,
+        )
+        _profile_stop("neighbor_phase_mean_bulk", neighbor_timer)
+        norm_timer = _profile_start()
+        # Reuse the Si buffers as scratch space to avoid transient allocations during
+        # the normalization pass and keep the structural buffers coherent with the
+        # cached layout.
+        np.abs(vf_arr, out=raw_si)
+        np.divide(raw_si, vfmax, out=raw_si)
+        np.clip(raw_si, 0.0, 1.0, out=raw_si)
+        vf_norm = raw_si
+        np.abs(dnfr_arr, out=si_values)
+        np.divide(si_values, dnfrmax, out=si_values)
+        np.clip(si_values, 0.0, 1.0, out=si_values)
+        dnfr_norm = si_values
+        phase_dispersion.fill(0.0)
+        neighbor_mask = np.asarray(has_neighbors, dtype=bool)
+        neighbor_count = int(neighbor_mask.sum())
+        use_chunked = False
+        if neighbor_count:
+            effective_chunk = resolve_chunk_size(
+                chunk_pref,
+                neighbor_count,
+                approx_bytes_per_item=_SI_APPROX_BYTES_PER_NODE,
+            )
+            if effective_chunk <= 0 or effective_chunk >= neighbor_count:
+                effective_chunk = neighbor_count
+            else:
+                use_chunked = True
+
+        if neighbor_count and not use_chunked:
+            angle_diff_array(
+                theta_arr,
+                mean_theta,
+                np=np,
+                out=phase_dispersion,
+                where=neighbor_mask,
+            )
+            np.abs(phase_dispersion, out=phase_dispersion, where=neighbor_mask)
+            np.divide(
+                phase_dispersion,
+                math.pi,
+                out=phase_dispersion,
+                where=neighbor_mask,
+            )
+        elif neighbor_count and use_chunked:
+            neighbor_indices = np.nonzero(neighbor_mask)[0]
+            chunk_theta, chunk_values = _ensure_chunk_workspace(
+                G,
+                mask_count=neighbor_count,
+                np=np,
+            )
+            for start in range(0, neighbor_count, effective_chunk):
+                end = min(start + effective_chunk, neighbor_count)
+                slice_indices = neighbor_indices[start:end]
+                chunk_len = end - start
+                theta_view = chunk_theta[:chunk_len]
+                values_view = chunk_values[:chunk_len]
+                np.take(theta_arr, slice_indices, out=theta_view)
+                np.take(mean_theta, slice_indices, out=values_view)
+                angle_diff_array(theta_view, values_view, np=np, out=values_view)
+                np.abs(values_view, out=values_view)
+                np.divide(values_view, math.pi, out=values_view)
+                phase_dispersion[slice_indices] = values_view
         else:
-
-
-
-
-            phase_dispersion
-
-
-
+            np.abs(phase_dispersion, out=phase_dispersion)
+            np.divide(
+                phase_dispersion,
+                math.pi,
+                out=phase_dispersion,
+                where=neighbor_mask,
+            )
+
+        np.multiply(vf_norm, alpha, out=raw_si)
+        np.subtract(1.0, phase_dispersion, out=phase_dispersion)
+        np.multiply(phase_dispersion, beta, out=phase_dispersion)
+        np.add(raw_si, phase_dispersion, out=raw_si)
+        np.subtract(1.0, dnfr_norm, out=si_values)
+        np.multiply(si_values, gamma, out=si_values)
+        np.add(raw_si, si_values, out=raw_si)
+        np.clip(raw_si, 0.0, 1.0, out=si_values)
+
+        _profile_stop("normalize_clamp", norm_timer)
+
+        if inplace:
+            write_timer = _profile_start()
+            for idx, node in enumerate(node_ids):
+                set_attr(G.nodes[node], ALIAS_SI, float(si_values[idx]))
+            _profile_stop("inplace_write", write_timer)
+            return np.copy(si_values)
+
+        return {node: float(value) for node, value in zip(node_ids, si_values)}
+
+    vfmax, dnfrmax = _get_vf_dnfr_max(G)
+
+    out: dict[Any, float] = {}
+    _profile_mark_path("fallback")
+    if n_jobs is not None and n_jobs > 1:
+        node_count = len(nodes_data)
+        if node_count:
+            effective_chunk = resolve_chunk_size(
+                chunk_pref,
+                node_count,
+                approx_bytes_per_item=_SI_APPROX_BYTES_PER_NODE,
+            )
+            if effective_chunk <= 0:
+                effective_chunk = node_count
+            payload_chunks = _iter_python_payload_chunks(
+                nodes_data,
+                neighbors=neighbors,
+                thetas=thetas,
+                chunk_size=effective_chunk,
+            )
+            chunk_count = 0
+            with ProcessPoolExecutor(max_workers=n_jobs) as executor:
+                worker = partial(
+                    _compute_si_python_chunk,
+                    cos_th=cos_th,
+                    sin_th=sin_th,
                     alpha=alpha,
                     beta=beta,
                     gamma=gamma,
                     vfmax=vfmax,
                     dnfrmax=dnfrmax,
-                    phase_dispersion=phase_dispersion,
-                    inplace=False,
                 )
+                payload_iter = iter(payload_chunks)
+                futures: list[Any] = []
+                for chunk in payload_iter:
+                    futures.append(executor.submit(worker, chunk))
+                    if len(futures) >= n_jobs:
+                        future = futures.pop(0)
+                        chunk_result = future.result()
+                        chunk_count += 1
+                        out.update(chunk_result)
+                for future in futures:
+                    chunk_result = future.result()
+                    chunk_count += 1
+                    out.update(chunk_result)
+            if profile is not None:
+                profile["fallback_chunks"] = float(profile.get("fallback_chunks", 0.0)) + float(
+                    chunk_count
+                )
+    else:
+        for n, nd in nodes_data:
+            theta = thetas.get(n, 0.0)
+            neigh = neighbors[n]
+            th_bar = pm_fn(neigh, fallback=theta)
+            phase_dispersion = abs(angle_diff(theta, th_bar)) / math.pi
+            norm_timer = _profile_start()
+            out[n] = compute_Si_node(
+                n,
+                nd,
+                alpha=alpha,
+                beta=beta,
+                gamma=gamma,
+                vfmax=vfmax,
+                dnfrmax=dnfrmax,
+                phase_dispersion=phase_dispersion,
+                inplace=False,
+            )
+            _profile_stop("normalize_clamp", norm_timer)

     if inplace:
+        write_timer = _profile_start()
         for n, value in out.items():
             set_attr(G.nodes[n], ALIAS_SI, value)
+        _profile_stop("inplace_write", write_timer)
     return out