tnfr-4.5.1-py3-none-any.whl → tnfr-4.5.2-py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
Potentially problematic release: this version of tnfr might be problematic.
- tnfr/__init__.py +91 -90
- tnfr/alias.py +546 -0
- tnfr/cache.py +578 -0
- tnfr/callback_utils.py +388 -0
- tnfr/cli/__init__.py +75 -0
- tnfr/cli/arguments.py +177 -0
- tnfr/cli/execution.py +288 -0
- tnfr/cli/utils.py +36 -0
- tnfr/collections_utils.py +300 -0
- tnfr/config.py +19 -28
- tnfr/constants/__init__.py +174 -0
- tnfr/constants/core.py +159 -0
- tnfr/constants/init.py +31 -0
- tnfr/constants/metric.py +110 -0
- tnfr/constants_glyphs.py +98 -0
- tnfr/dynamics/__init__.py +658 -0
- tnfr/dynamics/dnfr.py +733 -0
- tnfr/dynamics/integrators.py +267 -0
- tnfr/dynamics/sampling.py +31 -0
- tnfr/execution.py +201 -0
- tnfr/flatten.py +283 -0
- tnfr/gamma.py +302 -88
- tnfr/glyph_history.py +290 -0
- tnfr/grammar.py +285 -96
- tnfr/graph_utils.py +84 -0
- tnfr/helpers/__init__.py +71 -0
- tnfr/helpers/numeric.py +87 -0
- tnfr/immutable.py +178 -0
- tnfr/import_utils.py +228 -0
- tnfr/initialization.py +197 -0
- tnfr/io.py +246 -0
- tnfr/json_utils.py +162 -0
- tnfr/locking.py +37 -0
- tnfr/logging_utils.py +116 -0
- tnfr/metrics/__init__.py +41 -0
- tnfr/metrics/coherence.py +829 -0
- tnfr/metrics/common.py +151 -0
- tnfr/metrics/core.py +101 -0
- tnfr/metrics/diagnosis.py +234 -0
- tnfr/metrics/export.py +137 -0
- tnfr/metrics/glyph_timing.py +189 -0
- tnfr/metrics/reporting.py +148 -0
- tnfr/metrics/sense_index.py +120 -0
- tnfr/metrics/trig.py +181 -0
- tnfr/metrics/trig_cache.py +109 -0
- tnfr/node.py +214 -159
- tnfr/observers.py +126 -136
- tnfr/ontosim.py +134 -134
- tnfr/operators/__init__.py +420 -0
- tnfr/operators/jitter.py +203 -0
- tnfr/operators/remesh.py +485 -0
- tnfr/presets.py +46 -14
- tnfr/rng.py +254 -0
- tnfr/selector.py +210 -0
- tnfr/sense.py +284 -131
- tnfr/structural.py +207 -79
- tnfr/tokens.py +60 -0
- tnfr/trace.py +329 -94
- tnfr/types.py +43 -17
- tnfr/validators.py +70 -24
- tnfr/value_utils.py +59 -0
- tnfr-4.5.2.dist-info/METADATA +379 -0
- tnfr-4.5.2.dist-info/RECORD +67 -0
- tnfr/cli.py +0 -322
- tnfr/constants.py +0 -277
- tnfr/dynamics.py +0 -814
- tnfr/helpers.py +0 -264
- tnfr/main.py +0 -47
- tnfr/metrics.py +0 -597
- tnfr/operators.py +0 -525
- tnfr/program.py +0 -176
- tnfr/scenarios.py +0 -34
- tnfr-4.5.1.dist-info/METADATA +0 -221
- tnfr-4.5.1.dist-info/RECORD +0 -28
- {tnfr-4.5.1.dist-info → tnfr-4.5.2.dist-info}/WHEEL +0 -0
- {tnfr-4.5.1.dist-info → tnfr-4.5.2.dist-info}/entry_points.txt +0 -0
- {tnfr-4.5.1.dist-info → tnfr-4.5.2.dist-info}/licenses/LICENSE.md +0 -0
- {tnfr-4.5.1.dist-info → tnfr-4.5.2.dist-info}/top_level.txt +0 -0
tnfr/rng.py
ADDED
@@ -0,0 +1,254 @@
"""Deterministic RNG helpers."""

from __future__ import annotations

import random
import hashlib
import struct
from collections.abc import Iterator, MutableMapping
from typing import Any, Generic, Hashable, TypeVar


from cachetools import LRUCache, cached
from .constants import DEFAULTS, get_param
from .graph_utils import get_graph
from .locking import get_lock

MASK64 = 0xFFFFFFFFFFFFFFFF

_RNG_LOCK = get_lock("rng")
_DEFAULT_CACHE_MAXSIZE = int(DEFAULTS.get("JITTER_CACHE_SIZE", 128))
_CACHE_MAXSIZE = _DEFAULT_CACHE_MAXSIZE
_CACHE_LOCKED = False

K = TypeVar("K", bound=Hashable)


class _SeedHashCache(MutableMapping[tuple[int, int], int]):
    """Mutable mapping proxy exposing a configurable LRU cache."""

    def __init__(self, maxsize: int) -> None:
        self._maxsize = 0
        self._cache: LRUCache[tuple[int, int], int] | None = None
        self.configure(maxsize)

    def configure(self, maxsize: int) -> None:
        """Configure internal cache size, clearing previous entries."""

        self._maxsize = int(maxsize)
        if self._maxsize <= 0:
            self._cache = None
        else:
            self._cache = LRUCache(maxsize=self._maxsize)

    def __getitem__(self, key: tuple[int, int]) -> int:
        if self._cache is None:
            raise KeyError(key)
        return self._cache[key]

    def __setitem__(self, key: tuple[int, int], value: int) -> None:
        if self._cache is not None:
            self._cache[key] = value

    def __delitem__(self, key: tuple[int, int]) -> None:
        if self._cache is None:
            raise KeyError(key)
        del self._cache[key]

    def __iter__(self) -> Iterator[tuple[int, int]]:
        if self._cache is None:
            return iter(())
        return iter(self._cache)

    def __len__(self) -> int:
        if self._cache is None:
            return 0
        return len(self._cache)

    def clear(self) -> None:  # type: ignore[override]
        if self._cache is not None:
            self._cache.clear()

    @property
    def maxsize(self) -> int:
        return self._maxsize

    @property
    def enabled(self) -> bool:
        return self._cache is not None

    @property
    def data(self) -> LRUCache[tuple[int, int], int] | None:
        """Expose the underlying cache for diagnostics/tests."""

        return self._cache


class ScopedCounterCache(Generic[K]):
    """Thread-safe LRU cache storing monotonic counters by ``key``."""

    def __init__(self, name: str, max_entries: int) -> None:
        if max_entries < 0:
            raise ValueError("max_entries must be non-negative")
        self._lock = get_lock(name)
        self._max_entries = int(max_entries)
        self._cache: LRUCache[K, int] = LRUCache(maxsize=self._max_entries)

    @property
    def lock(self):
        """Return the lock guarding access to the underlying cache."""

        return self._lock

    @property
    def max_entries(self) -> int:
        """Return the configured maximum number of cached entries."""

        return self._max_entries

    @property
    def cache(self) -> LRUCache[K, int]:
        """Expose the underlying ``LRUCache`` for inspection."""

        return self._cache

    def configure(
        self, *, force: bool = False, max_entries: int | None = None
    ) -> None:
        """Resize or reset the cache keeping previous settings."""

        size = self._max_entries if max_entries is None else int(max_entries)
        if size < 0:
            raise ValueError("max_entries must be non-negative")
        with self._lock:
            if size != self._max_entries:
                self._max_entries = size
                force = True
            if force:
                self._cache = LRUCache(maxsize=self._max_entries)

    def clear(self) -> None:
        """Clear stored counters preserving ``max_entries``."""

        self.configure(force=True)

    def bump(self, key: K) -> int:
        """Return current counter for ``key`` and increment it atomically."""

        with self._lock:
            value = int(self._cache.get(key, 0))
            self._cache[key] = value + 1
            return value

    def __len__(self) -> int:
        return len(self._cache)


_seed_hash_cache = _SeedHashCache(_CACHE_MAXSIZE)


@cached(cache=_seed_hash_cache, lock=_RNG_LOCK)
def seed_hash(seed_int: int, key_int: int) -> int:
    """Return a 64-bit hash derived from ``seed_int`` and ``key_int``."""

    seed_bytes = struct.pack(
        ">QQ",
        seed_int & MASK64,
        key_int & MASK64,
    )
    return int.from_bytes(
        hashlib.blake2b(seed_bytes, digest_size=8).digest(), "big"
    )


def _sync_cache_size(G: Any | None) -> None:
    """Synchronise cache size with ``G`` when needed."""

    global _CACHE_MAXSIZE
    if G is None or _CACHE_LOCKED:
        return
    size = get_cache_maxsize(G)
    with _RNG_LOCK:
        if size != _CACHE_MAXSIZE:
            _seed_hash_cache.configure(size)
            _CACHE_MAXSIZE = size


def make_rng(seed: int, key: int, G: Any | None = None) -> random.Random:
    """Return a ``random.Random`` for ``seed`` and ``key``.

    When ``G`` is provided, ``JITTER_CACHE_SIZE`` is read from ``G`` and the
    internal cache size is updated accordingly.
    """
    _sync_cache_size(G)
    seed_int = int(seed)
    key_int = int(key)
    return random.Random(seed_hash(seed_int, key_int))


def clear_rng_cache() -> None:
    """Clear cached seed hashes."""
    if _CACHE_MAXSIZE <= 0 or not _seed_hash_cache.enabled:
        return
    seed_hash.cache_clear()


def get_cache_maxsize(G: Any) -> int:
    """Return RNG cache maximum size for ``G``."""
    return int(get_param(G, "JITTER_CACHE_SIZE"))


def cache_enabled(G: Any | None = None) -> bool:
    """Return ``True`` if RNG caching is enabled.

    When ``G`` is provided, the cache size is synchronised with
    ``JITTER_CACHE_SIZE`` stored in ``G``.
    """
    # Only synchronise the cache size with ``G`` when caching is enabled. This
    # preserves explicit calls to :func:`set_cache_maxsize(0)` which are used in
    # tests to temporarily disable caching regardless of graph defaults.
    if _CACHE_MAXSIZE > 0:
        _sync_cache_size(G)
    return _CACHE_MAXSIZE > 0


def base_seed(G: Any) -> int:
    """Return base RNG seed stored in ``G.graph``."""
    graph = get_graph(G)
    return int(graph.get("RANDOM_SEED", 0))


def _rng_for_step(seed: int, step: int) -> random.Random:
    """Return deterministic RNG for a simulation ``step``."""

    return make_rng(seed, step)


def set_cache_maxsize(size: int) -> None:
    """Update RNG cache maximum size.

    ``size`` must be a non-negative integer; ``0`` disables caching.
    Changing the cache size resets any cached seed hashes.
    If caching is disabled, ``clear_rng_cache`` has no effect.
    """

    global _CACHE_MAXSIZE, _CACHE_LOCKED
    new_size = int(size)
    if new_size < 0:
        raise ValueError("size must be non-negative")
    with _RNG_LOCK:
        _seed_hash_cache.configure(new_size)
        _CACHE_MAXSIZE = new_size
        _CACHE_LOCKED = new_size != _DEFAULT_CACHE_MAXSIZE


__all__ = (
    "seed_hash",
    "make_rng",
    "get_cache_maxsize",
    "set_cache_maxsize",
    "base_seed",
    "cache_enabled",
    "clear_rng_cache",
    "ScopedCounterCache",
)
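Usage note: seed_hash derives a 64-bit BLAKE2b digest from the (seed, key) pair, so make_rng returns reproducible, per-key independent streams, and set_cache_maxsize(0) disables and pins the seed-hash cache against graph defaults. The sketch below shows how these helpers compose; the seed/key values, the "demo" lock name, and the cache sizes are illustrative assumptions, not values taken from the package.

from tnfr.rng import ScopedCounterCache, clear_rng_cache, make_rng, set_cache_maxsize

# Identical (seed, key) pairs reproduce the same stream of draws.
a = make_rng(seed=42, key=7)
b = make_rng(seed=42, key=7)
assert [a.random() for _ in range(3)] == [b.random() for _ in range(3)]

# A different key yields an independent stream from the same base seed.
c = make_rng(seed=42, key=8)

# Seed-hash caching can be resized or disabled; 0 disables it, and that
# explicit setting then takes precedence over graph defaults.
set_cache_maxsize(0)
clear_rng_cache()  # no-op while caching is disabled
set_cache_maxsize(128)

# ScopedCounterCache hands out monotonic counters per key under a lock:
# bump() returns the current value, then increments it.
counters = ScopedCounterCache("demo", max_entries=64)
assert counters.bump("node-1") == 0
assert counters.bump("node-1") == 1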
tnfr/selector.py
ADDED
@@ -0,0 +1,210 @@
"""Utilities to select glyphs based on structural metrics.

This module normalises thresholds, computes selection scores and applies
hysteresis when assigning glyphs to nodes.
"""

from __future__ import annotations

import threading
from operator import itemgetter
from typing import Any, Mapping, TYPE_CHECKING
from weakref import WeakKeyDictionary

if TYPE_CHECKING:  # pragma: no cover
    import networkx as nx  # type: ignore[import-untyped]

from .constants import DEFAULTS
from .constants.core import SELECTOR_THRESHOLD_DEFAULTS
from .helpers.numeric import clamp01
from .metrics.common import compute_dnfr_accel_max
from .collections_utils import is_non_string_sequence


HYSTERESIS_GLYPHS: set[str] = {"IL", "OZ", "ZHIR", "THOL", "NAV", "RA"}

__all__ = (
    "_selector_thresholds",
    "_norms_para_selector",
    "_calc_selector_score",
    "_apply_selector_hysteresis",
)


_SelectorThresholdCacheEntry = tuple[
    tuple[tuple[str, float], ...],
    dict[str, float],
]
_SELECTOR_THRESHOLD_CACHE: WeakKeyDictionary[
    "nx.Graph",
    _SelectorThresholdCacheEntry,
] = WeakKeyDictionary()
_SELECTOR_THRESHOLD_CACHE_LOCK = threading.Lock()


def _sorted_items(mapping: Mapping[str, float]) -> tuple[tuple[str, float], ...]:
    """Return mapping items sorted by key.

    Parameters
    ----------
    mapping : Mapping[str, float]
        Mapping whose items will be sorted.

    Returns
    -------
    tuple[tuple[str, float], ...]
        Key-sorted items providing a hashable representation for memoisation.
    """
    return tuple(sorted(mapping.items()))


def _compute_selector_thresholds(
    thr_sel_items: tuple[tuple[str, float], ...],
) -> dict[str, float]:
    """Construct selector thresholds for a graph.

    Parameters
    ----------
    thr_sel_items : tuple[tuple[str, float], ...]
        Selector threshold items as ``(key, value)`` pairs.

    Returns
    -------
    dict[str, float]
        Normalised thresholds for selector metrics.
    """
    thr_sel = dict(thr_sel_items)

    out: dict[str, float] = {}
    for key, default in SELECTOR_THRESHOLD_DEFAULTS.items():
        val = thr_sel.get(key, default)
        out[key] = clamp01(float(val))
    return out


def _selector_thresholds(G: "nx.Graph") -> dict[str, float]:
    """Return normalised thresholds for Si, ΔNFR and acceleration.

    Parameters
    ----------
    G : nx.Graph
        Graph whose configuration stores selector thresholds.

    Returns
    -------
    dict[str, float]
        Dictionary with clamped hi/lo thresholds, memoised per graph.
    """
    sel_defaults = DEFAULTS.get("SELECTOR_THRESHOLDS", {})
    thr_sel = {**sel_defaults, **G.graph.get("SELECTOR_THRESHOLDS", {})}
    thr_sel_items = _sorted_items(thr_sel)

    with _SELECTOR_THRESHOLD_CACHE_LOCK:
        cached = _SELECTOR_THRESHOLD_CACHE.get(G)
        if cached is not None and cached[0] == thr_sel_items:
            return cached[1]

    thresholds = _compute_selector_thresholds(thr_sel_items)

    with _SELECTOR_THRESHOLD_CACHE_LOCK:
        cached = _SELECTOR_THRESHOLD_CACHE.get(G)
        if cached is not None and cached[0] == thr_sel_items:
            return cached[1]
        _SELECTOR_THRESHOLD_CACHE[G] = (thr_sel_items, thresholds)
    return thresholds


def _norms_para_selector(G: "nx.Graph") -> dict:
    """Compute and cache norms for ΔNFR and acceleration.

    Parameters
    ----------
    G : nx.Graph
        Graph for which to compute maxima. Results are stored in ``G.graph``
        under ``"_sel_norms"``.

    Returns
    -------
    dict
        Mapping with normalisation maxima for ``dnfr`` and ``accel``.
    """
    norms = compute_dnfr_accel_max(G)
    G.graph["_sel_norms"] = norms
    return norms


def _calc_selector_score(
    Si: float, dnfr: float, accel: float, weights: dict[str, float]
) -> float:
    """Compute weighted selector score.

    Parameters
    ----------
    Si : float
        Normalised sense index.
    dnfr : float
        Normalised absolute ΔNFR value.
    accel : float
        Normalised acceleration (|d²EPI/dt²|).
    weights : dict[str, float]
        Normalised weights for ``"w_si"``, ``"w_dnfr"`` and ``"w_accel"``.

    Returns
    -------
    float
        Final weighted score.
    """
    return (
        weights["w_si"] * Si
        + weights["w_dnfr"] * (1.0 - dnfr)
        + weights["w_accel"] * (1.0 - accel)
    )


def _apply_selector_hysteresis(
    nd: dict[str, Any],
    Si: float,
    dnfr: float,
    accel: float,
    thr: dict[str, float],
    margin: float,
) -> str | None:
    """Apply hysteresis when values are near thresholds.

    Parameters
    ----------
    nd : dict[str, Any]
        Node attribute dictionary containing glyph history.
    Si : float
        Normalised sense index.
    dnfr : float
        Normalised absolute ΔNFR value.
    accel : float
        Normalised acceleration.
    thr : dict[str, float]
        Thresholds returned by :func:`_selector_thresholds`.
    margin : float
        Distance from thresholds below which the previous glyph is reused.

    Returns
    -------
    str or None
        Previous glyph if hysteresis applies, otherwise ``None``.
    """
    # Batch extraction reduces dictionary lookups inside loops.
    si_hi, si_lo, dnfr_hi, dnfr_lo, accel_hi, accel_lo = itemgetter(
        "si_hi", "si_lo", "dnfr_hi", "dnfr_lo", "accel_hi", "accel_lo"
    )(thr)

    d_si = min(abs(Si - si_hi), abs(Si - si_lo))
    d_dn = min(abs(dnfr - dnfr_hi), abs(dnfr - dnfr_lo))
    d_ac = min(abs(accel - accel_hi), abs(accel - accel_lo))
    certeza = min(d_si, d_dn, d_ac)
    if certeza < margin:
        hist = nd.get("glyph_history")
        if not is_non_string_sequence(hist) or not hist:
            return None
        prev = hist[-1]
        if isinstance(prev, str) and prev in HYSTERESIS_GLYPHS:
            return prev
    return None
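Usage note: _calc_selector_score rewards high Si and low normalised ΔNFR/acceleration, while _apply_selector_hysteresis reuses the node's previous glyph whenever any one of the three metrics lands within margin of one of its thresholds. The sketch below composes the two helpers; the threshold keys and weight names come from the module above, but every concrete number and the sample glyph history are illustrative assumptions.

from tnfr.selector import _apply_selector_hysteresis, _calc_selector_score

# Weighted score: 0.5*0.8 + 0.3*(1 - 0.2) + 0.2*(1 - 0.1) = 0.82
weights = {"w_si": 0.5, "w_dnfr": 0.3, "w_accel": 0.2}
score = _calc_selector_score(Si=0.8, dnfr=0.2, accel=0.1, weights=weights)

# Hysteresis: Si sits 0.01 below si_hi, inside margin=0.05, so the node's
# last glyph ("IL", a hysteresis glyph) is reused instead of re-selecting.
thr = {
    "si_hi": 0.66, "si_lo": 0.33,
    "dnfr_hi": 0.50, "dnfr_lo": 0.10,
    "accel_hi": 0.50, "accel_lo": 0.10,
}
nd = {"glyph_history": ["NAV", "IL"]}
prev = _apply_selector_hysteresis(nd, Si=0.65, dnfr=0.30, accel=0.30, thr=thr, margin=0.05)
assert prev == "IL"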