tnfr-4.5.1-py3-none-any.whl → tnfr-6.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (170)
  1. tnfr/__init__.py +270 -90
  2. tnfr/__init__.pyi +40 -0
  3. tnfr/_compat.py +11 -0
  4. tnfr/_version.py +7 -0
  5. tnfr/_version.pyi +7 -0
  6. tnfr/alias.py +631 -0
  7. tnfr/alias.pyi +140 -0
  8. tnfr/cache.py +732 -0
  9. tnfr/cache.pyi +232 -0
  10. tnfr/callback_utils.py +381 -0
  11. tnfr/callback_utils.pyi +105 -0
  12. tnfr/cli/__init__.py +89 -0
  13. tnfr/cli/__init__.pyi +47 -0
  14. tnfr/cli/arguments.py +199 -0
  15. tnfr/cli/arguments.pyi +33 -0
  16. tnfr/cli/execution.py +322 -0
  17. tnfr/cli/execution.pyi +80 -0
  18. tnfr/cli/utils.py +34 -0
  19. tnfr/cli/utils.pyi +8 -0
  20. tnfr/config/__init__.py +12 -0
  21. tnfr/config/__init__.pyi +8 -0
  22. tnfr/config/constants.py +104 -0
  23. tnfr/config/constants.pyi +12 -0
  24. tnfr/config/init.py +36 -0
  25. tnfr/config/init.pyi +8 -0
  26. tnfr/config/operator_names.py +106 -0
  27. tnfr/config/operator_names.pyi +28 -0
  28. tnfr/config/presets.py +104 -0
  29. tnfr/config/presets.pyi +7 -0
  30. tnfr/constants/__init__.py +228 -0
  31. tnfr/constants/__init__.pyi +104 -0
  32. tnfr/constants/core.py +158 -0
  33. tnfr/constants/core.pyi +17 -0
  34. tnfr/constants/init.py +31 -0
  35. tnfr/constants/init.pyi +12 -0
  36. tnfr/constants/metric.py +102 -0
  37. tnfr/constants/metric.pyi +19 -0
  38. tnfr/constants_glyphs.py +16 -0
  39. tnfr/constants_glyphs.pyi +12 -0
  40. tnfr/dynamics/__init__.py +136 -0
  41. tnfr/dynamics/__init__.pyi +83 -0
  42. tnfr/dynamics/adaptation.py +201 -0
  43. tnfr/dynamics/aliases.py +22 -0
  44. tnfr/dynamics/coordination.py +343 -0
  45. tnfr/dynamics/dnfr.py +2315 -0
  46. tnfr/dynamics/dnfr.pyi +33 -0
  47. tnfr/dynamics/integrators.py +561 -0
  48. tnfr/dynamics/integrators.pyi +35 -0
  49. tnfr/dynamics/runtime.py +521 -0
  50. tnfr/dynamics/sampling.py +34 -0
  51. tnfr/dynamics/sampling.pyi +7 -0
  52. tnfr/dynamics/selectors.py +680 -0
  53. tnfr/execution.py +216 -0
  54. tnfr/execution.pyi +65 -0
  55. tnfr/flatten.py +283 -0
  56. tnfr/flatten.pyi +28 -0
  57. tnfr/gamma.py +320 -89
  58. tnfr/gamma.pyi +40 -0
  59. tnfr/glyph_history.py +337 -0
  60. tnfr/glyph_history.pyi +53 -0
  61. tnfr/grammar.py +23 -153
  62. tnfr/grammar.pyi +13 -0
  63. tnfr/helpers/__init__.py +151 -0
  64. tnfr/helpers/__init__.pyi +66 -0
  65. tnfr/helpers/numeric.py +88 -0
  66. tnfr/helpers/numeric.pyi +12 -0
  67. tnfr/immutable.py +214 -0
  68. tnfr/immutable.pyi +37 -0
  69. tnfr/initialization.py +199 -0
  70. tnfr/initialization.pyi +73 -0
  71. tnfr/io.py +311 -0
  72. tnfr/io.pyi +11 -0
  73. tnfr/locking.py +37 -0
  74. tnfr/locking.pyi +7 -0
  75. tnfr/metrics/__init__.py +41 -0
  76. tnfr/metrics/__init__.pyi +20 -0
  77. tnfr/metrics/coherence.py +1469 -0
  78. tnfr/metrics/common.py +149 -0
  79. tnfr/metrics/common.pyi +15 -0
  80. tnfr/metrics/core.py +259 -0
  81. tnfr/metrics/core.pyi +13 -0
  82. tnfr/metrics/diagnosis.py +840 -0
  83. tnfr/metrics/diagnosis.pyi +89 -0
  84. tnfr/metrics/export.py +151 -0
  85. tnfr/metrics/glyph_timing.py +369 -0
  86. tnfr/metrics/reporting.py +152 -0
  87. tnfr/metrics/reporting.pyi +12 -0
  88. tnfr/metrics/sense_index.py +294 -0
  89. tnfr/metrics/sense_index.pyi +9 -0
  90. tnfr/metrics/trig.py +216 -0
  91. tnfr/metrics/trig.pyi +12 -0
  92. tnfr/metrics/trig_cache.py +105 -0
  93. tnfr/metrics/trig_cache.pyi +10 -0
  94. tnfr/node.py +255 -177
  95. tnfr/node.pyi +161 -0
  96. tnfr/observers.py +154 -150
  97. tnfr/observers.pyi +46 -0
  98. tnfr/ontosim.py +135 -134
  99. tnfr/ontosim.pyi +33 -0
  100. tnfr/operators/__init__.py +452 -0
  101. tnfr/operators/__init__.pyi +31 -0
  102. tnfr/operators/definitions.py +181 -0
  103. tnfr/operators/definitions.pyi +92 -0
  104. tnfr/operators/jitter.py +266 -0
  105. tnfr/operators/jitter.pyi +11 -0
  106. tnfr/operators/registry.py +80 -0
  107. tnfr/operators/registry.pyi +15 -0
  108. tnfr/operators/remesh.py +569 -0
  109. tnfr/presets.py +10 -23
  110. tnfr/presets.pyi +7 -0
  111. tnfr/py.typed +0 -0
  112. tnfr/rng.py +440 -0
  113. tnfr/rng.pyi +14 -0
  114. tnfr/selector.py +217 -0
  115. tnfr/selector.pyi +19 -0
  116. tnfr/sense.py +307 -142
  117. tnfr/sense.pyi +30 -0
  118. tnfr/structural.py +69 -164
  119. tnfr/structural.pyi +46 -0
  120. tnfr/telemetry/__init__.py +13 -0
  121. tnfr/telemetry/verbosity.py +37 -0
  122. tnfr/tokens.py +61 -0
  123. tnfr/tokens.pyi +41 -0
  124. tnfr/trace.py +520 -95
  125. tnfr/trace.pyi +68 -0
  126. tnfr/types.py +382 -17
  127. tnfr/types.pyi +145 -0
  128. tnfr/utils/__init__.py +158 -0
  129. tnfr/utils/__init__.pyi +133 -0
  130. tnfr/utils/cache.py +755 -0
  131. tnfr/utils/cache.pyi +156 -0
  132. tnfr/utils/data.py +267 -0
  133. tnfr/utils/data.pyi +73 -0
  134. tnfr/utils/graph.py +87 -0
  135. tnfr/utils/graph.pyi +10 -0
  136. tnfr/utils/init.py +746 -0
  137. tnfr/utils/init.pyi +85 -0
  138. tnfr/utils/io.py +157 -0
  139. tnfr/utils/io.pyi +10 -0
  140. tnfr/utils/validators.py +130 -0
  141. tnfr/utils/validators.pyi +19 -0
  142. tnfr/validation/__init__.py +25 -0
  143. tnfr/validation/__init__.pyi +17 -0
  144. tnfr/validation/compatibility.py +59 -0
  145. tnfr/validation/compatibility.pyi +8 -0
  146. tnfr/validation/grammar.py +149 -0
  147. tnfr/validation/grammar.pyi +11 -0
  148. tnfr/validation/rules.py +194 -0
  149. tnfr/validation/rules.pyi +18 -0
  150. tnfr/validation/syntax.py +151 -0
  151. tnfr/validation/syntax.pyi +7 -0
  152. tnfr-6.0.0.dist-info/METADATA +135 -0
  153. tnfr-6.0.0.dist-info/RECORD +157 -0
  154. tnfr/cli.py +0 -322
  155. tnfr/config.py +0 -41
  156. tnfr/constants.py +0 -277
  157. tnfr/dynamics.py +0 -814
  158. tnfr/helpers.py +0 -264
  159. tnfr/main.py +0 -47
  160. tnfr/metrics.py +0 -597
  161. tnfr/operators.py +0 -525
  162. tnfr/program.py +0 -176
  163. tnfr/scenarios.py +0 -34
  164. tnfr/validators.py +0 -38
  165. tnfr-4.5.1.dist-info/METADATA +0 -221
  166. tnfr-4.5.1.dist-info/RECORD +0 -28
  167. {tnfr-4.5.1.dist-info → tnfr-6.0.0.dist-info}/WHEEL +0 -0
  168. {tnfr-4.5.1.dist-info → tnfr-6.0.0.dist-info}/entry_points.txt +0 -0
  169. {tnfr-4.5.1.dist-info → tnfr-6.0.0.dist-info}/licenses/LICENSE.md +0 -0
  170. {tnfr-4.5.1.dist-info → tnfr-6.0.0.dist-info}/top_level.txt +0 -0
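The flat modules removed in this release (tnfr/cli.py, tnfr/constants.py, tnfr/dynamics.py, tnfr/helpers.py, tnfr/metrics.py, tnfr/operators.py) reappear above as packages of the same dotted name. A minimal sanity check of that layout change, assuming tnfr 6.0.0 is installed and that the new packages re-export their former public names (which this file listing alone does not confirm):

import importlib

# Former single-file modules that ship as packages in 6.0.0.
for name in ("tnfr.cli", "tnfr.constants", "tnfr.dynamics", "tnfr.helpers", "tnfr.metrics", "tnfr.operators"):
    module = importlib.import_module(name)
    # Each __file__ is expected to point at a package __init__.py rather than a flat .py module.
    print(name, "->", module.__file__)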
tnfr/rng.py ADDED
@@ -0,0 +1,440 @@
+ """Deterministic RNG helpers."""
+
+ from __future__ import annotations
+
+ import hashlib
+ import random
+ import struct
+ import threading
+ from collections.abc import Iterator, MutableMapping
+ from dataclasses import dataclass
+ from typing import Any, Generic, Hashable, TypeVar, cast
+
+
+ from cachetools import cached  # type: ignore[import-untyped]
+ from .constants import DEFAULTS, get_param
+ from .cache import CacheManager, InstrumentedLRUCache
+ from .utils import get_graph
+ from .locking import get_lock
+ from .types import GraphLike, TNFRGraph
+
+ MASK64 = 0xFFFFFFFFFFFFFFFF
+
+ _RNG_LOCK = get_lock("rng")
+ _DEFAULT_CACHE_MAXSIZE = int(DEFAULTS.get("JITTER_CACHE_SIZE", 128))
+ _CACHE_MAXSIZE = _DEFAULT_CACHE_MAXSIZE
+ _CACHE_LOCKED = False
+
+ K = TypeVar("K", bound=Hashable)
+ V = TypeVar("V")
+
+
+ @dataclass
+ class _SeedCacheState:
+     cache: InstrumentedLRUCache[tuple[int, int], int] | None
+     maxsize: int
+
+
+ @dataclass
+ class _CounterState(Generic[K]):
+     cache: InstrumentedLRUCache[K, int]
+     locks: dict[K, threading.RLock]
+     max_entries: int
+
+
+ _RNG_CACHE_MANAGER = CacheManager(default_capacity=_DEFAULT_CACHE_MAXSIZE)
+
+
+ class _SeedHashCache(MutableMapping[tuple[int, int], int]):
+     """Mutable mapping proxy exposing a configurable LRU cache."""
+
+     def __init__(
+         self,
+         *,
+         manager: CacheManager | None = None,
+         state_key: str = "seed_hash_cache",
+         default_maxsize: int = _DEFAULT_CACHE_MAXSIZE,
+     ) -> None:
+         self._manager = manager or _RNG_CACHE_MANAGER
+         self._state_key = state_key
+         self._default_maxsize = int(default_maxsize)
+         if not self._manager.has_override(self._state_key):
+             self._manager.configure(
+                 overrides={self._state_key: self._default_maxsize}
+             )
+         self._manager.register(
+             self._state_key,
+             self._create_state,
+             reset=self._reset_state,
+         )
+
+     def _resolved_size(self, requested: int | None = None) -> int:
+         size = self._manager.get_capacity(
+             self._state_key,
+             requested=requested,
+             fallback=self._default_maxsize,
+         )
+         if size is None:
+             return 0
+         return int(size)
+
+     def _create_state(self) -> _SeedCacheState:
+         size = self._resolved_size()
+         if size <= 0:
+             return _SeedCacheState(cache=None, maxsize=0)
+         return _SeedCacheState(
+             cache=InstrumentedLRUCache(
+                 size,
+                 manager=self._manager,
+                 metrics_key=self._state_key,
+             ),
+             maxsize=size,
+         )
+
+     def _reset_state(self, state: _SeedCacheState | None) -> _SeedCacheState:
+         return self._create_state()
+
+     def _get_state(self, *, create: bool = True) -> _SeedCacheState | None:
+         state = self._manager.get(self._state_key, create=create)
+         if state is None:
+             return None
+         if not isinstance(state, _SeedCacheState):
+             state = self._create_state()
+             self._manager.store(self._state_key, state)
+         return state
+
+     def configure(self, maxsize: int) -> None:
+         size = int(maxsize)
+         if size < 0:
+             raise ValueError("maxsize must be non-negative")
+         self._manager.configure(overrides={self._state_key: size})
+         self._manager.update(self._state_key, lambda _: self._create_state())
+
+     def __getitem__(self, key: tuple[int, int]) -> int:
+         state = self._get_state()
+         if state is None or state.cache is None:
+             raise KeyError(key)
+         value = state.cache[key]
+         self._manager.increment_hit(self._state_key)
+         return value
+
+     def __setitem__(self, key: tuple[int, int], value: int) -> None:
+         state = self._get_state()
+         if state is not None and state.cache is not None:
+             state.cache[key] = value
+
+     def __delitem__(self, key: tuple[int, int]) -> None:
+         state = self._get_state()
+         if state is None or state.cache is None:
+             raise KeyError(key)
+         del state.cache[key]
+
+     def __iter__(self) -> Iterator[tuple[int, int]]:
+         state = self._get_state(create=False)
+         if state is None or state.cache is None:
+             return iter(())
+         return iter(state.cache)
+
+     def __len__(self) -> int:
+         state = self._get_state(create=False)
+         if state is None or state.cache is None:
+             return 0
+         return len(state.cache)
+
+     def clear(self) -> None:  # type: ignore[override]
+         self._manager.clear(self._state_key)
+
+     @property
+     def maxsize(self) -> int:
+         state = self._get_state()
+         return 0 if state is None else state.maxsize
+
+     @property
+     def enabled(self) -> bool:
+         state = self._get_state(create=False)
+         return bool(state and state.cache is not None)
+
+     @property
+     def data(self) -> InstrumentedLRUCache[tuple[int, int], int] | None:
+         """Expose the underlying cache for diagnostics/tests."""
+
+         state = self._get_state(create=False)
+         return None if state is None else state.cache
+
+
+ class ScopedCounterCache(Generic[K]):
+     """Thread-safe LRU cache storing monotonic counters by ``key``."""
+
+     def __init__(
+         self,
+         name: str,
+         max_entries: int | None = None,
+         *,
+         manager: CacheManager | None = None,
+         default_max_entries: int = _DEFAULT_CACHE_MAXSIZE,
+     ) -> None:
+         self._name = name
+         self._manager = manager or _RNG_CACHE_MANAGER
+         self._state_key = f"scoped_counter:{name}"
+         self._default_max_entries = int(default_max_entries)
+         requested = None if max_entries is None else int(max_entries)
+         if requested is not None and requested < 0:
+             raise ValueError("max_entries must be non-negative")
+         if not self._manager.has_override(self._state_key):
+             fallback = requested
+             if fallback is None:
+                 fallback = self._default_max_entries
+             self._manager.configure(overrides={self._state_key: fallback})
+         elif requested is not None:
+             self._manager.configure(overrides={self._state_key: requested})
+         self._manager.register(
+             self._state_key,
+             self._create_state,
+             lock_factory=lambda: get_lock(name),
+             reset=self._reset_state,
+         )
+
+     def _resolved_entries(self, requested: int | None = None) -> int:
+         size = self._manager.get_capacity(
+             self._state_key,
+             requested=requested,
+             fallback=self._default_max_entries,
+         )
+         if size is None:
+             return 0
+         return int(size)
+
+     def _create_state(self, requested: int | None = None) -> _CounterState[K]:
+         size = self._resolved_entries(requested)
+         locks: dict[K, threading.RLock] = {}
+         return _CounterState(
+             cache=InstrumentedLRUCache(
+                 size,
+                 manager=self._manager,
+                 metrics_key=self._state_key,
+                 locks=locks,
+             ),
+             locks=locks,
+             max_entries=size,
+         )
+
+     def _reset_state(self, state: _CounterState[K] | None) -> _CounterState[K]:
+         return self._create_state()
+
+     def _get_state(self) -> _CounterState[K]:
+         state = self._manager.get(self._state_key)
+         if not isinstance(state, _CounterState):
+             state = self._create_state(0)
+             self._manager.store(self._state_key, state)
+         return state
+
+     @property
+     def lock(self) -> threading.Lock | threading.RLock:
+         """Return the lock guarding access to the underlying cache."""
+
+         return self._manager.get_lock(self._state_key)
+
+     @property
+     def max_entries(self) -> int:
+         """Return the configured maximum number of cached entries."""
+
+         return self._get_state().max_entries
+
+     @property
+     def cache(self) -> InstrumentedLRUCache[K, int]:
+         """Expose the instrumented cache for inspection."""
+
+         return self._get_state().cache
+
+     @property
+     def locks(self) -> dict[K, threading.RLock]:
+         """Return the mapping of per-key locks tracked by the cache."""
+
+         return self._get_state().locks
+
+     def configure(
+         self, *, force: bool = False, max_entries: int | None = None
+     ) -> None:
+         """Resize or reset the cache keeping previous settings."""
+
+         if max_entries is None:
+             size = self._resolved_entries()
+             update_policy = False
+         else:
+             size = int(max_entries)
+             if size < 0:
+                 raise ValueError("max_entries must be non-negative")
+             update_policy = True
+
+         def _update(state: _CounterState[K] | None) -> _CounterState[K]:
+             if not isinstance(state, _CounterState) or force or state.max_entries != size:
+                 locks: dict[K, threading.RLock] = {}
+                 return _CounterState(
+                     cache=InstrumentedLRUCache(
+                         size,
+                         manager=self._manager,
+                         metrics_key=self._state_key,
+                         locks=locks,
+                     ),
+                     locks=locks,
+                     max_entries=size,
+                 )
+             return cast(_CounterState[K], state)
+
+         if update_policy:
+             self._manager.configure(overrides={self._state_key: size})
+         self._manager.update(self._state_key, _update)
+
+     def clear(self) -> None:
+         """Clear stored counters preserving ``max_entries``."""
+
+         self.configure(force=True)
+
+     def bump(self, key: K) -> int:
+         """Return current counter for ``key`` and increment it atomically."""
+
+         result: dict[str, Any] = {}
+
+         def _update(state: _CounterState[K] | None) -> _CounterState[K]:
+             if not isinstance(state, _CounterState):
+                 state = self._create_state(0)
+             cache = state.cache
+             locks = state.locks
+             if key not in locks:
+                 locks[key] = threading.RLock()
+             value = int(cache.get(key, 0))
+             cache[key] = value + 1
+             result["value"] = value
+             return state
+
+         self._manager.update(self._state_key, _update)
+         return int(result.get("value", 0))
+
+     def __len__(self) -> int:
+         return len(self.cache)
+
+
+ _seed_hash_cache = _SeedHashCache()
+
+
+ def _compute_seed_hash(seed_int: int, key_int: int) -> int:
+     seed_bytes = struct.pack(
+         ">QQ",
+         seed_int & MASK64,
+         key_int & MASK64,
+     )
+     return int.from_bytes(
+         hashlib.blake2b(seed_bytes, digest_size=8).digest(), "big"
+     )
+
+
+ @cached(cache=_seed_hash_cache, lock=_RNG_LOCK)
+ def _cached_seed_hash(seed_int: int, key_int: int) -> int:
+     return _compute_seed_hash(seed_int, key_int)
+
+
+ def seed_hash(seed_int: int, key_int: int) -> int:
+     """Return a 64-bit hash derived from ``seed_int`` and ``key_int``."""
+
+     if _CACHE_MAXSIZE <= 0 or not _seed_hash_cache.enabled:
+         return _compute_seed_hash(seed_int, key_int)
+     return _cached_seed_hash(seed_int, key_int)
+
+
+ seed_hash.cache_clear = cast(Any, _cached_seed_hash).cache_clear  # type: ignore[attr-defined]
+ seed_hash.cache = _seed_hash_cache  # type: ignore[attr-defined]
+
+
+ def _sync_cache_size(G: TNFRGraph | GraphLike | None) -> None:
+     """Synchronise cache size with ``G`` when needed."""
+
+     global _CACHE_MAXSIZE
+     if G is None or _CACHE_LOCKED:
+         return
+     size = get_cache_maxsize(G)
+     with _RNG_LOCK:
+         if size != _seed_hash_cache.maxsize:
+             _seed_hash_cache.configure(size)
+         _CACHE_MAXSIZE = _seed_hash_cache.maxsize
+
+
+ def make_rng(
+     seed: int, key: int, G: TNFRGraph | GraphLike | None = None
+ ) -> random.Random:
+     """Return a ``random.Random`` for ``seed`` and ``key``.
+
+     When ``G`` is provided, ``JITTER_CACHE_SIZE`` is read from ``G`` and the
+     internal cache size is updated accordingly.
+     """
+     _sync_cache_size(G)
+     seed_int = int(seed)
+     key_int = int(key)
+     return random.Random(seed_hash(seed_int, key_int))
+
+
+ def clear_rng_cache() -> None:
+     """Clear cached seed hashes."""
+     if _seed_hash_cache.maxsize <= 0 or not _seed_hash_cache.enabled:
+         return
+     seed_hash.cache_clear()  # type: ignore[attr-defined]
+
+
+ def get_cache_maxsize(G: TNFRGraph | GraphLike) -> int:
+     """Return RNG cache maximum size for ``G``."""
+     return int(get_param(G, "JITTER_CACHE_SIZE"))
+
+
+ def cache_enabled(G: TNFRGraph | GraphLike | None = None) -> bool:
+     """Return ``True`` if RNG caching is enabled.
+
+     When ``G`` is provided, the cache size is synchronised with
+     ``JITTER_CACHE_SIZE`` stored in ``G``.
+     """
+     # Only synchronise the cache size with ``G`` when caching is enabled. This
+     # preserves explicit calls to :func:`set_cache_maxsize(0)` which are used in
+     # tests to temporarily disable caching regardless of graph defaults.
+     if _seed_hash_cache.maxsize > 0:
+         _sync_cache_size(G)
+     return _seed_hash_cache.maxsize > 0
+
+
+ def base_seed(G: TNFRGraph | GraphLike) -> int:
+     """Return base RNG seed stored in ``G.graph``."""
+     graph = get_graph(G)
+     return int(graph.get("RANDOM_SEED", 0))
+
+
+ def _rng_for_step(seed: int, step: int) -> random.Random:
+     """Return deterministic RNG for a simulation ``step``."""
+
+     return make_rng(seed, step)
+
+
+ def set_cache_maxsize(size: int) -> None:
+     """Update RNG cache maximum size.
+
+     ``size`` must be a non-negative integer; ``0`` disables caching.
+     Changing the cache size resets any cached seed hashes.
+     If caching is disabled, ``clear_rng_cache`` has no effect.
+     """
+
+     global _CACHE_MAXSIZE, _CACHE_LOCKED
+     new_size = int(size)
+     if new_size < 0:
+         raise ValueError("size must be non-negative")
+     with _RNG_LOCK:
+         _seed_hash_cache.configure(new_size)
+         _CACHE_MAXSIZE = _seed_hash_cache.maxsize
+         _CACHE_LOCKED = new_size != _DEFAULT_CACHE_MAXSIZE
+
+
+ __all__ = (
+     "seed_hash",
+     "make_rng",
+     "get_cache_maxsize",
+     "set_cache_maxsize",
+     "base_seed",
+     "cache_enabled",
+     "clear_rng_cache",
+     "ScopedCounterCache",
+ )
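A minimal usage sketch of the helpers added above, assuming tnfr 6.0.0 is installed; the graph-aware path (reading JITTER_CACHE_SIZE via get_param) is not exercised here, and the value 128 passed back to set_cache_maxsize is the module's fallback default rather than a documented setting:

from tnfr.rng import (
    ScopedCounterCache,
    cache_enabled,
    make_rng,
    seed_hash,
    set_cache_maxsize,
)

# The same (seed, key) pair always yields the same deterministic stream.
assert make_rng(42, 7).random() == make_rng(42, 7).random()
assert seed_hash(42, 7) == seed_hash(42, 7)  # 64-bit BLAKE2b-derived value

# A size of 0 disables the seed-hash cache; hashes are then computed directly.
set_cache_maxsize(0)
assert not cache_enabled()
set_cache_maxsize(128)  # re-enable caching (assumed fallback default)

# bump() returns the previous counter value for a key and increments it atomically.
counters: ScopedCounterCache[str] = ScopedCounterCache("demo")
assert counters.bump("node-1") == 0
assert counters.bump("node-1") == 1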
tnfr/rng.pyi ADDED
@@ -0,0 +1,14 @@
+ from typing import Any
+
+ __all__: Any
+
+ def __getattr__(name: str) -> Any: ...
+
+ ScopedCounterCache: Any
+ base_seed: Any
+ cache_enabled: Any
+ clear_rng_cache: Any
+ get_cache_maxsize: Any
+ make_rng: Any
+ seed_hash: Any
+ set_cache_maxsize: Any
tnfr/selector.py ADDED
@@ -0,0 +1,217 @@
+ """Utilities to select glyphs based on structural metrics.
+
+ This module normalises thresholds, computes selection scores and applies
+ hysteresis when assigning glyphs to nodes.
+ """
+
+ from __future__ import annotations
+
+ import threading
+ from operator import itemgetter
+ from typing import Any, Mapping, TYPE_CHECKING, cast
+ from weakref import WeakKeyDictionary
+
+ if TYPE_CHECKING:  # pragma: no cover
+     import networkx as nx
+
+ from .constants import DEFAULTS
+ from .constants.core import SELECTOR_THRESHOLD_DEFAULTS
+ from .helpers.numeric import clamp01
+ from .metrics.common import compute_dnfr_accel_max
+ from .utils import is_non_string_sequence
+ from .types import SelectorNorms, SelectorThresholds, SelectorWeights
+
+
+ HYSTERESIS_GLYPHS: set[str] = {"IL", "OZ", "ZHIR", "THOL", "NAV", "RA"}
+
+ __all__ = (
+     "_selector_thresholds",
+     "_selector_norms",
+     "_calc_selector_score",
+     "_apply_selector_hysteresis",
+ )
+
+
+ _SelectorThresholdItems = tuple[tuple[str, float], ...]
+ _SelectorThresholdCacheEntry = tuple[
+     _SelectorThresholdItems,
+     SelectorThresholds,
+ ]
+ _SELECTOR_THRESHOLD_CACHE: WeakKeyDictionary[
+     "nx.Graph",
+     _SelectorThresholdCacheEntry,
+ ] = WeakKeyDictionary()
+ _SELECTOR_THRESHOLD_CACHE_LOCK = threading.Lock()
+
+
+ def _sorted_items(mapping: Mapping[str, float]) -> _SelectorThresholdItems:
+     """Return mapping items sorted by key.
+
+     Parameters
+     ----------
+     mapping : Mapping[str, float]
+         Mapping whose items will be sorted.
+
+     Returns
+     -------
+     tuple[tuple[str, float], ...]
+         Key-sorted items providing a hashable representation for memoisation.
+     """
+     return tuple(sorted(mapping.items()))
+
+
+ def _compute_selector_thresholds(
+     thr_sel_items: _SelectorThresholdItems,
+ ) -> SelectorThresholds:
+     """Construct selector thresholds for a graph.
+
+     Parameters
+     ----------
+     thr_sel_items : tuple[tuple[str, float], ...]
+         Selector threshold items as ``(key, value)`` pairs.
+
+     Returns
+     -------
+     dict[str, float]
+         Normalised thresholds for selector metrics.
+     """
+     thr_sel = dict(thr_sel_items)
+
+     out: dict[str, float] = {}
+     for key, default in SELECTOR_THRESHOLD_DEFAULTS.items():
+         val = thr_sel.get(key, default)
+         out[key] = clamp01(float(val))
+     return cast(SelectorThresholds, out)
+
+
+ def _selector_thresholds(G: "nx.Graph") -> SelectorThresholds:
+     """Return normalised thresholds for Si, ΔNFR and acceleration.
+
+     Parameters
+     ----------
+     G : nx.Graph
+         Graph whose configuration stores selector thresholds.
+
+     Returns
+     -------
+     dict[str, float]
+         Dictionary with clamped hi/lo thresholds, memoised per graph.
+     """
+     sel_defaults = DEFAULTS.get("SELECTOR_THRESHOLDS", {})
+     thr_sel = {**sel_defaults, **G.graph.get("SELECTOR_THRESHOLDS", {})}
+     thr_sel_items = _sorted_items(thr_sel)
+
+     with _SELECTOR_THRESHOLD_CACHE_LOCK:
+         cached = _SELECTOR_THRESHOLD_CACHE.get(G)
+         if cached is not None and cached[0] == thr_sel_items:
+             return cached[1]
+
+     thresholds = _compute_selector_thresholds(thr_sel_items)
+
+     with _SELECTOR_THRESHOLD_CACHE_LOCK:
+         cached = _SELECTOR_THRESHOLD_CACHE.get(G)
+         if cached is not None and cached[0] == thr_sel_items:
+             return cached[1]
+         _SELECTOR_THRESHOLD_CACHE[G] = (thr_sel_items, thresholds)
+     return thresholds
+
+
+ def _selector_norms(G: "nx.Graph") -> SelectorNorms:
+     """Compute and cache selector norms for ΔNFR and acceleration.
+
+     Parameters
+     ----------
+     G : nx.Graph
+         Graph for which to compute maxima. Results are stored in ``G.graph``
+         under ``"_sel_norms"``.
+
+     Returns
+     -------
+     dict
+         Mapping with normalisation maxima for ``dnfr`` and ``accel``.
+     """
+     norms = compute_dnfr_accel_max(G)
+     G.graph["_sel_norms"] = norms
+     return norms
+
+
+ def _calc_selector_score(
+     Si: float, dnfr: float, accel: float, weights: SelectorWeights
+ ) -> float:
+     """Compute weighted selector score.
+
+     Parameters
+     ----------
+     Si : float
+         Normalised sense index.
+     dnfr : float
+         Normalised absolute ΔNFR value.
+     accel : float
+         Normalised acceleration (|d²EPI/dt²|).
+     weights : dict[str, float]
+         Normalised weights for ``"w_si"``, ``"w_dnfr"`` and ``"w_accel"``.
+
+     Returns
+     -------
+     float
+         Final weighted score.
+     """
+     return (
+         weights["w_si"] * Si
+         + weights["w_dnfr"] * (1.0 - dnfr)
+         + weights["w_accel"] * (1.0 - accel)
+     )
+
+
+ def _apply_selector_hysteresis(
+     nd: dict[str, Any],
+     Si: float,
+     dnfr: float,
+     accel: float,
+     thr: dict[str, float],
+     margin: float | None,
+ ) -> str | None:
+     """Apply hysteresis when values are near thresholds.
+
+     Parameters
+     ----------
+     nd : dict[str, Any]
+         Node attribute dictionary containing glyph history.
+     Si : float
+         Normalised sense index.
+     dnfr : float
+         Normalised absolute ΔNFR value.
+     accel : float
+         Normalised acceleration.
+     thr : dict[str, float]
+         Thresholds returned by :func:`_selector_thresholds`.
+     margin : float or None
+         When positive, distance from thresholds below which the previous
+         glyph is reused. Falsy margins disable hysteresis entirely, letting
+         selectors bypass the reuse logic.
+
+     Returns
+     -------
+     str or None
+         Previous glyph if hysteresis applies, otherwise ``None``.
+     """
+     # Batch extraction reduces dictionary lookups inside loops.
+     if not margin:
+         return None
+
+     si_hi, si_lo, dnfr_hi, dnfr_lo, accel_hi, accel_lo = itemgetter(
+         "si_hi", "si_lo", "dnfr_hi", "dnfr_lo", "accel_hi", "accel_lo"
+     )(thr)
+
+     d_si = min(abs(Si - si_hi), abs(Si - si_lo))
+     d_dn = min(abs(dnfr - dnfr_hi), abs(dnfr - dnfr_lo))
+     d_ac = min(abs(accel - accel_hi), abs(accel - accel_lo))
+     certeza = min(d_si, d_dn, d_ac)
+     if certeza < margin:
+         hist = nd.get("glyph_history")
+         if not is_non_string_sequence(hist) or not hist:
+             return None
+         prev = hist[-1]
+         if isinstance(prev, str) and prev in HYSTERESIS_GLYPHS:
+             return prev
+     return None
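A short scoring sketch for the private helpers above, again assuming tnfr 6.0.0 is importable; the weight keys and node-dict shape follow the docstrings in this diff, while the numeric thresholds below are illustrative rather than the library defaults:

from tnfr.selector import _apply_selector_hysteresis, _calc_selector_score

# High Si combined with low |ΔNFR| and low acceleration yields a high score.
weights = {"w_si": 0.5, "w_dnfr": 0.3, "w_accel": 0.2}
score = _calc_selector_score(Si=0.8, dnfr=0.1, accel=0.05, weights=weights)
print(round(score, 2))  # 0.86 with these weights

# When every metric sits within ``margin`` of a hi/lo threshold, the previous
# glyph is reused, provided it belongs to HYSTERESIS_GLYPHS.
nd = {"glyph_history": ["NAV"]}
thr = {
    "si_hi": 0.66, "si_lo": 0.33,
    "dnfr_hi": 0.66, "dnfr_lo": 0.33,
    "accel_hi": 0.66, "accel_lo": 0.33,
}
print(_apply_selector_hysteresis(nd, Si=0.34, dnfr=0.34, accel=0.34, thr=thr, margin=0.05))  # "NAV"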
tnfr/selector.pyi ADDED
@@ -0,0 +1,19 @@
+ from __future__ import annotations
+
+ from typing import Any, Mapping
+
+ __all__: Any
+
+ def __getattr__(name: str) -> Any: ...
+
+ def _apply_selector_hysteresis(
+     nd: dict[str, Any],
+     Si: float,
+     dnfr: float,
+     accel: float,
+     thr: Mapping[str, float],
+     margin: float | None,
+ ) -> str | None: ...
+ _calc_selector_score: Any
+ _selector_norms: Any
+ _selector_thresholds: Any