tnfr 4.5.1__py3-none-any.whl → 4.5.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tnfr might be problematic. Click here for more details.

Files changed (78)
  1. tnfr/__init__.py +91 -90
  2. tnfr/alias.py +546 -0
  3. tnfr/cache.py +578 -0
  4. tnfr/callback_utils.py +388 -0
  5. tnfr/cli/__init__.py +75 -0
  6. tnfr/cli/arguments.py +177 -0
  7. tnfr/cli/execution.py +288 -0
  8. tnfr/cli/utils.py +36 -0
  9. tnfr/collections_utils.py +300 -0
  10. tnfr/config.py +19 -28
  11. tnfr/constants/__init__.py +174 -0
  12. tnfr/constants/core.py +159 -0
  13. tnfr/constants/init.py +31 -0
  14. tnfr/constants/metric.py +110 -0
  15. tnfr/constants_glyphs.py +98 -0
  16. tnfr/dynamics/__init__.py +658 -0
  17. tnfr/dynamics/dnfr.py +733 -0
  18. tnfr/dynamics/integrators.py +267 -0
  19. tnfr/dynamics/sampling.py +31 -0
  20. tnfr/execution.py +201 -0
  21. tnfr/flatten.py +283 -0
  22. tnfr/gamma.py +302 -88
  23. tnfr/glyph_history.py +290 -0
  24. tnfr/grammar.py +285 -96
  25. tnfr/graph_utils.py +84 -0
  26. tnfr/helpers/__init__.py +71 -0
  27. tnfr/helpers/numeric.py +87 -0
  28. tnfr/immutable.py +178 -0
  29. tnfr/import_utils.py +228 -0
  30. tnfr/initialization.py +197 -0
  31. tnfr/io.py +246 -0
  32. tnfr/json_utils.py +162 -0
  33. tnfr/locking.py +37 -0
  34. tnfr/logging_utils.py +116 -0
  35. tnfr/metrics/__init__.py +41 -0
  36. tnfr/metrics/coherence.py +829 -0
  37. tnfr/metrics/common.py +151 -0
  38. tnfr/metrics/core.py +101 -0
  39. tnfr/metrics/diagnosis.py +234 -0
  40. tnfr/metrics/export.py +137 -0
  41. tnfr/metrics/glyph_timing.py +189 -0
  42. tnfr/metrics/reporting.py +148 -0
  43. tnfr/metrics/sense_index.py +120 -0
  44. tnfr/metrics/trig.py +181 -0
  45. tnfr/metrics/trig_cache.py +109 -0
  46. tnfr/node.py +214 -159
  47. tnfr/observers.py +126 -136
  48. tnfr/ontosim.py +134 -134
  49. tnfr/operators/__init__.py +420 -0
  50. tnfr/operators/jitter.py +203 -0
  51. tnfr/operators/remesh.py +485 -0
  52. tnfr/presets.py +46 -14
  53. tnfr/rng.py +254 -0
  54. tnfr/selector.py +210 -0
  55. tnfr/sense.py +284 -131
  56. tnfr/structural.py +207 -79
  57. tnfr/tokens.py +60 -0
  58. tnfr/trace.py +329 -94
  59. tnfr/types.py +43 -17
  60. tnfr/validators.py +70 -24
  61. tnfr/value_utils.py +59 -0
  62. tnfr-4.5.2.dist-info/METADATA +379 -0
  63. tnfr-4.5.2.dist-info/RECORD +67 -0
  64. tnfr/cli.py +0 -322
  65. tnfr/constants.py +0 -277
  66. tnfr/dynamics.py +0 -814
  67. tnfr/helpers.py +0 -264
  68. tnfr/main.py +0 -47
  69. tnfr/metrics.py +0 -597
  70. tnfr/operators.py +0 -525
  71. tnfr/program.py +0 -176
  72. tnfr/scenarios.py +0 -34
  73. tnfr-4.5.1.dist-info/METADATA +0 -221
  74. tnfr-4.5.1.dist-info/RECORD +0 -28
  75. {tnfr-4.5.1.dist-info → tnfr-4.5.2.dist-info}/WHEEL +0 -0
  76. {tnfr-4.5.1.dist-info → tnfr-4.5.2.dist-info}/entry_points.txt +0 -0
  77. {tnfr-4.5.1.dist-info → tnfr-4.5.2.dist-info}/licenses/LICENSE.md +0 -0
  78. {tnfr-4.5.1.dist-info → tnfr-4.5.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,267 @@
1
+ from __future__ import annotations
2
+
3
+ import math
4
+ from collections.abc import Mapping
5
+ from typing import Any, Literal
6
+
7
+ import networkx as nx # type: ignore[import-untyped]
8
+
9
+ from ..constants import (
10
+ DEFAULTS,
11
+ get_aliases,
12
+ )
13
+ from ..gamma import _get_gamma_spec, eval_gamma
14
+ from ..alias import get_attr, get_attr_str, set_attr, set_attr_str
15
+
16
+ ALIAS_VF = get_aliases("VF")
17
+ ALIAS_DNFR = get_aliases("DNFR")
18
+ ALIAS_DEPI = get_aliases("DEPI")
19
+ ALIAS_EPI = get_aliases("EPI")
20
+ ALIAS_EPI_KIND = get_aliases("EPI_KIND")
21
+ ALIAS_D2EPI = get_aliases("D2EPI")
22
+
23
+ __all__ = (
24
+ "prepare_integration_params",
25
+ "update_epi_via_nodal_equation",
26
+ )
27
+
28
+
29
def prepare_integration_params(
    G,
    dt: float | None = None,
    t: float | None = None,
    method: Literal["euler", "rk4"] | None = None,
):
    """Validate and normalise ``dt``, ``t`` and ``method`` for integration.

    Returns ``(dt_step, steps, t0, method)``: the effective sub-step size,
    the number of sub-steps and the prepared initial time.
    """
    graph = G.graph

    if dt is None:
        dt = float(graph.get("DT", DEFAULTS["DT"]))
    elif not isinstance(dt, (int, float)):
        raise TypeError("dt must be a number")
    elif dt < 0:
        raise ValueError("dt must be non-negative")
    else:
        dt = float(dt)

    t = float(graph.get("_t", 0.0)) if t is None else float(t)

    chosen = method or graph.get(
        "INTEGRATOR_METHOD", DEFAULTS.get("INTEGRATOR_METHOD", "euler")
    )
    method = chosen.lower()
    if method not in ("euler", "rk4"):
        raise ValueError("method must be 'euler' or 'rk4'")

    # Split dt into sub-steps no larger than DT_MIN (when configured);
    # otherwise integrate in a single step.
    dt_min = float(graph.get("DT_MIN", DEFAULTS.get("DT_MIN", 0.0)))
    steps = int(math.ceil(dt / dt_min)) if dt_min > 0 and dt > dt_min else 1
    return dt / steps, steps, t, method
73
+
74
+
75
def _apply_increments(
    G: Any,
    dt_step: float,
    increments: dict[Any, tuple[float, ...]],
    *,
    method: str,
) -> dict[Any, tuple[float, float, float]]:
    """Fold precomputed increments into ``(EPI, dEPI/dt, d²EPI/dt²)`` per node."""

    use_rk4 = method == "rk4"
    updated: dict[Any, tuple[float, float, float]] = {}
    for node, data in G.nodes(data=True):
        _vf, _dnfr, prev_rate, epi_prev = _node_state(data)
        ks = increments[node]
        if use_rk4:
            k1, k2, k3, k4 = ks
            # Classic RK4 combination: EPIₙ₊₁ = EPIᵢ + Δt/6·(k1 + 2k2 + 2k3 + k4)
            epi_next = epi_prev + (dt_step / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4)
            rate = k4
        else:
            (rate,) = ks
            # Explicit Euler: EPIₙ₊₁ = EPIᵢ + Δt·k1 where k1 = νf·ΔNFR + Γ
            epi_next = epi_prev + dt_step * rate
        # Finite-difference second derivative; guard against a zero step.
        accel = (rate - prev_rate) / dt_step if dt_step != 0 else 0.0
        updated[node] = (epi_next, rate, accel)
    return updated
101
+
102
+
103
def _collect_nodal_increments(
    G: Any,
    gamma_maps: tuple[dict[Any, float], ...],
    *,
    method: str,
) -> dict[Any, tuple[float, ...]]:
    """Merge each node's ``νf·ΔNFR`` base term with the staged Γ values.

    ``gamma_maps`` must hold a single Γ map for Euler integration and four
    (one per RK4 stage) for RK4.
    """

    use_rk4 = method == "rk4"
    combined: dict[Any, tuple[float, ...]] = {}
    for node, data in G.nodes(data=True):
        vf, dnfr, *_ = _node_state(data)
        base = vf * dnfr
        gammas = [gm.get(node, 0.0) for gm in gamma_maps]

        if use_rk4:
            if len(gammas) != 4:
                raise ValueError("rk4 integration requires four gamma maps")
            combined[node] = tuple(base + g for g in gammas)
        else:
            if len(gammas) != 1:
                raise ValueError("euler integration requires one gamma map")
            combined[node] = (base + gammas[0],)

    return combined
139
+
140
+
141
def _build_gamma_increments(
    G: Any,
    dt_step: float,
    t_local: float,
    *,
    method: str,
) -> dict[Any, tuple[float, ...]]:
    """Evaluate Γ at the stage times and merge with the ``νf·ΔNFR`` base terms."""

    if method == "rk4":
        gamma_count = 4
    elif method == "euler":
        gamma_count = 1
    else:
        raise ValueError("method must be 'euler' or 'rk4'")

    spec = G.graph.get("_gamma_spec")
    if spec is None:
        spec = _get_gamma_spec(G)

    spec_type = ""
    if isinstance(spec, Mapping):
        spec_type = str(spec.get("type", "")).lower()

    if spec_type == "none":
        # Γ disabled: every stage contributes zero for every node.
        empty_maps = tuple({} for _ in range(gamma_count))
        return _collect_nodal_increments(G, empty_maps, method=method)

    if method == "rk4":
        # RK4 samples Γ at t, t + Δt/2 (used for both middle stages) and t + Δt.
        t_half = t_local + dt_step / 2.0
        t_end = t_local + dt_step
        g_start = {n: eval_gamma(G, n, t_local) for n in G.nodes}
        g_half = {n: eval_gamma(G, n, t_half) for n in G.nodes}
        g_end = {n: eval_gamma(G, n, t_end) for n in G.nodes}
        gamma_maps = (g_start, g_half, g_half, g_end)
    else:  # method == "euler"
        gamma_maps = ({n: eval_gamma(G, n, t_local) for n in G.nodes},)

    return _collect_nodal_increments(G, gamma_maps, method=method)
180
+
181
+
182
def _integrate_euler(G, dt_step: float, t_local: float):
    """Advance one explicit Euler step and return the per-node updates."""
    increments = _build_gamma_increments(G, dt_step, t_local, method="euler")
    return _apply_increments(G, dt_step, increments, method="euler")
191
+
192
+
193
def _integrate_rk4(G, dt_step: float, t_local: float):
    """Advance one Runge–Kutta order-4 step and return the per-node updates."""
    increments = _build_gamma_increments(G, dt_step, t_local, method="rk4")
    return _apply_increments(G, dt_step, increments, method="rk4")
202
+
203
+
204
def update_epi_via_nodal_equation(
    G,
    *,
    dt: float | None = None,
    t: float | None = None,
    method: Literal["euler", "rk4"] | None = None,
) -> None:
    """Integrate the TNFR nodal equation over graph ``G``.

    Implements the extended nodal equation:
        ∂EPI/∂t = νf · ΔNFR(t) + Γi(R)

    Where:
    - EPI is the node's Primary Information Structure.
    - νf is the node's structural frequency (Hz_str).
    - ΔNFR(t) is the nodal gradient (reorganisation need), typically a mix
      of components (e.g. phase θ, EPI, νf).
    - Γi(R) is the optional network coupling as a function of Kuramoto order
      ``R`` (see :mod:`gamma`), used to modulate network integration.

    TNFR references: nodal equation (manual), νf/ΔNFR/EPI glossary, Γ operator.
    Side effects: caches dEPI and updates EPI via explicit integration, and
    advances ``G.graph['_t']``.
    """
    if not isinstance(
        G, (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)
    ):
        raise TypeError("G must be a networkx graph instance")

    dt_step, steps, t0, method = prepare_integration_params(G, dt, t, method)
    # The method cannot change mid-run, so pick the integrator once.
    integrate = _integrate_rk4 if method == "rk4" else _integrate_euler

    t_local = t0
    for _ in range(steps):
        updates = integrate(G, dt_step, t_local)

        for node, (epi, depi_dt, d2epi) in updates.items():
            nd = G.nodes[node]
            # Preserve the EPI kind label across the numeric EPI rewrite.
            kind = get_attr_str(nd, ALIAS_EPI_KIND, "")
            set_attr(nd, ALIAS_EPI, epi)
            if kind:
                set_attr_str(nd, ALIAS_EPI_KIND, kind)
            set_attr(nd, ALIAS_DEPI, depi_dt)
            set_attr(nd, ALIAS_D2EPI, d2epi)

        t_local += dt_step

    G.graph["_t"] = t_local
253
+
254
+
255
def _node_state(nd: dict[str, Any]) -> tuple[float, float, float, float]:
    """Return ``(νf, ΔNFR, previous dEPI/dt, EPI)`` for a node.

    Each attribute is resolved through the alias helpers and defaults to
    ``0.0`` when missing.
    """

    return (
        get_attr(nd, ALIAS_VF, 0.0),
        get_attr(nd, ALIAS_DNFR, 0.0),
        get_attr(nd, ALIAS_DEPI, 0.0),
        get_attr(nd, ALIAS_EPI, 0.0),
    )
@@ -0,0 +1,31 @@
1
+ from __future__ import annotations
2
+
3
+ from ..cache import cached_node_list
4
+ from ..rng import _rng_for_step, base_seed
5
+
6
+ __all__ = ("update_node_sample",)
7
+
8
+
9
def update_node_sample(G, *, step: int) -> None:
    """Refresh ``G.graph['_node_sample']`` with a random subset of nodes.

    The sample size is capped by ``UM_CANDIDATE_COUNT`` and regenerated on
    every simulation step. Sampling is skipped — the full cached node tuple
    is stored instead — when the limit is non-positive, the graph has fewer
    than 50 nodes, or the limit covers the whole graph. The node snapshot
    comes from :func:`tnfr.cache.cached_node_list` (backed by the
    ``G.graph['_node_list_cache']`` :class:`~tnfr.cache.NodeCache`) and is
    only rebuilt when the graph size changes.
    """
    graph = G.graph
    nodes = cached_node_list(G)
    limit = int(graph.get("UM_CANDIDATE_COUNT", 0))
    total = len(nodes)

    if limit <= 0 or total < 50 or limit >= total:
        # Sampling disabled or unnecessary: expose the full node tuple.
        graph["_node_sample"] = nodes
        return

    rng = _rng_for_step(base_seed(G), step)
    graph["_node_sample"] = rng.sample(nodes, limit)
tnfr/execution.py ADDED
@@ -0,0 +1,201 @@
1
+ """Execution helpers for canonical TNFR programs."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections import deque
6
+ from collections.abc import Callable, Iterable, Sequence
7
+ from typing import Any, Optional
8
+
9
+ import networkx as nx # networkx is used at runtime
10
+
11
+ from .collections_utils import (
12
+ MAX_MATERIALIZE_DEFAULT,
13
+ ensure_collection,
14
+ is_non_string_sequence,
15
+ )
16
+ from .constants import get_param
17
+ from .dynamics import step
18
+ from .flatten import _flatten
19
+ from .glyph_history import ensure_history
20
+ from .grammar import apply_glyph_with_grammar
21
+ from .tokens import OpTag, TARGET, THOL, WAIT, Token
22
+ from .types import Glyph
23
+
24
+ Node = Any
25
+ AdvanceFn = Callable[[Any], None]
26
+ HandlerFn = Callable[
27
+ [nx.Graph, Any, Optional[list[Node]], deque, AdvanceFn],
28
+ Optional[list[Node]],
29
+ ]
30
+
31
+ __all__ = [
32
+ "AdvanceFn",
33
+ "CANONICAL_PRESET_NAME",
34
+ "CANONICAL_PROGRAM_TOKENS",
35
+ "HANDLERS",
36
+ "_apply_glyph_to_targets",
37
+ "_record_trace",
38
+ "compile_sequence",
39
+ "basic_canonical_example",
40
+ "block",
41
+ "play",
42
+ "seq",
43
+ "target",
44
+ "wait",
45
+ ]
46
+
47
+
48
+ CANONICAL_PRESET_NAME = "ejemplo_canonico"
49
+ CANONICAL_PROGRAM_TOKENS: tuple[Token, ...] = (
50
+ Glyph.SHA,
51
+ Glyph.AL,
52
+ Glyph.RA,
53
+ Glyph.ZHIR,
54
+ Glyph.NUL,
55
+ Glyph.THOL,
56
+ )
57
+
58
+
59
def _window(G) -> int:
    """Return the glyph-hysteresis window configured for ``G``."""
    window = get_param(G, "GLYPH_HYSTERESIS_WINDOW")
    return int(window)
61
+
62
+
63
def _apply_glyph_to_targets(
    G, g: Glyph | str, nodes: Optional[Iterable[Node]] = None
):
    """Apply ``g`` to ``nodes`` (or all nodes) respecting the grammar."""

    targets = nodes if nodes is not None else G.nodes()
    apply_glyph_with_grammar(G, targets, g, _window(G))
71
+
72
+
73
def _advance(G, step_fn: AdvanceFn):
    """Run a single simulation step on ``G`` via ``step_fn``."""
    step_fn(G)
75
+
76
+
77
def _record_trace(trace: deque, G, op: OpTag, **data) -> None:
    """Append a trace entry stamped with the current simulation time."""
    entry = {"t": float(G.graph.get("_t", 0.0)), "op": op.name}
    entry.update(data)
    trace.append(entry)
79
+
80
+
81
def _advance_and_record(
    G,
    trace: deque,
    label: OpTag,
    step_fn: AdvanceFn,
    *,
    times: int = 1,
    **data,
) -> None:
    """Advance the simulation ``times`` steps, tracing ``label`` after each.

    The extra keyword ``data`` is copied into every trace entry.
    """

    for _ in range(times):
        _advance(G, step_fn)
        _record_trace(trace, G, label, **data)
93
+
94
+
95
def _handle_target(
    G, payload: TARGET, _curr_target, trace: deque, _step_fn: AdvanceFn
):
    """Resolve a ``TARGET`` token and return the new active node collection."""

    source = payload.nodes if payload.nodes is not None else G.nodes()
    collected = ensure_collection(source, max_materialize=None)
    if not is_non_string_sequence(collected):
        # Normalise lazily-iterable targets into a reusable tuple.
        collected = tuple(collected)
    _record_trace(trace, G, OpTag.TARGET, n=len(collected))
    return collected
105
+
106
+
107
def _handle_wait(
    G, steps: int, curr_target, trace: deque, step_fn: AdvanceFn
):
    """Advance ``steps`` simulation steps, tracing one WAIT entry per step."""
    _advance_and_record(G, trace, OpTag.WAIT, step_fn, times=steps, k=steps)
    return curr_target
112
+
113
+
114
def _handle_glyph(
    G,
    g: str,
    curr_target,
    trace: deque,
    step_fn: AdvanceFn,
    label: OpTag = OpTag.GLYPH,
):
    """Apply glyph ``g`` to the current targets, then advance one step."""
    _apply_glyph_to_targets(G, g, curr_target)
    _advance_and_record(G, trace, label, step_fn, g=g)
    return curr_target
125
+
126
+
127
def _handle_thol(
    G, g, curr_target, trace: deque, step_fn: AdvanceFn
):
    """Handle a THOL token; falls back to the THOL glyph when ``g`` is falsy."""
    glyph = g or Glyph.THOL.value
    return _handle_glyph(
        G, glyph, curr_target, trace, step_fn, label=OpTag.THOL
    )
133
+
134
+
135
# Dispatch table from operation tag to handler; ``play`` resolves every
# flattened token through this mapping.
HANDLERS: dict[OpTag, HandlerFn] = {
    OpTag.TARGET: _handle_target,
    OpTag.WAIT: _handle_wait,
    OpTag.GLYPH: _handle_glyph,
    OpTag.THOL: _handle_thol,
}
141
+
142
+
143
def play(
    G, sequence: Sequence[Token], step_fn: Optional[AdvanceFn] = None
) -> None:
    """Execute a canonical sequence on graph ``G``.

    Tokens are flattened and dispatched through :data:`HANDLERS`; each
    operation is appended to the bounded ``program_trace`` deque kept in
    the graph history.
    """

    step_fn = step_fn or step

    history = ensure_history(G)
    maxlen = int(get_param(G, "PROGRAM_TRACE_MAXLEN"))
    trace = history.get("program_trace")
    if not isinstance(trace, deque) or trace.maxlen != maxlen:
        # (Re)build the trace deque whenever it is missing or mis-sized.
        trace = deque(trace or [], maxlen=maxlen)
        history["program_trace"] = trace

    curr_target: Optional[list[Node]] = None
    for op, payload in _flatten(sequence):
        handler: HandlerFn | None = HANDLERS.get(op)
        if handler is None:
            raise ValueError(f"Unknown operation: {op}")
        curr_target = handler(G, payload, curr_target, trace, step_fn)
164
+
165
+
166
def compile_sequence(
    sequence: Iterable[Token] | Sequence[Token] | Any,
    *,
    max_materialize: int | None = MAX_MATERIALIZE_DEFAULT,
) -> list[tuple[OpTag, Any]]:
    """Return the flattened operations :func:`play` would execute for ``sequence``."""

    ops = _flatten(sequence, max_materialize=max_materialize)
    return ops
174
+
175
+
176
def seq(*tokens: Token) -> list[Token]:
    """Bundle ``tokens`` into a program list."""
    return [*tokens]
178
+
179
+
180
def block(
    *tokens: Token, repeat: int = 1, close: Optional[Glyph] = None
) -> THOL:
    """Wrap ``tokens`` in a THOL block, optionally repeated and force-closed."""
    body = list(tokens)
    return THOL(body=body, repeat=repeat, force_close=close)
184
+
185
+
186
def target(nodes: Optional[Iterable[Node]] = None) -> TARGET:
    """Build a TARGET token selecting ``nodes`` (``None`` selects all nodes)."""
    return TARGET(nodes=nodes)
188
+
189
+
190
def wait(steps: int = 1) -> WAIT:
    """Build a WAIT token; ``steps`` is coerced to an int of at least 1."""
    return WAIT(steps=max(1, int(steps)))
192
+
193
+
194
def basic_canonical_example() -> list[Token]:
    """Return a fresh copy of the canonical reference sequence.

    Kept aligned with :func:`tnfr.presets.get_preset` so CLI defaults and
    the canonical preset agree.
    """

    return list(CANONICAL_PROGRAM_TOKENS)