tnfr 3.5.0.tar.gz → 4.1.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tnfr might be problematic.
- {tnfr-3.5.0/src/tnfr.egg-info → tnfr-4.1.0}/PKG-INFO +12 -1
- {tnfr-3.5.0 → tnfr-4.1.0}/README.md +11 -0
- {tnfr-3.5.0 → tnfr-4.1.0}/pyproject.toml +4 -2
- tnfr-4.1.0/src/tnfr/__init__.py +59 -0
- tnfr-4.1.0/src/tnfr/cli.py +193 -0
- {tnfr-3.5.0 → tnfr-4.1.0}/src/tnfr/constants.py +51 -18
- {tnfr-3.5.0 → tnfr-4.1.0}/src/tnfr/dynamics.py +67 -29
- tnfr-4.1.0/src/tnfr/gamma.py +100 -0
- tnfr-4.1.0/src/tnfr/grammar.py +155 -0
- {tnfr-3.5.0 → tnfr-4.1.0}/src/tnfr/helpers.py +43 -12
- {tnfr-3.5.0 → tnfr-4.1.0}/src/tnfr/main.py +20 -10
- tnfr-4.1.0/src/tnfr/metrics.py +211 -0
- {tnfr-3.5.0 → tnfr-4.1.0}/src/tnfr/operators.py +34 -5
- tnfr-4.1.0/src/tnfr/presets.py +25 -0
- tnfr-4.1.0/src/tnfr/program.py +176 -0
- tnfr-4.1.0/src/tnfr/scenarios.py +28 -0
- tnfr-4.1.0/src/tnfr/sense.py +215 -0
- tnfr-4.1.0/src/tnfr/trace.py +150 -0
- tnfr-4.1.0/src/tnfr/types.py +17 -0
- {tnfr-3.5.0 → tnfr-4.1.0/src/tnfr.egg-info}/PKG-INFO +12 -1
- {tnfr-3.5.0 → tnfr-4.1.0}/src/tnfr.egg-info/SOURCES.txt +15 -0
- tnfr-4.1.0/src/tnfr.egg-info/entry_points.txt +2 -0
- tnfr-4.1.0/tests/test_cli_sanity.py +12 -0
- tnfr-4.1.0/tests/test_gamma.py +20 -0
- tnfr-4.1.0/tests/test_grammar.py +56 -0
- tnfr-4.1.0/tests/test_invariants.py +84 -0
- tnfr-4.1.0/tests/test_program.py +17 -0
- tnfr-3.5.0/src/tnfr/__init__.py +0 -23
- tnfr-3.5.0/src/tnfr.egg-info/entry_points.txt +0 -2
- {tnfr-3.5.0 → tnfr-4.1.0}/LICENSE.md +0 -0
- {tnfr-3.5.0 → tnfr-4.1.0}/setup.cfg +0 -0
- {tnfr-3.5.0 → tnfr-4.1.0}/src/tnfr/observers.py +0 -0
- {tnfr-3.5.0 → tnfr-4.1.0}/src/tnfr/ontosim.py +0 -0
- {tnfr-3.5.0 → tnfr-4.1.0}/src/tnfr.egg-info/dependency_links.txt +0 -0
- {tnfr-3.5.0 → tnfr-4.1.0}/src/tnfr.egg-info/requires.txt +0 -0
- {tnfr-3.5.0 → tnfr-4.1.0}/src/tnfr.egg-info/top_level.txt +0 -0
- {tnfr-3.5.0 → tnfr-4.1.0}/tests/test_edge_cases.py +0 -0
- {tnfr-3.5.0 → tnfr-4.1.0}/tests/test_history.py +0 -0
- {tnfr-3.5.0 → tnfr-4.1.0}/tests/test_remesh.py +0 -0
{tnfr-3.5.0/src/tnfr.egg-info → tnfr-4.1.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: tnfr
-Version: 3.5.0
+Version: 4.1.0
 Summary: TNFR canónica: dinámica glífica modular sobre redes.
 Author: fmg
 License: MIT
@@ -59,6 +59,8 @@ Dynamic: license-file

 * **Network re-mesh.** Mixes the current state with a past one (memory `τ`) to stabilize the network, with clear precedence for `α` and conditions based on recent stability and synchrony history.

+* **Γ(R) coupling.** Optional network term added to the nodal equation, parameterized by global phase order `R` with gain `β` and threshold `R0` (see `DEFAULTS["GAMMA"]`).
+
 * **Callbacks & observers.** The `Γ(R)` system lets you hook functions before/after each step and after re-mesh, enabling monitoring or external intervention.

 ---
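The new Γ(R) bullet is the user-facing summary of the coupling added in `gamma.py` and `dynamics.py` further down. A minimal sketch of what a linear Kuramoto-gated term could look like, assuming the form β·max(0, R − R0); the actual `eval_gamma`/`GAMMA_REGISTRY` implementation lives in the new `src/tnfr/gamma.py`, which this diff lists but does not display:

```python
import cmath

def kuramoto_R(thetas):
    # Global phase order parameter R = |mean(exp(i*theta))| over all node phases.
    z = sum(cmath.exp(1j * th) for th in thetas) / max(1, len(thetas))
    return abs(z)

def gamma_linear(R, beta=0.0, R0=0.0):
    # Assumed linear variant: inactive below the threshold R0, gain beta above it.
    return beta * max(0.0, R - R0)

# With the shipped DEFAULTS["GAMMA"] = {"type": "none", "beta": 0.0, "R0": 0.0}
# the term vanishes and the nodal equation stays dEPI/dt = vf * dNFR.
print(gamma_linear(kuramoto_R([0.1, 0.2, 0.15]), beta=0.5, R0=0.2))
```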
@@ -81,6 +83,9 @@ Dynamic: license-file

 **Mastering these pieces will let you extend the simulation, build analysis pipelines and connect the theory with computational applications.**

+## Optional Node environment
+The repository includes a minimal `package.json` and `netlify.toml` used for an experimental Remix web demo. They are not required for the core Python package; feel free to ignore them unless you plan to build the demo via `npm run build`.
+
 ## Testing

 Install the dependencies and project in editable mode before running the test suite with `pytest`:
@@ -89,4 +94,10 @@ Install the dependencies and project in editable mode before running the test su
 pip install networkx
 pip install -e .
 pytest
+
+```
+
+## Installation
+```
+pip install tnfr
 ```
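Once 4.1.0 is installed, a quick smoke test of the new modules can mirror the flow used by the new `tnfr/cli.py` shown later in this diff. The function names and signatures below are taken from that file; treat anything beyond them as an assumption:

```python
from tnfr.scenarios import build_graph
from tnfr.constants import inject_defaults, DEFAULTS
from tnfr.metrics import register_metrics_callbacks, glyph_top
from tnfr.dynamics import step

G = build_graph(n=12, topology="ring", seed=1)  # small ring scenario
inject_defaults(G, DEFAULTS)                    # write canonical parameters into G.graph
register_metrics_callbacks(G)                   # accumulate Tg/latency/glifogram history
for _ in range(50):
    step(G)                                     # explicit integration steps
print(glyph_top(G, k=5))                        # most used glyphs so far
```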
{tnfr-3.5.0 → tnfr-4.1.0}/README.md

@@ -32,6 +32,8 @@

 * **Network re-mesh.** Mixes the current state with a past one (memory `τ`) to stabilize the network, with clear precedence for `α` and conditions based on recent stability and synchrony history.

+* **Γ(R) coupling.** Optional network term added to the nodal equation, parameterized by global phase order `R` with gain `β` and threshold `R0` (see `DEFAULTS["GAMMA"]`).
+
 * **Callbacks & observers.** The `Γ(R)` system lets you hook functions before/after each step and after re-mesh, enabling monitoring or external intervention.

 ---
@@ -54,6 +56,9 @@

 **Mastering these pieces will let you extend the simulation, build analysis pipelines and connect the theory with computational applications.**

+## Optional Node environment
+The repository includes a minimal `package.json` and `netlify.toml` used for an experimental Remix web demo. They are not required for the core Python package; feel free to ignore them unless you plan to build the demo via `npm run build`.
+
 ## Testing

 Install the dependencies and project in editable mode before running the test suite with `pytest`:
@@ -62,4 +67,10 @@ Install the dependencies and project in editable mode before running the test su
 pip install networkx
 pip install -e .
 pytest
+
+```
+
+## Installation
+```
+pip install tnfr
 ```
{tnfr-3.5.0 → tnfr-4.1.0}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "tnfr"
-version = "3.5.0"
+version = "4.1.0"
 description = "TNFR canónica: dinámica glífica modular sobre redes."
 readme = "README.md"
 requires-python = ">=3.9"
@@ -28,7 +28,7 @@ classifiers = [
 dependencies = ["networkx>=2.6"]

 [project.scripts]
-tnfr = "tnfr.
+tnfr = "tnfr.cli:main"

 [project.urls]
 Homepage = "https://pypi.org/project/tnfr/"
@@ -37,3 +37,5 @@ Repository = "https://github.com/fermga/Teoria-de-la-naturaleza-fractal-resonant
 [build-system]
 requires = ["setuptools>=61", "wheel"]
 build-backend = "setuptools.build_meta"
+
+
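The `[project.scripts]` change repoints the `tnfr` console command at `tnfr.cli:main`. Because `main(argv)` in the new `cli.py` (shown below) accepts an explicit argument list, the same entry point can also be driven from Python; the flags used here are the ones that file defines:

```python
from tnfr.cli import main

# Equivalent to the console command: tnfr run --nodes 24 --steps 200 --summary
main(["run", "--nodes", "24", "--steps", "200", "--summary"])

# Short run that dumps key metrics to a JSON file (see cmd_metrics below)
main(["metrics", "--nodes", "24", "--steps", "300", "--save", "metrics.json"])
```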
tnfr-4.1.0/src/tnfr/__init__.py (new file)

@@ -0,0 +1,59 @@
+
+from __future__ import annotations
+"""
+TNFR — Teoría de la Naturaleza Fractal Resonante
+API pública del paquete.
+
+Ecuación nodal:
+    ∂EPI/∂t = νf · ΔNFR(t)
+"""
+
+__version__ = "4.1.0"
+
+# Re-exports de la API pública
+from .dynamics import step, run, set_delta_nfr_hook
+from .ontosim import preparar_red
+from .observers import attach_standard_observer, coherencia_global, orden_kuramoto
+from .gamma import GAMMA_REGISTRY, eval_gamma, kuramoto_R_psi
+from .grammar import enforce_canonical_grammar, on_applied_glifo
+from .sense import (
+    GLYPHS_CANONICAL, glyph_angle, glyph_unit,
+    sigma_vector_node, sigma_vector_global,
+    push_sigma_snapshot, sigma_series, sigma_rose,
+    register_sigma_callback,
+)
+from .metrics import (
+    register_metrics_callbacks,
+    Tg_global, Tg_by_node,
+    latency_series, glifogram_series,
+    glyph_top, glyph_dwell_stats,
+)
+from .trace import register_trace
+from .program import play, seq, block, target, wait, THOL, TARGET, WAIT, ejemplo_canonico_basico
+from .cli import main as cli_main
+from .scenarios import build_graph
+from .presets import get_preset
+from .types import NodeState
+
+
+__all__ = [
+    "preparar_red",
+    "step", "run", "set_delta_nfr_hook",
+
+    "attach_standard_observer", "coherencia_global", "orden_kuramoto",
+    "GAMMA_REGISTRY", "eval_gamma", "kuramoto_R_psi",
+    "enforce_canonical_grammar", "on_applied_glifo",
+    "GLYPHS_CANONICAL", "glyph_angle", "glyph_unit",
+    "sigma_vector_node", "sigma_vector_global",
+    "push_sigma_snapshot", "sigma_series", "sigma_rose",
+    "register_sigma_callback",
+    "register_metrics_callbacks",
+    "register_trace",
+    "Tg_global", "Tg_by_node",
+    "latency_series", "glifogram_series",
+    "glyph_top", "glyph_dwell_stats",
+    "play", "seq", "block", "target", "wait", "THOL", "TARGET", "WAIT",
+    "cli_main", "build_graph", "get_preset", "NodeState",
+    "ejemplo_canonico_basico",
+    "__version__",
+]
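The re-exported `program` helpers (`seq`, `block`, `wait`, `target`, `play`) form a small DSL for glyph sequences. A sketch using the same default sequence that `cli.py` (below) builds when no preset or sequence file is given; the exact semantics of each helper live in the new `program.py`, which this diff lists but does not display, and the trailing `wait(3)` is only an illustrative addition:

```python
from tnfr import build_graph, play, seq, block, wait
from tnfr.constants import attach_defaults

G = build_graph(n=24, topology="ring", seed=1)
attach_defaults(G)  # canonical DEFAULTS (grammar, Γ, re-mesh, clamps) into G.graph

program = seq(
    "A’L", "E’N", "I’L",
    block("O’Z", "Z’HIR", "I’L", repeat=1),  # THOL-style block, repeated once
    "R’A", "SH’A",
    wait(3),                                 # hold for three steps (illustrative)
)
play(G, program)
```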
tnfr-4.1.0/src/tnfr/cli.py (new file)

@@ -0,0 +1,193 @@
+from __future__ import annotations
+import argparse
+import json
+from typing import Any, Dict, List, Optional
+
+try:  # pragma: no cover - opcional
+    import yaml  # type: ignore
+except Exception:  # pragma: no cover - yaml es opcional
+    yaml = None
+
+import networkx as nx
+
+from .constants import inject_defaults, DEFAULTS
+from .sense import register_sigma_callback, sigma_series, sigma_rose
+from .metrics import (
+    register_metrics_callbacks,
+    Tg_global,
+    latency_series,
+    glifogram_series,
+    glyph_top,
+)
+from .trace import register_trace
+from .program import play, seq, block, wait, target
+from .dynamics import step, _update_history, default_glyph_selector, parametric_glyph_selector
+from .gamma import GAMMA_REGISTRY
+from .scenarios import build_graph
+from .presets import get_preset
+
+
+def _save_json(path: str, data: Any) -> None:
+    with open(path, "w", encoding="utf-8") as f:
+        json.dump(data, f, ensure_ascii=False, indent=2)
+
+
+def _load_sequence(path: str) -> List[Any]:
+    with open(path, "r", encoding="utf-8") as f:
+        text = f.read()
+    if path.endswith(".yaml") or path.endswith(".yml"):
+        if not yaml:
+            raise RuntimeError("pyyaml no está instalado, usa JSON o instala pyyaml")
+        data = yaml.safe_load(text)
+    else:
+        data = json.loads(text)
+
+    def parse_token(tok: Any):
+        if isinstance(tok, str):
+            return tok
+        if isinstance(tok, dict):
+            if "WAIT" in tok:
+                return wait(int(tok["WAIT"]))
+            if "TARGET" in tok:
+                return target(tok["TARGET"])
+            if "THOL" in tok:
+                spec = tok["THOL"] or {}
+                b = [_parse_inner(x) for x in spec.get("body", [])]
+                return block(*b, repeat=int(spec.get("repeat", 1)), close=spec.get("close"))
+        raise ValueError(f"Token inválido: {tok}")
+
+    def _parse_inner(x: Any):
+        return parse_token(x)
+
+    return [parse_token(t) for t in data]
+
+
+def _attach_callbacks(G: nx.Graph) -> None:
+    inject_defaults(G, DEFAULTS)
+    register_sigma_callback(G)
+    register_metrics_callbacks(G)
+    register_trace(G)
+    _update_history(G)
+
+
+def cmd_run(args: argparse.Namespace) -> int:
+    G = build_graph(n=args.nodes, topology=args.topology, seed=args.seed)
+    _attach_callbacks(G)
+    G.graph.setdefault("GRAMMAR_CANON", DEFAULTS["GRAMMAR_CANON"]).update({"enabled": bool(args.grammar_canon)})
+    G.graph["glyph_selector"] = default_glyph_selector if args.selector == "basic" else parametric_glyph_selector
+    G.graph["GAMMA"] = {"type": args.gamma}
+
+    if args.preset:
+        program = get_preset(args.preset)
+        play(G, program)
+    else:
+        steps = int(args.steps or 100)
+        for _ in range(steps):
+            step(G)
+
+    if args.save_history:
+        _save_json(args.save_history, G.graph.get("history", {}))
+
+    if args.summary:
+        tg = Tg_global(G, normalize=True)
+        lat = latency_series(G)
+        print("Top glifos por Tg:", glyph_top(G, k=5))
+        if lat["value"]:
+            print("Latencia media:", sum(lat["value"]) / max(1, len(lat["value"])) )
+    return 0
+
+
+def cmd_sequence(args: argparse.Namespace) -> int:
+    G = build_graph(n=args.nodes, topology=args.topology, seed=args.seed)
+    _attach_callbacks(G)
+    G.graph.setdefault("GRAMMAR_CANON", DEFAULTS["GRAMMAR_CANON"]).update({"enabled": bool(args.grammar_canon)})
+    G.graph["glyph_selector"] = default_glyph_selector if args.selector == "basic" else parametric_glyph_selector
+    G.graph["GAMMA"] = {"type": args.gamma}
+
+    if args.preset:
+        program = get_preset(args.preset)
+    elif args.sequence_file:
+        program = _load_sequence(args.sequence_file)
+    else:
+        program = seq("A’L", "E’N", "I’L", block("O’Z", "Z’HIR", "I’L", repeat=1), "R’A", "SH’A")
+
+    play(G, program)
+
+    if args.save_history:
+        _save_json(args.save_history, G.graph.get("history", {}))
+    return 0
+
+
+def cmd_metrics(args: argparse.Namespace) -> int:
+    G = build_graph(n=args.nodes, topology=args.topology, seed=args.seed)
+    _attach_callbacks(G)
+    G.graph.setdefault("GRAMMAR_CANON", DEFAULTS["GRAMMAR_CANON"]).update({"enabled": bool(args.grammar_canon)})
+    G.graph["glyph_selector"] = default_glyph_selector if args.selector == "basic" else parametric_glyph_selector
+    G.graph["GAMMA"] = {"type": args.gamma}
+    for _ in range(int(args.steps or 200)):
+        step(G)
+
+    tg = Tg_global(G, normalize=True)
+    lat = latency_series(G)
+    rose = sigma_rose(G)
+    glifo = glifogram_series(G)
+
+    out = {
+        "Tg_global": tg,
+        "latency_mean": (sum(lat["value"]) / max(1, len(lat["value"])) ) if lat["value"] else 0.0,
+        "rose": rose,
+        "glifogram": {k: v[:10] for k, v in glifo.items()},
+    }
+    if args.save:
+        _save_json(args.save, out)
+    else:
+        print(json.dumps(out, ensure_ascii=False, indent=2))
+    return 0
+
+
+def main(argv: Optional[List[str]] = None) -> int:
+    p = argparse.ArgumentParser(prog="tnfr")
+    sub = p.add_subparsers(dest="cmd")
+
+    p_run = sub.add_parser("run", help="Correr escenario libre o preset y opcionalmente exportar history")
+    p_run.add_argument("--nodes", type=int, default=24)
+    p_run.add_argument("--topology", choices=["ring", "complete", "erdos"], default="ring")
+    p_run.add_argument("--steps", type=int, default=200)
+    p_run.add_argument("--seed", type=int, default=1)
+    p_run.add_argument("--preset", type=str, default=None)
+    p_run.add_argument("--save-history", dest="save_history", type=str, default=None)
+    p_run.add_argument("--summary", action="store_true")
+    p_run.add_argument("--no-canon", dest="grammar_canon", action="store_false", default=True, help="Desactiva gramática canónica")
+    p_run.add_argument("--selector", choices=["basic", "param"], default="basic")
+    p_run.add_argument("--gamma", choices=list(GAMMA_REGISTRY.keys()), default="none")
+    p_run.set_defaults(func=cmd_run)
+
+    p_seq = sub.add_parser("sequence", help="Ejecutar una secuencia (preset o YAML/JSON)")
+    p_seq.add_argument("--nodes", type=int, default=24)
+    p_seq.add_argument("--topology", choices=["ring", "complete", "erdos"], default="ring")
+    p_seq.add_argument("--seed", type=int, default=1)
+    p_seq.add_argument("--preset", type=str, default=None)
+    p_seq.add_argument("--sequence-file", type=str, default=None)
+    p_seq.add_argument("--save-history", dest="save_history", type=str, default=None)
+    p_seq.set_defaults(func=cmd_sequence)
+
+    p_met = sub.add_parser("metrics", help="Correr breve y volcar métricas clave")
+    p_met.add_argument("--nodes", type=int, default=24)
+    p_met.add_argument("--topology", choices=["ring", "complete", "erdos"], default="ring")
+    p_met.add_argument("--steps", type=int, default=300)
+    p_met.add_argument("--seed", type=int, default=1)
+    p_met.add_argument("--no-canon", dest="grammar_canon", action="store_false", default=True, help="Desactiva gramática canónica")
+    p_met.add_argument("--selector", choices=["basic", "param"], default="basic")
+    p_met.add_argument("--gamma", choices=list(GAMMA_REGISTRY.keys()), default="none")
+    p_met.add_argument("--save", type=str, default=None)
+    p_met.set_defaults(func=cmd_metrics)
+
+    args = p.parse_args(argv)
+    if not hasattr(args, "func"):
+        p.print_help()
+        return 1
+    return int(args.func(args))
+
+
+if __name__ == "__main__":  # pragma: no cover
+    raise SystemExit(main())
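`_load_sequence` accepts a JSON list (or YAML, when `pyyaml` is available) of tokens: plain glyph strings, `{"WAIT": n}`, `{"TARGET": …}`, or `{"THOL": {"body": […], "repeat": k, "close": …}}`. A sketch of writing such a file from Python and replaying it through the helpers above; `_load_sequence` and `_attach_callbacks` are internal names from this file, so treat this as illustrative rather than a documented API:

```python
import json
from tnfr.cli import _load_sequence, _attach_callbacks
from tnfr.scenarios import build_graph
from tnfr.program import play

tokens = [
    "A’L", "E’N", "I’L",
    {"THOL": {"body": ["O’Z", "Z’HIR", "I’L"], "repeat": 2, "close": "SH’A"}},
    {"WAIT": 3},
    "R’A", "SH’A",
]
with open("sequence.json", "w", encoding="utf-8") as f:
    json.dump(tokens, f, ensure_ascii=False, indent=2)

G = build_graph(n=24, topology="ring", seed=1)
_attach_callbacks(G)                      # defaults + sigma/metrics/trace callbacks
play(G, _load_sequence("sequence.json"))  # tokens become wait()/target()/block() calls
```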
{tnfr-3.5.0 → tnfr-4.1.0}/src/tnfr/constants.py

@@ -10,7 +10,7 @@ from typing import Dict, Any
 # -------------------------
 # Parámetros canónicos
 # -------------------------
-DEFAULTS: Dict[str, Any] = {
+DEFAULTS: Dict[str, Any] = {
     # Discretización
     "DT": 1.0,

@@ -72,8 +72,9 @@ DEFAULTS: Dict[str, Any] = {
     # Criterios de estabilidad (para activar RE’MESH de red)
     "EPS_DNFR_STABLE": 1e-3,
     "EPS_DEPI_STABLE": 1e-3,
-    "FRACTION_STABLE_REMESH": 0.80,  # fracción de nodos estables requerida
-    "REMESH_COOLDOWN_VENTANA": 20,   # pasos mínimos entre RE’MESH
+    "FRACTION_STABLE_REMESH": 0.80,  # fracción de nodos estables requerida
+    "REMESH_COOLDOWN_VENTANA": 20,   # pasos mínimos entre RE’MESH
+    "REMESH_COOLDOWN_TS": 0.0,       # cooldown adicional por tiempo simulado
     # Gating adicional basado en observadores (conmutador + ventana)
     "REMESH_REQUIRE_STABILITY": False,  # si True, exige ventana de estabilidad multi-métrica
     "REMESH_STABILITY_WINDOW": 25,      # tamaño de ventana para evaluar estabilidad
@@ -82,8 +83,9 @@ DEFAULTS: Dict[str, Any] = {
     "REMESH_LOG_EVENTS": True,  # guarda eventos y metadatos del RE’MESH

     # RE’MESH: memoria τ y mezcla α
-    "REMESH_TAU": 8,     # pasos hacia atrás
-    "REMESH_ALPHA": 0.5, # mezcla con pasado
+    "REMESH_TAU": 8,     # pasos hacia atrás
+    "REMESH_ALPHA": 0.5, # mezcla con pasado
+    "REMESH_ALPHA_HARD": False,  # si True ignora GLYPH_FACTORS['REMESH_alpha']

     # Histéresis glífica
     "GLYPH_HYSTERESIS_WINDOW": 7,
@@ -92,13 +94,13 @@ DEFAULTS: Dict[str, Any] = {
     "GLYPH_SELECTOR_MARGIN": 0.05,

     # Ventana para estimar la carga glífica en history/plots
-    "GLYPH_LOAD_WINDOW": 50,
+    "GLYPH_LOAD_WINDOW": 50,

     # Tamaño de ventana para coherencia promedio W̄
     "WBAR_WINDOW": 25,

     # Factores suaves por glifo (operadores)
-    "GLYPH_FACTORS": {
+    "GLYPH_FACTORS": {
         "AL_boost": 0.05,  # A’L — pequeña emisión
         "EN_mix": 0.25,  # E’N — mezcla con vecindad
         "IL_dnfr_factor": 0.7,  # I’L — reduce ΔNFR
@@ -107,7 +109,7 @@ DEFAULTS: Dict[str, Any] = {
         "RA_epi_diff": 0.15,  # R’A — difusión EPI
         "SHA_vf_factor": 0.85,  # SH’A — baja νf
         "VAL_scale": 1.15,  # VA’L — expande EPI
-        "NUL_scale": 0.85,  # NU’L — contrae EPI
+        "NUL_scale": 0.85,  # NU’L — contrae EPI
         "THOL_accel": 0.10,  # T’HOL — acelera (seg. deriv.) si hay umbral
         "ZHIR_theta_shift": 1.57079632679,  # Z’HIR — desplazamiento ~π/2
         "NAV_jitter": 0.05,  # NA’V — pequeña inestabilidad creativa
@@ -147,22 +149,53 @@ DEFAULTS: Dict[str, Any] = {
         "dnfr_hi": 0.50, "dnfr_lo": 0.10,
         "accel_hi": 0.50, "accel_lo": 0.10
     },
-    # Callbacks Γ(R)
-    "
-
+    # Callbacks Γ(R)
+    "GAMMA": {
+        "type": "none",  # 'none' | 'kuramoto_linear' | 'kuramoto_bandpass'
+        "beta": 0.0,
+        "R0": 0.0,
+    },
+    "CALLBACKS_STRICT": False,   # si True, un error en callback detiene; si False, se loguea y continúa
+    "VALIDATORS_STRICT": False,  # si True, alerta si se clampa fuera de rango
+}
+
+# Gramática glífica canónica
+DEFAULTS.setdefault("GRAMMAR_CANON", {
+    "enabled": True,  # activar la gramática canónica
+    "zhir_requires_oz_window": 3,  # cuántos pasos atrás buscamos O’Z
+    "zhir_dnfr_min": 0.05,  # si |ΔNFR|_norm < este valor, no permitimos Z’HIR sin O’Z
+    "thol_min_len": 2,
+    "thol_max_len": 6,
+    "thol_close_dnfr": 0.15,  # si el campo calma, cerramos con SH’A/NU’L
+    "si_high": 0.66,  # umbral para elegir NU’L vs SH’A al cerrar
+})


 # -------------------------
 # Utilidades
 # -------------------------

-def attach_defaults(G, override: bool = False) -> None:
-    """Escribe DEFAULTS en G.graph (sin sobreescribir si override=False)."""
-    G.graph.setdefault("_tnfr_defaults_attached", False)
-    for k, v in DEFAULTS.items():
-        if override or k not in G.graph:
-            G.graph[k] = v
-    G.graph["_tnfr_defaults_attached"] = True
+def attach_defaults(G, override: bool = False) -> None:
+    """Escribe DEFAULTS en G.graph (sin sobreescribir si override=False)."""
+    G.graph.setdefault("_tnfr_defaults_attached", False)
+    for k, v in DEFAULTS.items():
+        if override or k not in G.graph:
+            G.graph[k] = v
+    G.graph["_tnfr_defaults_attached"] = True
+
+
+def inject_defaults(G, defaults: Dict[str, Any] = DEFAULTS, override: bool = False) -> None:
+    """Alias de conveniencia para inyectar ``DEFAULTS`` en ``G.graph``.
+
+    Permite pasar un diccionario de *defaults* alternativo y mantiene la
+    semántica de ``attach_defaults`` existente. Si ``override`` es ``True`` se
+    sobreescriben valores ya presentes.
+    """
+    G.graph.setdefault("_tnfr_defaults_attached", False)
+    for k, v in defaults.items():
+        if override or k not in G.graph:
+            G.graph[k] = v
+    G.graph["_tnfr_defaults_attached"] = True


 def merge_overrides(G, **overrides) -> None:
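Both `attach_defaults` and the new `inject_defaults` only write keys that are missing from `G.graph` (unless `override=True`), so per-run configuration can simply set the keys first. A sketch of enabling the Kuramoto coupling and keeping the canonical grammar on, using only keys that appear in this file:

```python
import networkx as nx
from tnfr.constants import inject_defaults, DEFAULTS

G = nx.erdos_renyi_graph(20, 0.2, seed=1)

# Keys written before inject_defaults are preserved, because defaults never overwrite them.
G.graph["GAMMA"] = {"type": "kuramoto_linear", "beta": 0.2, "R0": 0.3}
G.graph["GRAMMAR_CANON"] = dict(DEFAULTS["GRAMMAR_CANON"], enabled=True)

inject_defaults(G, DEFAULTS)  # fills DT, clamps, re-mesh parameters, selector thresholds, …
print(G.graph["GAMMA"])       # {'type': 'kuramoto_linear', 'beta': 0.2, 'R0': 0.3}
```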
{tnfr-3.5.0 → tnfr-4.1.0}/src/tnfr/dynamics.py

@@ -18,7 +18,9 @@ import networkx as nx

 from .observers import sincronía_fase, carga_glifica, orden_kuramoto, sigma_vector
 from .operators import aplicar_remesh_si_estabilizacion_global
+from .grammar import select_and_apply_with_grammar
 from .constants import DEFAULTS, ALIAS_VF, ALIAS_THETA, ALIAS_DNFR, ALIAS_EPI, ALIAS_SI, ALIAS_dEPI, ALIAS_D2EPI
+from .gamma import eval_gamma
 from .helpers import (
     clamp, clamp01, list_mean, phase_distance,
     _get_attr, _set_attr, media_vecinal, fase_media,
@@ -52,6 +54,7 @@ def _write_dnfr_metadata(G, *, weights: dict, hook_name: str, note: str | None =


 def default_compute_delta_nfr(G) -> None:
+    """Calcula ΔNFR mezclando gradientes de fase, EPI y νf según pesos."""
     w = G.graph.get("DNFR_WEIGHTS", DEFAULTS["DNFR_WEIGHTS"]) # dict
     w_phase = float(w.get("phase", 0.34))
     w_epi = float(w.get("epi", 0.33))
@@ -121,7 +124,23 @@ def dnfr_epi_vf_mixed(G) -> None:
 # Ecuación nodal
 # -------------------------

-def update_epi_via_nodal_equation(G, *, dt: float = None) -> None:
+def update_epi_via_nodal_equation(G, *, dt: float = None, t: float | None = None) -> None:
+    """Ecuación nodal TNFR.
+
+    Implementa la forma extendida de la ecuación nodal:
+        ∂EPI/∂t = νf · ΔNFR(t) + Γi(R)
+
+    Donde:
+    - EPI es la Estructura Primaria de Información del nodo.
+    - νf es la frecuencia estructural del nodo (Hz_str).
+    - ΔNFR(t) es el gradiente nodal (necesidad de reorganización),
+      típicamente una mezcla de componentes (p. ej. fase θ, EPI, νf).
+    - Γi(R) es el acoplamiento de red opcional en función del orden de Kuramoto R
+      (ver gamma.py), usado para modular la integración en red.
+
+    Referencias TNFR: ecuación nodal (manual), glosario νf/ΔNFR/EPI, operador Γ.
+    Efectos secundarios: cachea dEPI y actualiza EPI por integración explícita.
+    """
     if not isinstance(G, (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)):
         raise TypeError("G must be a networkx graph instance")
     if dt is None:
@@ -132,16 +151,22 @@ def update_epi_via_nodal_equation(G, *, dt: float = None) -> None:
     if dt < 0:
         raise ValueError("dt must be non-negative")
     dt = float(dt)
+    if t is None:
+        t = float(G.graph.get("_t", 0.0))
+    else:
+        t = float(t)
     for n in G.nodes():
         nd = G.nodes[n]
         vf = _get_attr(nd, ALIAS_VF, 0.0)
         dnfr = _get_attr(nd, ALIAS_DNFR, 0.0)
         dEPI_dt_prev = _get_attr(nd, ALIAS_dEPI, 0.0)
         dEPI_dt = vf * dnfr
+        dEPI_dt += eval_gamma(G, n, t)
         epi = _get_attr(nd, ALIAS_EPI, 0.0) + dt * dEPI_dt
         _set_attr(nd, ALIAS_EPI, epi)
         _set_attr(nd, ALIAS_dEPI, dEPI_dt)
         _set_attr(nd, ALIAS_D2EPI, (dEPI_dt - dEPI_dt_prev) / dt if dt != 0 else 0.0)
+    G.graph["_t"] = t + dt


 # -------------------------
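The docstring above introduces the extended nodal equation ∂EPI/∂t = νf · ΔNFR(t) + Γi(R), integrated with an explicit Euler step. A standalone numeric illustration of a single node update with made-up values (the real code reads node attributes through the ALIAS_* helpers and clamps the result afterwards):

```python
# One explicit Euler update, mirroring update_epi_via_nodal_equation for a single node.
dt = 1.0      # DEFAULTS["DT"]
vf = 0.8      # νf: structural frequency of the node
dnfr = 0.1    # ΔNFR(t): nodal reorganization gradient
gamma = 0.02  # Γi(R): 0.0 whenever GAMMA type is "none"
epi = 0.5     # current EPI
prev_dEPI = 0.05

dEPI_dt = vf * dnfr + gamma         # ∂EPI/∂t = νf·ΔNFR(t) + Γi(R) = 0.10
epi = epi + dt * dEPI_dt            # EPI ← 0.5 + 1.0·0.10 = 0.60 (then clamped to [EPI_MIN, EPI_MAX])
d2epi = (dEPI_dt - prev_dEPI) / dt  # cached second-derivative estimate = 0.05
print(epi, d2epi)
```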
@@ -162,7 +187,7 @@ def integrar_epi_euler(G, dt: float | None = None) -> None:
     update_epi_via_nodal_equation(G, dt=dt)


-def aplicar_clamps_canonicos(nd: Dict[str, Any], G=None) -> None:
+def aplicar_clamps_canonicos(nd: Dict[str, Any], G=None, node=None) -> None:
     eps_min = float((G.graph.get("EPI_MIN") if G is not None else DEFAULTS["EPI_MIN"]))
     eps_max = float((G.graph.get("EPI_MAX") if G is not None else DEFAULTS["EPI_MAX"]))
     vf_min = float((G.graph.get("VF_MIN") if G is not None else DEFAULTS["VF_MIN"]))
@@ -172,6 +197,14 @@ def aplicar_clamps_canonicos(nd: Dict[str, Any], G=None) -> None:
     vf = _get_attr(nd, ALIAS_VF, 0.0)
     th = _get_attr(nd, ALIAS_THETA, 0.0)

+    strict = bool((G.graph.get("VALIDATORS_STRICT") if G is not None else DEFAULTS.get("VALIDATORS_STRICT", False)))
+    if strict and G is not None:
+        hist = G.graph.setdefault("history", {}).setdefault("clamp_alerts", [])
+        if epi < eps_min or epi > eps_max:
+            hist.append({"node": node, "attr": "EPI", "value": float(epi)})
+        if vf < vf_min or vf > vf_max:
+            hist.append({"node": node, "attr": "VF", "value": float(vf)})
+
     _set_attr(nd, ALIAS_EPI, clamp(epi, eps_min, eps_max))
     _set_attr(nd, ALIAS_VF, clamp(vf, vf_min, vf_max))
     if (G.graph.get("THETA_WRAP") if G is not None else DEFAULTS["THETA_WRAP"]):
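With `VALIDATORS_STRICT` enabled, the clamp pass now records which node and attribute fell outside the canonical range before being clamped. A small sketch of turning it on and reading the alerts, using the key names introduced in this hunk:

```python
from tnfr.scenarios import build_graph
from tnfr.constants import inject_defaults, DEFAULTS
from tnfr.dynamics import step

G = build_graph(n=12, topology="ring", seed=1)
inject_defaults(G, DEFAULTS)
G.graph["VALIDATORS_STRICT"] = True  # log out-of-range EPI/VF instead of clamping silently

for _ in range(100):
    step(G)

for alert in G.graph.get("history", {}).get("clamp_alerts", []):
    print(alert["node"], alert["attr"], alert["value"])
```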
@@ -187,6 +220,10 @@ def coordinar_fase_global_vecinal(G, fuerza_global: float | None = None, fuerza_
     """
     g = G.graph
     defaults = DEFAULTS
+    hist = g.setdefault("history", {})
+    hist_state = hist.setdefault("phase_state", [])
+    hist_R = hist.setdefault("phase_R", [])
+    hist_disr = hist.setdefault("phase_disr", [])
     # 0) Si hay fuerzas explícitas, usar y salir del modo adaptativo
     if (fuerza_global is not None) or (fuerza_vecinal is not None):
         kG = float(
@@ -248,20 +285,15 @@ def coordinar_fase_global_vecinal(G, fuerza_global: float | None = None, fuerza_
     kL = _step(kL, kL_t, kL_min, kL_max)

     # 5) Persistir en G.graph y log de serie
-    g["PHASE_K_GLOBAL"] = kG
-    g["PHASE_K_LOCAL"] = kL
-    hist = g.setdefault("history", {})
-    hist_kG = hist.setdefault("phase_kG", [])
-    hist_kL = hist.setdefault("phase_kL", [])
-    hist_state = hist.setdefault("phase_state", [])
-    hist_R = hist.setdefault("phase_R", [])
-    hist_disr = hist.setdefault("phase_disr", [])
-    hist_kG.append(float(kG))
-    hist_kL.append(float(kL))
     hist_state.append(state)
     hist_R.append(float(R))
     hist_disr.append(float(disr))

+    g["PHASE_K_GLOBAL"] = kG
+    g["PHASE_K_LOCAL"] = kL
+    hist.setdefault("phase_kG", []).append(float(kG))
+    hist.setdefault("phase_kL", []).append(float(kL))
+
     # 6) Fase GLOBAL (centroide) para empuje
     X = list(math.cos(_get_attr(G.nodes[n], ALIAS_THETA, 0.0)) for n in G.nodes())
     Y = list(math.sin(_get_attr(G.nodes[n], ALIAS_THETA, 0.0)) for n in G.nodes())
@@ -320,6 +352,21 @@ def _norms_para_selector(G) -> dict:
     G.graph["_sel_norms"] = norms
     return norms

+
+def _soft_grammar_prefilter(G, n, cand, dnfr, accel):
+    """Gramática suave: evita repeticiones antes de la canónica."""
+    gram = G.graph.get("GRAMMAR", DEFAULTS.get("GRAMMAR", {}))
+    gwin = int(gram.get("window", 3))
+    avoid = set(gram.get("avoid_repeats", []))
+    force_dn = float(gram.get("force_dnfr", 0.60))
+    force_ac = float(gram.get("force_accel", 0.60))
+    fallbacks = gram.get("fallbacks", {})
+    nd = G.nodes[n]
+    if cand in avoid and reciente_glifo(nd, cand, gwin):
+        if not (dnfr >= force_dn or accel >= force_ac):
+            cand = fallbacks.get(cand, cand)
+    return cand
+
 def parametric_glyph_selector(G, n) -> str:
     """Multiobjetivo: combina Si, |ΔNFR|_norm y |accel|_norm + histéresis.
     Reglas base:
@@ -387,22 +434,9 @@ def parametric_glyph_selector(G, n) -> str:
         elif score <= 0.33 and cand in ("NA’V","R’A","I’L"):
             cand = "O’Z" if dnfr >= dnfr_lo else "Z’HIR"
     except NameError:
-        # por si 'score' no se definió (robustez), no forzamos nada
         pass

-
-    gram = G.graph.get("GRAMMAR", DEFAULTS.get("GRAMMAR", {}))
-    gwin = int(gram.get("window", 3))
-    avoid = set(gram.get("avoid_repeats", []))
-    force_dn = float(gram.get("force_dnfr", 0.60))
-    force_ac = float(gram.get("force_accel", 0.60))
-    fallbacks = gram.get("fallbacks", {})
-
-    if cand in avoid and reciente_glifo(nd, cand, gwin):
-        # Solo permitimos repetir si el campo "insiste": dnfr o accel altos (ya normalizados)
-        if not (dnfr >= force_dn or accel >= force_ac):
-            cand = fallbacks.get(cand, "R’A")
-
+    cand = _soft_grammar_prefilter(G, n, cand, dnfr, accel)
     return cand

 # -------------------------
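The extracted `_soft_grammar_prefilter` reads its configuration from `G.graph["GRAMMAR"]`; the keys below are exactly the ones it looks up, while the glyph names used in `avoid_repeats`/`fallbacks` are only illustrative choices:

```python
from tnfr.scenarios import build_graph

G = build_graph(n=12, topology="ring", seed=1)
G.graph["GRAMMAR"] = {
    "window": 3,                        # how far back reciente_glifo() looks for a repeat
    "avoid_repeats": ["Z’HIR", "O’Z"],  # candidates that should not repeat inside that window
    "force_dnfr": 0.60,                 # ...unless |ΔNFR|_norm reaches this value
    "force_accel": 0.60,                # ...or |accel|_norm does
    "fallbacks": {"Z’HIR": "R’A", "O’Z": "NA’V"},  # substitute glyph when a repeat is blocked
}
```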
@@ -432,16 +466,20 @@ def step(G, *, dt: float | None = None, use_Si: bool = True, apply_glyphs: bool
     selector = G.graph.get("glyph_selector", default_glyph_selector)
     from .operators import aplicar_glifo
     window = int(G.graph.get("GLYPH_HYSTERESIS_WINDOW", DEFAULTS["GLYPH_HYSTERESIS_WINDOW"]))
+    use_canon = bool(G.graph.get("GRAMMAR_CANON", DEFAULTS.get("GRAMMAR_CANON", {})).get("enabled", False))
     for n in G.nodes():
-
-
+        if use_canon:
+            select_and_apply_with_grammar(G, n, selector, window)
+        else:
+            g = selector(G, n)
+            aplicar_glifo(G, n, g, window=window)

     # 4) Ecuación nodal
     update_epi_via_nodal_equation(G, dt=dt)

     # 5) Clamps
     for n in G.nodes():
-        aplicar_clamps_canonicos(G.nodes[n], G)
+        aplicar_clamps_canonicos(G.nodes[n], G, n)

     # 6) Coordinación de fase
     coordinar_fase_global_vecinal(G, None, None)