tnfr 4.5.1-py3-none-any.whl → 4.5.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of tnfr might be problematic.
- tnfr/__init__.py +91 -90
- tnfr/alias.py +546 -0
- tnfr/cache.py +578 -0
- tnfr/callback_utils.py +388 -0
- tnfr/cli/__init__.py +75 -0
- tnfr/cli/arguments.py +177 -0
- tnfr/cli/execution.py +288 -0
- tnfr/cli/utils.py +36 -0
- tnfr/collections_utils.py +300 -0
- tnfr/config.py +19 -28
- tnfr/constants/__init__.py +174 -0
- tnfr/constants/core.py +159 -0
- tnfr/constants/init.py +31 -0
- tnfr/constants/metric.py +110 -0
- tnfr/constants_glyphs.py +98 -0
- tnfr/dynamics/__init__.py +658 -0
- tnfr/dynamics/dnfr.py +733 -0
- tnfr/dynamics/integrators.py +267 -0
- tnfr/dynamics/sampling.py +31 -0
- tnfr/execution.py +201 -0
- tnfr/flatten.py +283 -0
- tnfr/gamma.py +302 -88
- tnfr/glyph_history.py +290 -0
- tnfr/grammar.py +285 -96
- tnfr/graph_utils.py +84 -0
- tnfr/helpers/__init__.py +71 -0
- tnfr/helpers/numeric.py +87 -0
- tnfr/immutable.py +178 -0
- tnfr/import_utils.py +228 -0
- tnfr/initialization.py +197 -0
- tnfr/io.py +246 -0
- tnfr/json_utils.py +162 -0
- tnfr/locking.py +37 -0
- tnfr/logging_utils.py +116 -0
- tnfr/metrics/__init__.py +41 -0
- tnfr/metrics/coherence.py +829 -0
- tnfr/metrics/common.py +151 -0
- tnfr/metrics/core.py +101 -0
- tnfr/metrics/diagnosis.py +234 -0
- tnfr/metrics/export.py +137 -0
- tnfr/metrics/glyph_timing.py +189 -0
- tnfr/metrics/reporting.py +148 -0
- tnfr/metrics/sense_index.py +120 -0
- tnfr/metrics/trig.py +181 -0
- tnfr/metrics/trig_cache.py +109 -0
- tnfr/node.py +214 -159
- tnfr/observers.py +126 -136
- tnfr/ontosim.py +134 -134
- tnfr/operators/__init__.py +420 -0
- tnfr/operators/jitter.py +203 -0
- tnfr/operators/remesh.py +485 -0
- tnfr/presets.py +46 -14
- tnfr/rng.py +254 -0
- tnfr/selector.py +210 -0
- tnfr/sense.py +284 -131
- tnfr/structural.py +207 -79
- tnfr/tokens.py +60 -0
- tnfr/trace.py +329 -94
- tnfr/types.py +43 -17
- tnfr/validators.py +70 -24
- tnfr/value_utils.py +59 -0
- tnfr-4.5.2.dist-info/METADATA +379 -0
- tnfr-4.5.2.dist-info/RECORD +67 -0
- tnfr/cli.py +0 -322
- tnfr/constants.py +0 -277
- tnfr/dynamics.py +0 -814
- tnfr/helpers.py +0 -264
- tnfr/main.py +0 -47
- tnfr/metrics.py +0 -597
- tnfr/operators.py +0 -525
- tnfr/program.py +0 -176
- tnfr/scenarios.py +0 -34
- tnfr-4.5.1.dist-info/METADATA +0 -221
- tnfr-4.5.1.dist-info/RECORD +0 -28
- {tnfr-4.5.1.dist-info → tnfr-4.5.2.dist-info}/WHEEL +0 -0
- {tnfr-4.5.1.dist-info → tnfr-4.5.2.dist-info}/entry_points.txt +0 -0
- {tnfr-4.5.1.dist-info → tnfr-4.5.2.dist-info}/licenses/LICENSE.md +0 -0
- {tnfr-4.5.1.dist-info → tnfr-4.5.2.dist-info}/top_level.txt +0 -0
tnfr/operators.py
DELETED
@@ -1,525 +0,0 @@
-# operators.py — canonical TNFR (ASCII-safe)
-from __future__ import annotations
-from typing import Dict, Any, Optional, Iterable
-import math
-import random
-import hashlib
-import networkx as nx
-from networkx.algorithms import community as nx_comm
-
-from .constants import DEFAULTS, ALIAS_EPI
-from .helpers import (
-    clamp,
-    clamp01,
-    list_mean,
-    invoke_callbacks,
-    _get_attr,
-    _set_attr,
-    _get_attr_str,
-    _set_attr_str,
-)
-from .node import NodoProtocol, NodoNX
-from collections import deque
-
-"""
-This module implements:
-- The 13 glyphs as smooth local operators.
-- A dispatcher `aplicar_glifo` that maps the glyph name (with its typographic apostrophe) to its function.
-- Network RE’MESH: `aplicar_remesh_red` and `aplicar_remesh_si_estabilizacion_global`.
-
-Note on the RE’MESH α (alpha): it is taken, in order of priority, from
-1) G.graph["GLYPH_FACTORS"]["REMESH_alpha"]
-2) G.graph["REMESH_ALPHA"]
-3) DEFAULTS["REMESH_ALPHA"]
-"""
-
-
-def _node_offset(G, n) -> int:
-    """Deterministic node index used for jitter seeds."""
-    mapping = G.graph.get("_node_offset_map")
-    if mapping is None or len(mapping) != G.number_of_nodes():
-        mapping = {node: idx for idx, node in enumerate(sorted(G.nodes(), key=lambda x: str(x)))}
-        G.graph["_node_offset_map"] = mapping
-    return int(mapping.get(n, 0))
-
-# -------------------------
-# Glyphs (local operators)
-# -------------------------
-
-def _fase_media_node(node: NodoProtocol) -> float:
-    x = y = 0.0
-    count = 0
-    for v in node.neighbors():
-        th = getattr(v, "theta", 0.0)
-        x += math.cos(th)
-        y += math.sin(th)
-        count += 1
-    if count == 0:
-        return getattr(node, "theta", 0.0)
-    return math.atan2(y / count, x / count)
-
-
-def _op_AL(node: NodoProtocol) -> None:  # A’L — Emission
-    f = float(node.graph.get("GLYPH_FACTORS", DEFAULTS["GLYPH_FACTORS"]).get("AL_boost", 0.05))
-    node.EPI = node.EPI + f
-    node.epi_kind = "A’L"
-
-
-def _op_EN(node: NodoProtocol) -> None:  # E’N — Reception
-    mix = float(node.graph.get("GLYPH_FACTORS", DEFAULTS["GLYPH_FACTORS"]).get("EN_mix", 0.25))
-    epi = node.EPI
-    neigh = list(node.neighbors())
-    if not neigh:
-        return
-    epi_bar = list_mean(v.EPI for v in neigh) if neigh else epi
-    node.EPI = (1 - mix) * epi + mix * epi_bar
-
-    candidatos = [(abs(node.EPI), node.epi_kind)]
-    for v in neigh:
-        candidatos.append((abs(v.EPI), v.epi_kind))
-    node.epi_kind = max(candidatos, key=lambda x: x[0])[1] or "E’N"
-
-
-def _op_IL(node: NodoProtocol) -> None:  # I’L — Coherence
-    factor = float(node.graph.get("GLYPH_FACTORS", DEFAULTS["GLYPH_FACTORS"]).get("IL_dnfr_factor", 0.7))
-    node.dnfr = factor * getattr(node, "dnfr", 0.0)
-
-
-def _op_OZ(node: NodoProtocol) -> None:  # O’Z — Dissonance
-    factor = float(node.graph.get("GLYPH_FACTORS", DEFAULTS["GLYPH_FACTORS"]).get("OZ_dnfr_factor", 1.3))
-    dnfr = getattr(node, "dnfr", 0.0)
-    if bool(node.graph.get("OZ_NOISE_MODE", False)):
-        base_seed = int(node.graph.get("RANDOM_SEED", 0))
-        step_idx = len(node.graph.get("history", {}).get("C_steps", []))
-        rnd = random.Random(base_seed + step_idx * 1000003 + node.offset() % 1009)
-        sigma = float(node.graph.get("OZ_SIGMA", 0.1))
-        noise = sigma * (2.0 * rnd.random() - 1.0)
-        node.dnfr = dnfr + noise
-    else:
-        node.dnfr = factor * dnfr if abs(dnfr) > 1e-9 else 0.1
-
-
-def _op_UM(node: NodoProtocol) -> None:  # U’M — Coupling
-    k = float(node.graph.get("GLYPH_FACTORS", DEFAULTS["GLYPH_FACTORS"]).get("UM_theta_push", 0.25))
-    th = node.theta
-    thL = _fase_media_node(node)
-    d = ((thL - th + math.pi) % (2 * math.pi) - math.pi)
-    node.theta = th + k * d
-
-    if bool(node.graph.get("UM_FUNCTIONAL_LINKS", False)):
-        thr = float(node.graph.get("UM_COMPAT_THRESHOLD", DEFAULTS.get("UM_COMPAT_THRESHOLD", 0.75)))
-        epi_i = node.EPI
-        si_i = node.Si
-        for j in node.all_nodes():
-            if j is node or node.has_edge(j):
-                continue
-            th_j = j.theta
-            dphi = abs(((th_j - th + math.pi) % (2 * math.pi)) - math.pi) / math.pi
-            epi_j = j.EPI
-            si_j = j.Si
-            epi_sim = 1.0 - abs(epi_i - epi_j) / (abs(epi_i) + abs(epi_j) + 1e-9)
-            si_sim = 1.0 - abs(si_i - si_j)
-            compat = (1 - dphi) * 0.5 + 0.25 * epi_sim + 0.25 * si_sim
-            if compat >= thr:
-                node.add_edge(j, compat)
-
-
-def _op_RA(node: NodoProtocol) -> None:  # R’A — Resonance
-    diff = float(node.graph.get("GLYPH_FACTORS", DEFAULTS["GLYPH_FACTORS"]).get("RA_epi_diff", 0.15))
-    epi = node.EPI
-    neigh = list(node.neighbors())
-    if not neigh:
-        return
-    epi_bar = list_mean(v.EPI for v in neigh)
-    node.EPI = epi + diff * (epi_bar - epi)
-
-    candidatos = [(abs(node.EPI), node.epi_kind)]
-    for v in neigh:
-        candidatos.append((abs(v.EPI), v.epi_kind))
-    node.epi_kind = max(candidatos, key=lambda x: x[0])[1] or "R’A"
-
-
-def _op_SHA(node: NodoProtocol) -> None:  # SH’A — Silence
-    factor = float(node.graph.get("GLYPH_FACTORS", DEFAULTS["GLYPH_FACTORS"]).get("SHA_vf_factor", 0.85))
-    node.vf = factor * node.vf
-
-
-def _op_VAL(node: NodoProtocol) -> None:  # VA’L — Expansion
-    s = float(node.graph.get("GLYPH_FACTORS", DEFAULTS["GLYPH_FACTORS"]).get("VAL_scale", 1.15))
-    node.EPI = s * node.EPI
-    node.epi_kind = "VA’L"
-
-
-def _op_NUL(node: NodoProtocol) -> None:  # NU’L — Contraction
-    s = float(node.graph.get("GLYPH_FACTORS", DEFAULTS["GLYPH_FACTORS"]).get("NUL_scale", 0.85))
-    node.EPI = s * node.EPI
-    node.epi_kind = "NU’L"
-
-
-def _op_THOL(node: NodoProtocol) -> None:  # T’HOL — Self-organization
-    a = float(node.graph.get("GLYPH_FACTORS", DEFAULTS["GLYPH_FACTORS"]).get("THOL_accel", 0.10))
-    node.dnfr = node.dnfr + a * getattr(node, "d2EPI", 0.0)
-
-
-def _op_ZHIR(node: NodoProtocol) -> None:  # Z’HIR — Mutation
-    shift = float(node.graph.get("GLYPH_FACTORS", DEFAULTS["GLYPH_FACTORS"]).get("ZHIR_theta_shift", 1.57079632679))
-    node.theta = node.theta + shift
-
-
-def _op_NAV(node: NodoProtocol) -> None:  # NA’V — Transition
-    dnfr = node.dnfr
-    vf = node.vf
-    gf = node.graph.get("GLYPH_FACTORS", DEFAULTS["GLYPH_FACTORS"])
-    eta = float(gf.get("NAV_eta", 0.5))
-    strict = bool(node.graph.get("NAV_STRICT", False))
-    if strict:
-        base = vf
-    else:
-        sign = 1.0 if dnfr >= 0 else -1.0
-        target = sign * vf
-        base = (1.0 - eta) * dnfr + eta * target
-    j = float(gf.get("NAV_jitter", 0.05))
-    if bool(node.graph.get("NAV_RANDOM", True)):
-        base_seed = int(node.graph.get("RANDOM_SEED", 0))
-        step_idx = len(node.graph.get("history", {}).get("C_steps", []))
-        rnd = random.Random(base_seed + step_idx * 1000003 + node.offset() % 1009)
-        jitter = j * (2.0 * rnd.random() - 1.0)
-    else:
-        jitter = j * (1 if base >= 0 else -1)
-    node.dnfr = base + jitter
-
-
-def _op_REMESH(node: NodoProtocol) -> None:  # RE’MESH — warning only
-    step_idx = len(node.graph.get("history", {}).get("C_steps", []))
-    last_warn = node.graph.get("_remesh_warn_step", None)
-    if last_warn != step_idx:
-        msg = "RE’MESH es a escala de red. Usa aplicar_remesh_si_estabilizacion_global(G) o aplicar_remesh_red(G)."
-        node.graph.setdefault("history", {}).setdefault("events", []).append(("warn", {"step": step_idx, "node": None, "msg": msg}))
-        node.graph["_remesh_warn_step"] = step_idx
-    return
-
-# -------------------------
-# Dispatcher
-# -------------------------
-
-_NAME_TO_OP = {
-    "A’L": _op_AL, "E’N": _op_EN, "I’L": _op_IL, "O’Z": _op_OZ, "U’M": _op_UM,
-    "R’A": _op_RA, "SH’A": _op_SHA, "VA’L": _op_VAL, "NU’L": _op_NUL,
-    "T’HOL": _op_THOL, "Z’HIR": _op_ZHIR, "NA’V": _op_NAV, "RE’MESH": _op_REMESH,
-}
-
-
-def _wrap(fn):
-    def inner(obj, n=None):
-        node = obj if n is None else NodoNX(obj, n)
-        return fn(node)
-    return inner
-
-op_AL = _wrap(_op_AL)
-op_EN = _wrap(_op_EN)
-op_IL = _wrap(_op_IL)
-op_OZ = _wrap(_op_OZ)
-op_UM = _wrap(_op_UM)
-op_RA = _wrap(_op_RA)
-op_SHA = _wrap(_op_SHA)
-op_VAL = _wrap(_op_VAL)
-op_NUL = _wrap(_op_NUL)
-op_THOL = _wrap(_op_THOL)
-op_ZHIR = _wrap(_op_ZHIR)
-op_NAV = _wrap(_op_NAV)
-op_REMESH = _wrap(_op_REMESH)
-
-
-def aplicar_glifo_obj(node: NodoProtocol, glifo: str, *, window: Optional[int] = None) -> None:
-    """Apply ``glifo`` to an object satisfying :class:`NodoProtocol`."""
-
-    glifo = str(glifo)
-    op = _NAME_TO_OP.get(glifo)
-    if not op:
-        return
-    if window is None:
-        window = int(node.graph.get("GLYPH_HYSTERESIS_WINDOW", DEFAULTS["GLYPH_HYSTERESIS_WINDOW"]))
-    node.push_glifo(glifo, window)
-    op(node)
-
-
-def aplicar_glifo(G, n, glifo: str, *, window: Optional[int] = None) -> None:
-    """Adapter for operating on ``networkx`` graphs."""
-    node = NodoNX(G, n)
-    aplicar_glifo_obj(node, glifo, window=window)
-
-
-# -------------------------
-# Network RE’MESH (uses _epi_hist captured in dynamics.step)
-# -------------------------
-
-def _remesh_alpha_info(G):
-    """Return `(alpha, source)` with explicit precedence."""
-    if bool(G.graph.get("REMESH_ALPHA_HARD", DEFAULTS.get("REMESH_ALPHA_HARD", False))):
-        val = float(G.graph.get("REMESH_ALPHA", DEFAULTS["REMESH_ALPHA"]))
-        return val, "REMESH_ALPHA"
-    gf = G.graph.get("GLYPH_FACTORS", DEFAULTS.get("GLYPH_FACTORS", {}))
-    if "REMESH_alpha" in gf:
-        return float(gf["REMESH_alpha"]), "GLYPH_FACTORS.REMESH_alpha"
-    if "REMESH_ALPHA" in G.graph:
-        return float(G.graph["REMESH_ALPHA"]), "REMESH_ALPHA"
-    return float(DEFAULTS["REMESH_ALPHA"]), "DEFAULTS.REMESH_ALPHA"
-
-
-def aplicar_remesh_red(G) -> None:
-    """Network-scale RE’MESH using _epi_hist with multi-scale memory."""
-    tau_g = int(G.graph.get("REMESH_TAU_GLOBAL", G.graph.get("REMESH_TAU", DEFAULTS["REMESH_TAU_GLOBAL"])))
-    tau_l = int(G.graph.get("REMESH_TAU_LOCAL", G.graph.get("REMESH_TAU", DEFAULTS["REMESH_TAU_LOCAL"])))
-    tau_req = max(tau_g, tau_l)
-    alpha, alpha_src = _remesh_alpha_info(G)
-    G.graph["_REMESH_ALPHA_SRC"] = alpha_src
-    hist = G.graph.get("_epi_hist", deque())
-    if len(hist) < tau_req + 1:
-        return
-
-    past_g = hist[-(tau_g + 1)]
-    past_l = hist[-(tau_l + 1)]
-
-    # --- Topology + EPI snapshot (BEFORE) ---
-    try:
-        n_nodes = G.number_of_nodes()
-        n_edges = G.number_of_edges()
-        degs = sorted(d for _, d in G.degree())
-        topo_str = f"n={n_nodes};m={n_edges};deg=" + ",".join(map(str, degs))
-        topo_hash = hashlib.sha1(topo_str.encode()).hexdigest()[:12]
-    except Exception:
-        topo_hash = None
-
-    def _epi_items():
-        for node in G.nodes():
-            yield node, _get_attr(G.nodes[node], ALIAS_EPI, 0.0)
-
-    epi_mean_before = list_mean(v for _, v in _epi_items())
-    epi_checksum_before = hashlib.sha1(
-        str(sorted((str(k), round(v, 6)) for k, v in _epi_items())).encode()
-    ).hexdigest()[:12]
-
-    # --- Blend (1-α)·now + α·old ---
-    for n in G.nodes():
-        nd = G.nodes[n]
-        epi_now = _get_attr(nd, ALIAS_EPI, 0.0)
-        epi_old_l = float(past_l.get(n, epi_now))
-        epi_old_g = float(past_g.get(n, epi_now))
-        mixed = (1 - alpha) * epi_now + alpha * epi_old_l
-        mixed = (1 - alpha) * mixed + alpha * epi_old_g
-        _set_attr(nd, ALIAS_EPI, mixed)
-
-    # --- EPI snapshot (AFTER) ---
-    epi_mean_after = list_mean(_get_attr(G.nodes[n], ALIAS_EPI, 0.0) for n in G.nodes())
-    epi_checksum_after = hashlib.sha1(
-        str(sorted((str(n), round(_get_attr(G.nodes[n], ALIAS_EPI, 0.0), 6)) for n in G.nodes())).encode()
-    ).hexdigest()[:12]
-
-    # --- Metadata and event logging ---
-    step_idx = len(G.graph.get("history", {}).get("C_steps", []))
-    meta = {
-        "alpha": alpha,
-        "alpha_source": alpha_src,
-        "tau_global": tau_g,
-        "tau_local": tau_l,
-        "step": step_idx,
-        # signatures
-        "topo_hash": topo_hash,
-        "epi_mean_before": float(epi_mean_before),
-        "epi_mean_after": float(epi_mean_after),
-        "epi_checksum_before": epi_checksum_before,
-        "epi_checksum_after": epi_checksum_after,
-    }
-
-    # Optional snapshot of recent metrics
-    h = G.graph.get("history", {})
-    if h:
-        if h.get("stable_frac"): meta["stable_frac_last"] = h["stable_frac"][-1]
-        if h.get("phase_sync"): meta["phase_sync_last"] = h["phase_sync"][-1]
-        if h.get("glyph_load_disr"): meta["glyph_disr_last"] = h["glyph_load_disr"][-1]
-
-    G.graph["_REMESH_META"] = meta
-    if G.graph.get("REMESH_LOG_EVENTS", DEFAULTS["REMESH_LOG_EVENTS"]):
-        ev = G.graph.setdefault("history", {}).setdefault("remesh_events", [])
-        ev.append(dict(meta))
-
-    # Γ(R) callbacks
-    invoke_callbacks(G, "on_remesh", dict(meta))
-
-
-def aplicar_remesh_red_topologico(
-    G,
-    mode: Optional[str] = None,
-    *,
-    k: Optional[int] = None,
-    p_rewire: float = 0.2,
-    seed: Optional[int] = None,
-) -> None:
-    """Approximate topological remeshing.
-
-    - mode="knn": connects each node to its ``k`` most EPI-similar neighbours
-      with probability ``p_rewire``.
-    - mode="mst": keeps only a minimum spanning tree based on EPI distance.
-    - mode="community": groups nodes into modular communities and connects them
-      by inter-community similarity.
-
-    Connectivity is always preserved by adding a base MST.
-    """
-    nodes = list(G.nodes())
-    n_before = len(nodes)
-    if n_before <= 1:
-        return
-    rnd = random.Random(seed)
-
-    if mode is None:
-        mode = str(G.graph.get("REMESH_MODE", DEFAULTS.get("REMESH_MODE", "knn")))
-    mode = str(mode)
-
-    # EPI-based similarity (absolute distance)
-    epi = {n: _get_attr(G.nodes[n], ALIAS_EPI, 0.0) for n in nodes}
-    H = nx.Graph()
-    H.add_nodes_from(nodes)
-    for i, u in enumerate(nodes):
-        for v in nodes[i + 1 :]:
-            w = abs(epi[u] - epi[v])
-            H.add_edge(u, v, weight=w)
-    mst = nx.minimum_spanning_tree(H, weight="weight")
-
-    if mode == "community":
-        # Detect communities and rebuild the network with meta-nodes
-        comms = list(nx_comm.greedy_modularity_communities(G))
-        if len(comms) <= 1:
-            new_edges = set(mst.edges())
-        else:
-            k_val = (
-                int(k)
-                if k is not None
-                else int(G.graph.get("REMESH_COMMUNITY_K", DEFAULTS.get("REMESH_COMMUNITY_K", 2)))
-            )
-            # Community graph based on EPI means
-            C = nx.Graph()
-            for idx, comm in enumerate(comms):
-                members = list(comm)
-                epi_mean = list_mean(epi[n] for n in members)
-                C.add_node(idx)
-                _set_attr(C.nodes[idx], ALIAS_EPI, epi_mean)
-                C.nodes[idx]["members"] = members
-            for i in C.nodes():
-                for j in C.nodes():
-                    if i < j:
-                        w = abs(
-                            _get_attr(C.nodes[i], ALIAS_EPI, 0.0)
-                            - _get_attr(C.nodes[j], ALIAS_EPI, 0.0)
-                        )
-                        C.add_edge(i, j, weight=w)
-            mst_c = nx.minimum_spanning_tree(C, weight="weight")
-            new_edges = set(mst_c.edges())
-            for u in C.nodes():
-                epi_u = _get_attr(C.nodes[u], ALIAS_EPI, 0.0)
-                others = [v for v in C.nodes() if v != u]
-                others.sort(key=lambda v: abs(epi_u - _get_attr(C.nodes[v], ALIAS_EPI, 0.0)))
-                for v in others[:k_val]:
-                    if rnd.random() < p_rewire:
-                        new_edges.add(tuple(sorted((u, v))))
-
-        # Replace the original graph's nodes and edges with the communities
-        G.remove_edges_from(list(G.edges()))
-        G.remove_nodes_from(list(G.nodes()))
-        for idx in C.nodes():
-            data = dict(C.nodes[idx])
-            G.add_node(idx, **data)
-        G.add_edges_from(new_edges)
-
-        if G.graph.get("REMESH_LOG_EVENTS", DEFAULTS["REMESH_LOG_EVENTS"]):
-            ev = G.graph.setdefault("history", {}).setdefault("remesh_events", [])
-            mapping = {idx: C.nodes[idx].get("members", []) for idx in C.nodes()}
-            ev.append({
-                "mode": "community",
-                "n_before": n_before,
-                "n_after": G.number_of_nodes(),
-                "mapping": mapping,
-            })
-        return
-
-    # Default / knn / mst modes operate on the original nodes
-    new_edges = set(mst.edges())
-    if mode == "knn":
-        k_val = int(k) if k is not None else int(G.graph.get("REMESH_COMMUNITY_K", DEFAULTS.get("REMESH_COMMUNITY_K", 2)))
-        k_val = max(1, k_val)
-        for u in nodes:
-            sims = sorted(nodes, key=lambda v: abs(epi[u] - epi[v]))
-            for v in sims[1 : k_val + 1]:
-                if rnd.random() < p_rewire:
-                    new_edges.add(tuple(sorted((u, v))))
-
-    G.remove_edges_from(list(G.edges()))
-    G.add_edges_from(new_edges)
-
-def aplicar_remesh_si_estabilizacion_global(G, pasos_estables_consecutivos: Optional[int] = None) -> None:
-    # Windows and thresholds
-    w_estab = (
-        pasos_estables_consecutivos
-        if pasos_estables_consecutivos is not None
-        else int(G.graph.get("REMESH_STABILITY_WINDOW", DEFAULTS["REMESH_STABILITY_WINDOW"]))
-    )
-    frac_req = float(G.graph.get("FRACTION_STABLE_REMESH", DEFAULTS["FRACTION_STABLE_REMESH"]))
-    req_extra = bool(G.graph.get("REMESH_REQUIRE_STABILITY", DEFAULTS["REMESH_REQUIRE_STABILITY"]))
-    min_sync = float(G.graph.get("REMESH_MIN_PHASE_SYNC", DEFAULTS["REMESH_MIN_PHASE_SYNC"]))
-    max_disr = float(G.graph.get("REMESH_MAX_GLYPH_DISR", DEFAULTS["REMESH_MAX_GLYPH_DISR"]))
-    min_sigma = float(G.graph.get("REMESH_MIN_SIGMA_MAG", DEFAULTS["REMESH_MIN_SIGMA_MAG"]))
-    min_R = float(G.graph.get("REMESH_MIN_KURAMOTO_R", DEFAULTS["REMESH_MIN_KURAMOTO_R"]))
-    min_sihi = float(G.graph.get("REMESH_MIN_SI_HI_FRAC", DEFAULTS["REMESH_MIN_SI_HI_FRAC"]))
-
-    hist = G.graph.setdefault("history", {"stable_frac": []})
-    sf = hist.get("stable_frac", [])
-    if len(sf) < w_estab:
-        return
-    # 1) Stability via the fraction of stable nodes
-    win_sf = sf[-w_estab:]
-    cond_sf = all(v >= frac_req for v in win_sf)
-    if not cond_sf:
-        return
-    # 2) Additional gating (if enabled)
-    if req_extra:
-        # phase synchrony (higher is better)
-        ps_ok = True
-        if "phase_sync" in hist and len(hist["phase_sync"]) >= w_estab:
-            win_ps = hist["phase_sync"][-w_estab:]
-            ps_ok = (sum(win_ps)/len(win_ps)) >= min_sync
-        # disruptive glyph load (lower is better)
-        disr_ok = True
-        if "glyph_load_disr" in hist and len(hist["glyph_load_disr"]) >= w_estab:
-            win_disr = hist["glyph_load_disr"][-w_estab:]
-            disr_ok = (sum(win_disr)/len(win_disr)) <= max_disr
-        # sigma magnitude (higher is better)
-        sig_ok = True
-        if "sense_sigma_mag" in hist and len(hist["sense_sigma_mag"]) >= w_estab:
-            win_sig = hist["sense_sigma_mag"][-w_estab:]
-            sig_ok = (sum(win_sig)/len(win_sig)) >= min_sigma
-        # Kuramoto order parameter R (higher is better)
-        R_ok = True
-        if "kuramoto_R" in hist and len(hist["kuramoto_R"]) >= w_estab:
-            win_R = hist["kuramoto_R"][-w_estab:]
-            R_ok = (sum(win_R)/len(win_R)) >= min_R
-        # fraction of nodes with high Si (higher is better)
-        sihi_ok = True
-        if "Si_hi_frac" in hist and len(hist["Si_hi_frac"]) >= w_estab:
-            win_sihi = hist["Si_hi_frac"][-w_estab:]
-            sihi_ok = (sum(win_sihi)/len(win_sihi)) >= min_sihi
-        if not (ps_ok and disr_ok and sig_ok and R_ok and sihi_ok):
-            return
-    # 3) Cooldown
-    last = G.graph.get("_last_remesh_step", -10**9)
-    step_idx = len(sf)
-    cooldown = int(G.graph.get("REMESH_COOLDOWN_VENTANA", DEFAULTS["REMESH_COOLDOWN_VENTANA"]))
-    if step_idx - last < cooldown:
-        return
-    t_now = float(G.graph.get("_t", 0.0))
-    last_ts = float(G.graph.get("_last_remesh_ts", -1e12))
-    cooldown_ts = float(G.graph.get("REMESH_COOLDOWN_TS", DEFAULTS.get("REMESH_COOLDOWN_TS", 0.0)))
-    if cooldown_ts > 0 and (t_now - last_ts) < cooldown_ts:
-        return
-    # 4) Apply and record
-    aplicar_remesh_red(G)
-    G.graph["_last_remesh_step"] = step_idx
-    G.graph["_last_remesh_ts"] = t_now
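For context, the module docstring above documents both the `aplicar_glifo` dispatcher and the RE’MESH α precedence (GLYPH_FACTORS["REMESH_alpha"], then G.graph["REMESH_ALPHA"], then DEFAULTS["REMESH_ALPHA"]). The snippet below is a minimal sketch of that call pattern against the removed 4.5.1 module, not the new 4.5.2 layout (the file list above suggests the same functionality now lives under the tnfr/operators/ package). It assumes bare networkx nodes fall back to default EPI/θ values, as the `_get_attr(..., 0.0)` pattern above suggests; treat it as illustrative rather than a verified run.

import networkx as nx
from tnfr.operators import aplicar_glifo, aplicar_remesh_si_estabilizacion_global

G = nx.gnp_random_graph(20, 0.2, seed=1)

# Alpha precedence from the docstring: GLYPH_FACTORS["REMESH_alpha"] wins over
# G.graph["REMESH_ALPHA"], which wins over DEFAULTS["REMESH_ALPHA"].
G.graph["GLYPH_FACTORS"] = {"REMESH_alpha": 0.3}

# Apply a single glyph through the dispatcher (names carry the typographic apostrophe).
aplicar_glifo(G, 0, "A’L")

# Network-scale RE’MESH; it only fires once the stability gating above is satisfied,
# so on a fresh graph with no history this call simply returns.
aplicar_remesh_si_estabilizacion_global(G)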
tnfr/program.py
DELETED
@@ -1,176 +0,0 @@
-from __future__ import annotations
-"""program.py — canonical sequence API with T’HOL as a first-class construct."""
-from typing import Any, Callable, Iterable, List, Optional, Sequence, Tuple, Union
-from dataclasses import dataclass
-from contextlib import contextmanager
-
-from .constants import DEFAULTS
-from .helpers import register_callback
-from .grammar import enforce_canonical_grammar, on_applied_glifo
-from .operators import aplicar_glifo
-from .sense import GLYPHS_CANONICAL
-
-# Basic types
-Glyph = str
-Node = Any
-AdvanceFn = Callable[[Any], None]  # normally dynamics.step
-
-# ---------------------
-# DSL constructs
-# ---------------------
-
-@dataclass
-class WAIT:
-    steps: int = 1
-
-@dataclass
-class TARGET:
-    nodes: Optional[Iterable[Node]] = None  # None = all nodes
-
-@dataclass
-class THOL:
-    body: Sequence[Any]
-    repeat: int = 1  # how many times to repeat the body
-    force_close: Optional[Glyph] = None  # None → automatic closure (grammar); 'SH’A' or 'NU’L' to force
-
-Token = Union[Glyph, WAIT, TARGET, THOL]
-
-# ---------------------
-# Internal utilities
-# ---------------------
-
-@contextmanager
-def _forced_selector(G, glyph: Glyph):
-    """Temporarily override the glyph selector to force `glyph`.
-    Goes through the canonical grammar before applying.
-    """
-    prev = G.graph.get("glyph_selector")
-    def selector_forced(_G, _n):
-        return glyph
-    G.graph["glyph_selector"] = selector_forced
-    try:
-        yield
-    finally:
-        if prev is None:
-            G.graph.pop("glyph_selector", None)
-        else:
-            G.graph["glyph_selector"] = prev
-
-def _window(G) -> int:
-    return int(G.graph.get("GLYPH_HYSTERESIS_WINDOW", DEFAULTS.get("GLYPH_HYSTERESIS_WINDOW", 1)))
-
-def _all_nodes(G):
-    return list(G.nodes())
-
-# ---------------------
-# Execution core
-# ---------------------
-
-def _apply_glyph_to_targets(G, g: Glyph, nodes: Optional[Iterable[Node]] = None):
-    nodes = list(nodes) if nodes is not None else _all_nodes(G)
-    w = _window(G)
-    # Go through the grammar before applying
-    for n in nodes:
-        g_eff = enforce_canonical_grammar(G, n, g)
-        aplicar_glifo(G, n, g_eff, window=w)
-        on_applied_glifo(G, n, g_eff)
-
-def _advance(G, step_fn: Optional[AdvanceFn] = None):
-    if step_fn is None:
-        from .dynamics import step as step_fn
-    step_fn(G)
-
-# ---------------------
-# Sequence compilation → list of atomic operations
-# ---------------------
-
-def _flatten(seq: Sequence[Token], current_target: Optional[TARGET] = None) -> List[Tuple[str, Any]]:
-    """Return a list of operations (op, payload).
-    op ∈ { 'GLYPH', 'WAIT', 'TARGET' }.
-    """
-    ops: List[Tuple[str, Any]] = []
-    for item in seq:
-        if isinstance(item, TARGET):
-            ops.append(("TARGET", item))
-        elif isinstance(item, WAIT):
-            ops.append(("WAIT", item.steps))
-        elif isinstance(item, THOL):
-            # open a T’HOL block
-            ops.append(("GLYPH", "T’HOL"))
-            for _ in range(max(1, int(item.repeat))):
-                ops.extend(_flatten(item.body, current_target))
-            # explicit closure if requested; otherwise the grammar may close it
-            if item.force_close in ("SH’A", "NU’L"):
-                ops.append(("GLYPH", item.force_close))
-        else:
-            # item should be a glyph
-            g = str(item)
-            if g not in GLYPHS_CANONICAL:
-                # Allow unlisted glyphs (future compatibility), but do not force them
-                pass
-            ops.append(("GLYPH", g))
-    return ops
-
-# ---------------------
-# Public API
-# ---------------------
-
-def play(G, sequence: Sequence[Token], step_fn: Optional[AdvanceFn] = None) -> None:
-    """Run a canonical sequence on the graph `G`.
-
-    Rules:
-    - Use `TARGET(nodes=...)` to change the application subset.
-    - `WAIT(k)` advances k steps with the current selector (it does not force a glyph).
-    - `THOL([...], repeat=r, force_close=…)` opens a self-organizing block,
-      repeats the body and (optionally) forces closure with SH’A/NU’L.
-    - Glyphs are applied through `enforce_canonical_grammar`.
-    """
-    ops = _flatten(sequence)
-    curr_target: Optional[Iterable[Node]] = None
-
-    # Program trace in history
-    if "history" not in G.graph:
-        G.graph["history"] = {}
-    trace = G.graph["history"].setdefault("program_trace", [])
-
-    for op, payload in ops:
-        if op == "TARGET":
-            curr_target = list(payload.nodes) if payload.nodes is not None else None
-            trace.append({"t": float(G.graph.get("_t", 0.0)), "op": "TARGET", "n": len(curr_target or _all_nodes(G))})
-            continue
-        if op == "WAIT":
-            for _ in range(max(1, int(payload))):
-                _advance(G, step_fn)
-            trace.append({"t": float(G.graph.get("_t", 0.0)), "op": "WAIT", "k": int(payload)})
-            continue
-        if op == "GLYPH":
-            g = str(payload)
-            # apply + advance the system by 1 step
-            _apply_glyph_to_targets(G, g, curr_target)
-            _advance(G, step_fn)
-            trace.append({"t": float(G.graph.get("_t", 0.0)), "op": "GLYPH", "g": g})
-            continue
-
-# ---------------------
-# Helpers for building sequences conveniently
-# ---------------------
-
-def seq(*tokens: Token) -> List[Token]:
-    return list(tokens)
-
-def block(*tokens: Token, repeat: int = 1, close: Optional[Glyph] = None) -> THOL:
-    return THOL(body=list(tokens), repeat=repeat, force_close=close)
-
-def target(nodes: Optional[Iterable[Node]] = None) -> TARGET:
-    return TARGET(nodes=nodes)
-
-def wait(steps: int = 1) -> WAIT:
-    return WAIT(steps=max(1, int(steps)))
-
-
-def ejemplo_canonico_basico() -> List[Token]:
-    """Reference canonical sequence.
-
-    SH’A → A’L → R’A → Z’HIR → NU’L → T’HOL
-    """
-    return seq("SH’A", "A’L", "R’A", "Z’HIR", "NU’L", "T’HOL")
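The `play` docstring above spells out the DSL rules (TARGET changes the application subset, WAIT advances the selector without forcing a glyph, THOL opens a self-organizing block, and every glyph passes through `enforce_canonical_grammar`). A minimal usage sketch against this removed 4.5.1 module could look like the following; `G` is assumed to be a graph already prepared by tnfr's initialization, since `play` advances it through `dynamics.step`, and the node ids passed to `target(...)` are placeholders.

from tnfr.program import play, seq, block, wait, target, ejemplo_canonico_basico

# Reference sequence shipped with the module: SH’A → A’L → R’A → Z’HIR → NU’L → T’HOL
program = ejemplo_canonico_basico()

# Hand-built variant: restrict application to two nodes, open a T’HOL block whose
# body repeats twice and is force-closed with SH’A, then let the selector run 3 steps.
program = seq(
    target([0, 1]),
    block("A’L", "R’A", repeat=2, close="SH’A"),
    wait(3),
)

play(G, program)  # G: tnfr-prepared graph built elsewhere (assumed, not shown here)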