tnfr-6.0.0-py3-none-any.whl → tnfr-7.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of tnfr has been flagged as a potentially problematic release.
- tnfr/__init__.py +50 -5
- tnfr/__init__.pyi +0 -7
- tnfr/_compat.py +0 -1
- tnfr/_generated_version.py +34 -0
- tnfr/_version.py +44 -2
- tnfr/alias.py +14 -13
- tnfr/alias.pyi +5 -37
- tnfr/cache.py +9 -729
- tnfr/cache.pyi +8 -224
- tnfr/callback_utils.py +16 -31
- tnfr/callback_utils.pyi +3 -29
- tnfr/cli/__init__.py +17 -11
- tnfr/cli/__init__.pyi +0 -21
- tnfr/cli/arguments.py +175 -14
- tnfr/cli/arguments.pyi +5 -11
- tnfr/cli/execution.py +434 -48
- tnfr/cli/execution.pyi +14 -24
- tnfr/cli/utils.py +20 -3
- tnfr/cli/utils.pyi +5 -5
- tnfr/config/__init__.py +2 -1
- tnfr/config/__init__.pyi +2 -0
- tnfr/config/feature_flags.py +83 -0
- tnfr/config/init.py +1 -1
- tnfr/config/operator_names.py +1 -14
- tnfr/config/presets.py +6 -26
- tnfr/constants/__init__.py +10 -13
- tnfr/constants/__init__.pyi +10 -22
- tnfr/constants/aliases.py +31 -0
- tnfr/constants/core.py +4 -3
- tnfr/constants/init.py +1 -1
- tnfr/constants/metric.py +3 -3
- tnfr/dynamics/__init__.py +64 -10
- tnfr/dynamics/__init__.pyi +3 -4
- tnfr/dynamics/adaptation.py +79 -13
- tnfr/dynamics/aliases.py +10 -9
- tnfr/dynamics/coordination.py +77 -35
- tnfr/dynamics/dnfr.py +575 -274
- tnfr/dynamics/dnfr.pyi +1 -10
- tnfr/dynamics/integrators.py +47 -33
- tnfr/dynamics/integrators.pyi +0 -1
- tnfr/dynamics/runtime.py +489 -129
- tnfr/dynamics/sampling.py +2 -0
- tnfr/dynamics/selectors.py +101 -62
- tnfr/execution.py +15 -8
- tnfr/execution.pyi +5 -25
- tnfr/flatten.py +7 -3
- tnfr/flatten.pyi +1 -8
- tnfr/gamma.py +22 -26
- tnfr/gamma.pyi +0 -6
- tnfr/glyph_history.py +37 -26
- tnfr/glyph_history.pyi +1 -19
- tnfr/glyph_runtime.py +16 -0
- tnfr/glyph_runtime.pyi +9 -0
- tnfr/immutable.py +20 -15
- tnfr/immutable.pyi +4 -7
- tnfr/initialization.py +5 -7
- tnfr/initialization.pyi +1 -9
- tnfr/io.py +6 -305
- tnfr/io.pyi +13 -8
- tnfr/mathematics/__init__.py +81 -0
- tnfr/mathematics/backend.py +426 -0
- tnfr/mathematics/dynamics.py +398 -0
- tnfr/mathematics/epi.py +254 -0
- tnfr/mathematics/generators.py +222 -0
- tnfr/mathematics/metrics.py +119 -0
- tnfr/mathematics/operators.py +233 -0
- tnfr/mathematics/operators_factory.py +71 -0
- tnfr/mathematics/projection.py +78 -0
- tnfr/mathematics/runtime.py +173 -0
- tnfr/mathematics/spaces.py +247 -0
- tnfr/mathematics/transforms.py +292 -0
- tnfr/metrics/__init__.py +10 -10
- tnfr/metrics/coherence.py +123 -94
- tnfr/metrics/common.py +22 -13
- tnfr/metrics/common.pyi +42 -11
- tnfr/metrics/core.py +72 -14
- tnfr/metrics/diagnosis.py +48 -57
- tnfr/metrics/diagnosis.pyi +3 -7
- tnfr/metrics/export.py +3 -5
- tnfr/metrics/glyph_timing.py +41 -31
- tnfr/metrics/reporting.py +13 -6
- tnfr/metrics/sense_index.py +884 -114
- tnfr/metrics/trig.py +167 -11
- tnfr/metrics/trig.pyi +1 -0
- tnfr/metrics/trig_cache.py +112 -15
- tnfr/node.py +400 -17
- tnfr/node.pyi +55 -38
- tnfr/observers.py +111 -8
- tnfr/observers.pyi +0 -15
- tnfr/ontosim.py +9 -6
- tnfr/ontosim.pyi +0 -5
- tnfr/operators/__init__.py +529 -42
- tnfr/operators/__init__.pyi +14 -0
- tnfr/operators/definitions.py +350 -18
- tnfr/operators/definitions.pyi +0 -14
- tnfr/operators/grammar.py +760 -0
- tnfr/operators/jitter.py +28 -22
- tnfr/operators/registry.py +7 -12
- tnfr/operators/registry.pyi +0 -2
- tnfr/operators/remesh.py +38 -61
- tnfr/rng.py +17 -300
- tnfr/schemas/__init__.py +8 -0
- tnfr/schemas/grammar.json +94 -0
- tnfr/selector.py +3 -4
- tnfr/selector.pyi +1 -1
- tnfr/sense.py +22 -24
- tnfr/sense.pyi +0 -7
- tnfr/structural.py +504 -21
- tnfr/structural.pyi +41 -18
- tnfr/telemetry/__init__.py +23 -1
- tnfr/telemetry/cache_metrics.py +226 -0
- tnfr/telemetry/nu_f.py +423 -0
- tnfr/telemetry/nu_f.pyi +123 -0
- tnfr/tokens.py +1 -4
- tnfr/tokens.pyi +1 -6
- tnfr/trace.py +20 -53
- tnfr/trace.pyi +9 -37
- tnfr/types.py +244 -15
- tnfr/types.pyi +200 -14
- tnfr/units.py +69 -0
- tnfr/units.pyi +16 -0
- tnfr/utils/__init__.py +107 -48
- tnfr/utils/__init__.pyi +80 -11
- tnfr/utils/cache.py +1705 -65
- tnfr/utils/cache.pyi +370 -58
- tnfr/utils/chunks.py +104 -0
- tnfr/utils/chunks.pyi +21 -0
- tnfr/utils/data.py +95 -5
- tnfr/utils/data.pyi +8 -17
- tnfr/utils/graph.py +2 -4
- tnfr/utils/init.py +31 -7
- tnfr/utils/init.pyi +4 -11
- tnfr/utils/io.py +313 -14
- tnfr/{helpers → utils}/numeric.py +50 -24
- tnfr/utils/numeric.pyi +21 -0
- tnfr/validation/__init__.py +92 -4
- tnfr/validation/__init__.pyi +77 -17
- tnfr/validation/compatibility.py +79 -43
- tnfr/validation/compatibility.pyi +4 -6
- tnfr/validation/grammar.py +55 -133
- tnfr/validation/grammar.pyi +37 -8
- tnfr/validation/graph.py +138 -0
- tnfr/validation/graph.pyi +17 -0
- tnfr/validation/rules.py +161 -74
- tnfr/validation/rules.pyi +55 -18
- tnfr/validation/runtime.py +263 -0
- tnfr/validation/runtime.pyi +31 -0
- tnfr/validation/soft_filters.py +170 -0
- tnfr/validation/soft_filters.pyi +37 -0
- tnfr/validation/spectral.py +159 -0
- tnfr/validation/spectral.pyi +46 -0
- tnfr/validation/syntax.py +28 -139
- tnfr/validation/syntax.pyi +7 -4
- tnfr/validation/window.py +39 -0
- tnfr/validation/window.pyi +1 -0
- tnfr/viz/__init__.py +9 -0
- tnfr/viz/matplotlib.py +246 -0
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/METADATA +63 -19
- tnfr-7.0.0.dist-info/RECORD +185 -0
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/licenses/LICENSE.md +1 -1
- tnfr/constants_glyphs.py +0 -16
- tnfr/constants_glyphs.pyi +0 -12
- tnfr/grammar.py +0 -25
- tnfr/grammar.pyi +0 -13
- tnfr/helpers/__init__.py +0 -151
- tnfr/helpers/__init__.pyi +0 -66
- tnfr/helpers/numeric.pyi +0 -12
- tnfr/presets.py +0 -15
- tnfr/presets.pyi +0 -7
- tnfr/utils/io.pyi +0 -10
- tnfr/utils/validators.py +0 -130
- tnfr/utils/validators.pyi +0 -19
- tnfr-6.0.0.dist-info/RECORD +0 -157
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/WHEEL +0 -0
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/entry_points.txt +0 -0
- {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/top_level.txt +0 -0
tnfr/mathematics/dynamics.py
ADDED
@@ -0,0 +1,398 @@
+"""Spectral dynamics helpers driven by ΔNFR generators."""
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Any, NamedTuple, Sequence
+
+import numpy as np
+
+from .backend import MathematicsBackend, ensure_array, ensure_numpy, get_backend
+from .spaces import HilbertSpace
+
+try:  # pragma: no cover - optional SciPy dependency
+    from scipy.linalg import expm as _scipy_expm  # type: ignore
+except Exception:  # pragma: no cover - SciPy not installed
+    _scipy_expm = None
+
+__all__ = ["MathematicalDynamicsEngine", "ContractiveDynamicsEngine"]
+
+
+def _has_backend_matrix_exp(backend: MathematicsBackend) -> bool:
+    """Return ``True`` when ``backend`` exposes a usable ``matrix_exp``."""
+
+    matrix_exp = getattr(backend, "matrix_exp", None)
+    if not callable(matrix_exp):
+        return False
+
+    try:
+        probe = ensure_array([[0.0]], dtype=np.complex128, backend=backend)
+        matrix_exp(probe)
+    except (AttributeError, NotImplementedError):
+        return False
+    except Exception:
+        # Older backends may surface missing implementations as runtime errors;
+        # treat them as signals to fall back to SciPy when available.
+        return False
+    return True
+
+
+def _as_matrix(
+    matrix: Sequence[Sequence[complex]] | np.ndarray | Any,
+    *,
+    backend: MathematicsBackend,
+) -> Any:
+    arr = ensure_array(matrix, dtype=np.complex128, backend=backend)
+    shape = getattr(arr, "shape", None)
+    if shape is None or len(shape) != 2 or shape[0] != shape[1]:
+        raise ValueError("Generator matrix must be square.")
+    return arr
+
+
+def _is_hermitian(matrix: Any, *, atol: float = 1e-9, backend: MathematicsBackend) -> bool:
+    matrix_np = ensure_numpy(matrix, backend=backend)
+    return bool(np.allclose(matrix_np, matrix_np.conj().T, atol=atol))
+
+
+def _vectorize_density(matrix: Any, *, backend: MathematicsBackend) -> Any:
+    arr = ensure_array(matrix, dtype=np.complex128, backend=backend)
+    return arr.transpose(1, 0).reshape((-1,))
+
+
+def _devectorize_density(vector: Any, dim: int, *, backend: MathematicsBackend) -> Any:
+    arr = ensure_array(vector, dtype=np.complex128, backend=backend)
+    return arr.reshape((dim, dim)).transpose(1, 0)
+
+
+class TraceValue(NamedTuple):
+    """Container for trace evaluations in both backend and NumPy space."""
+
+    backend: Any
+    numpy: complex
+
+
+def _trace(matrix: Any, *, backend: MathematicsBackend) -> TraceValue:
+    traced_backend = backend.einsum("ii->", matrix)
+    traced_numpy = complex(np.asarray(ensure_numpy(traced_backend, backend=backend)))
+    return TraceValue(traced_backend, traced_numpy)
+
+
+@dataclass(slots=True)
+class MathematicalDynamicsEngine:
+    """Unitary evolution generated by Hermitian ΔNFR operators.
+
+    The engine accepts inputs expressed as backend-native tensors (NumPy,
+    :mod:`jax`, :mod:`torch`). When the configured backend supports automatic
+    differentiation the evolution map ``exp(-i·Δ·dt)`` remains differentiable
+    because native propagators are now preferred. Passing ``use_scipy=True``
+    explicitly opts into SciPy's exponential; we only fall back automatically
+    when the backend lacks a ``matrix_exp`` implementation.
+    """
+
+    generator: np.ndarray
+    hilbert_space: HilbertSpace
+    atol: float = 1e-9
+    _use_scipy: bool = False
+    backend: MathematicsBackend = field(init=False, repr=False)
+    _generator_backend: Any = field(init=False, repr=False)
+    _numpy_generator: np.ndarray = field(init=False, repr=False)
+
+    def __init__(
+        self,
+        generator: Sequence[Sequence[complex]] | np.ndarray | Any,
+        hilbert_space: HilbertSpace,
+        *,
+        atol: float = 1e-9,
+        use_scipy: bool | None = None,
+        backend: MathematicsBackend | None = None,
+    ) -> None:
+        resolved_backend = backend or get_backend()
+        matrix = _as_matrix(generator, backend=resolved_backend)
+        matrix_np = ensure_numpy(matrix, backend=resolved_backend)
+        if matrix_np.shape != (hilbert_space.dimension, hilbert_space.dimension):
+            raise ValueError("Generator dimension must match the Hilbert space.")
+        if not _is_hermitian(matrix, atol=atol, backend=resolved_backend):
+            raise ValueError("Dynamics generator must be Hermitian.")
+        self.backend = resolved_backend
+        self._generator_backend = matrix
+        self._numpy_generator = matrix_np
+        self.generator = matrix_np
+        self.hilbert_space = hilbert_space
+        self.atol = float(atol)
+        if use_scipy is None:
+            has_matrix_exp = _has_backend_matrix_exp(self.backend)
+            if has_matrix_exp:
+                self._use_scipy = False
+            elif _scipy_expm is not None:
+                self._use_scipy = True
+            else:
+                raise RuntimeError(
+                    "Backend lacks matrix_exp and SciPy is unavailable for fallback."
+                )
+        else:
+            if use_scipy and _scipy_expm is None:
+                raise RuntimeError("SciPy expm requested but SciPy is not available.")
+            self._use_scipy = bool(use_scipy and _scipy_expm is not None)
+
+    def _unitary_backend(self, dt: float) -> Any:
+        if self._use_scipy and _scipy_expm is not None:
+            return ensure_array(
+                _scipy_expm(-1j * dt * self._numpy_generator),
+                backend=self.backend,
+            )
+        return self.backend.matrix_exp(-1j * dt * self._generator_backend)
+
+    def step(
+        self,
+        state: Sequence[complex] | np.ndarray | Any,
+        *,
+        dt: float = 1.0,
+        normalize: bool = True,
+    ) -> Any:
+        """Evolve ``state`` by ``dt`` using the unitary ``exp(-i·Δ·dt)``."""
+
+        vector = ensure_array(state, dtype=np.complex128, backend=self.backend)
+        if vector.shape != (self.hilbert_space.dimension,):
+            raise ValueError(
+                "State dimension mismatch: "
+                f"expected ({self.hilbert_space.dimension},), received {vector.shape!r}."
+            )
+        unitary = self._unitary_backend(dt)
+        evolved = self.backend.matmul(unitary, vector)
+        if normalize:
+            norm_backend = self.backend.norm(evolved)
+            norm_numpy = float(np.asarray(ensure_numpy(norm_backend, backend=self.backend)))
+            if np.isclose(norm_numpy, 0.0, atol=self.atol):
+                raise ValueError("Cannot normalise a null state vector.")
+            evolved = evolved / norm_backend
+        return evolved
+
+    def evolve(
+        self,
+        state: Sequence[complex] | np.ndarray | Any,
+        *,
+        steps: int,
+        dt: float = 1.0,
+        normalize: bool = True,
+    ) -> Any:
+        """Return trajectory of length ``steps + 1`` starting from ``state``."""
+
+        if steps < 0:
+            raise ValueError("steps must be non-negative.")
+        current = ensure_array(state, dtype=np.complex128, backend=self.backend)
+        if current.shape != (self.hilbert_space.dimension,):
+            raise ValueError(
+                "State dimension mismatch: "
+                f"expected ({self.hilbert_space.dimension},), received {current.shape!r}."
+            )
+        trajectory: list[Any] = [current]
+        for _ in range(steps):
+            current = self.step(current, dt=dt, normalize=normalize)
+            trajectory.append(current)
+        return self.backend.stack(trajectory, axis=0)
+
+
+@dataclass(slots=True)
+class ContractiveDynamicsEngine:
+    """Contractive semigroup evolution driven by Lindblad ΔNFR generators.
+
+    Backend-native tensors are accepted for all density operators. When the
+    chosen backend supports automatic differentiation we keep gradients intact
+    by default because native semigroup propagators are preferred. Requesting
+    ``use_scipy=True`` still falls back to SciPy's :func:`scipy.linalg.expm`,
+    primarily for generators missing backend support.
+    """
+
+    generator: np.ndarray
+    hilbert_space: HilbertSpace
+    atol: float = 1e-9
+    _use_scipy: bool = False
+    backend: MathematicsBackend = field(init=False, repr=False)
+    _generator_backend: Any = field(init=False, repr=False)
+    _numpy_generator: np.ndarray = field(init=False, repr=False)
+    _identity_backend: Any = field(init=False, repr=False)
+    _last_contractivity_gap: float = field(init=False, repr=False)
+
+    def __init__(
+        self,
+        generator: Sequence[Sequence[complex]] | np.ndarray | Any,
+        hilbert_space: HilbertSpace,
+        *,
+        atol: float = 1e-9,
+        ensure_contractive: bool = True,
+        use_scipy: bool | None = None,
+        backend: MathematicsBackend | None = None,
+    ) -> None:
+        resolved_backend = backend or get_backend()
+        matrix = _as_matrix(generator, backend=resolved_backend)
+        matrix_np = ensure_numpy(matrix, backend=resolved_backend)
+        expected = hilbert_space.dimension * hilbert_space.dimension
+        if matrix_np.shape != (expected, expected):
+            raise ValueError(
+                "Generator must act on vectorised density operators with dimension "
+                f"{expected} × {expected}."
+            )
+        self.backend = resolved_backend
+        self._generator_backend = matrix
+        self._numpy_generator = matrix_np.astype(np.complex128, copy=False)
+        self.generator = self._numpy_generator
+        self.hilbert_space = hilbert_space
+        self.atol = float(atol)
+        if use_scipy is None:
+            has_matrix_exp = _has_backend_matrix_exp(self.backend)
+            if has_matrix_exp:
+                self._use_scipy = False
+            elif _scipy_expm is not None:
+                self._use_scipy = True
+            else:
+                raise RuntimeError(
+                    "Backend lacks matrix_exp and SciPy is unavailable for fallback."
+                )
+        else:
+            if use_scipy and _scipy_expm is None:
+                raise RuntimeError("SciPy expm requested but SciPy is not available.")
+            self._use_scipy = bool(use_scipy and _scipy_expm is not None)
+
+        self._identity_backend = ensure_array(
+            np.eye(hilbert_space.dimension, dtype=np.complex128),
+            backend=self.backend,
+        )
+        self._last_contractivity_gap = float("nan")
+        if ensure_contractive:
+            eigenvalues_backend, _ = self.backend.eig(self._generator_backend)
+            eigenvalues = ensure_numpy(eigenvalues_backend, backend=self.backend)
+            if np.max(eigenvalues.real) > self.atol:
+                raise ValueError(
+                    "ΔNFR generator is not contractive: positive real eigenvalues detected."
+                )
+
+    def _propagator_backend(self, dt: float) -> Any:
+        if self._use_scipy and _scipy_expm is not None:
+            return ensure_array(
+                _scipy_expm(dt * self._numpy_generator),
+                backend=self.backend,
+            )
+        return self.backend.matrix_exp(dt * self._generator_backend)
+
+    def frobenius_norm(
+        self,
+        density: Sequence[Sequence[complex]] | np.ndarray | Any,
+        *,
+        center: bool = False,
+    ) -> float:
+        """Return the Frobenius norm associated with the Hilbert space."""
+
+        matrix = ensure_array(density, dtype=np.complex128, backend=self.backend)
+        if matrix.shape != (self.hilbert_space.dimension, self.hilbert_space.dimension):
+            raise ValueError(
+                "Density operator dimension mismatch: "
+                f"expected {(self.hilbert_space.dimension, self.hilbert_space.dimension)}, "
+                f"received {matrix.shape!r}."
+            )
+        if center:
+            trace_value = _trace(matrix, backend=self.backend)
+            trace_backend = trace_value.backend / self.hilbert_space.dimension
+            matrix = matrix - trace_backend * self._identity_backend
+        norm_backend = self.backend.norm(matrix, ord="fro")
+        return float(np.asarray(ensure_numpy(norm_backend, backend=self.backend)))

+    @property
+    def last_contractivity_gap(self) -> float:
+        """Return the latest monitored contractivity gap (NaN if unavailable)."""
+
+        return float(self._last_contractivity_gap)
+
+    def step(
+        self,
+        density: Sequence[Sequence[complex]] | np.ndarray | Any,
+        *,
+        dt: float = 1.0,
+        normalize_trace: bool = True,
+        enforce_contractivity: bool = True,
+        raise_on_violation: bool = False,
+        symmetrize: bool = True,
+    ) -> Any:
+        """Advance ``density`` by ``dt`` enforcing trace and contractivity control."""
+
+        matrix = ensure_array(density, dtype=np.complex128, backend=self.backend)
+        dim = self.hilbert_space.dimension
+        if matrix.shape != (dim, dim):
+            raise ValueError(
+                "Density operator dimension mismatch: "
+                f"expected {(dim, dim)}, received {matrix.shape!r}."
+            )
+
+        initial_norm = None
+        if enforce_contractivity:
+            trace_value = _trace(matrix, backend=self.backend)
+            trace_backend = trace_value.backend / dim
+            centered = matrix - trace_backend * self._identity_backend
+            initial_norm_backend = self.backend.norm(centered, ord="fro")
+            initial_norm = float(np.asarray(ensure_numpy(initial_norm_backend, backend=self.backend)))
+
+        vector = _vectorize_density(matrix, backend=self.backend)
+        propagator = self._propagator_backend(dt)
+        evolved_vec = self.backend.matmul(propagator, vector)
+        evolved = _devectorize_density(evolved_vec, dim, backend=self.backend)
+
+        if symmetrize:
+            evolved = 0.5 * (evolved + self.backend.conjugate_transpose(evolved))
+
+        if normalize_trace:
+            trace_value = _trace(evolved, backend=self.backend)
+            if np.isclose(trace_value.numpy, 0.0, atol=self.atol):
+                raise ValueError("Trace collapsed below tolerance during evolution.")
+            if not np.isclose(trace_value.numpy, 1.0, atol=10 * self.atol):
+                evolved = evolved / trace_value.backend
+
+        if enforce_contractivity and initial_norm is not None:
+            trace_value = _trace(evolved, backend=self.backend)
+            trace_backend = trace_value.backend / dim
+            centered = evolved - trace_backend * self._identity_backend
+            evolved_norm_backend = self.backend.norm(centered, ord="fro")
+            evolved_norm = float(np.asarray(ensure_numpy(evolved_norm_backend, backend=self.backend)))
+            self._last_contractivity_gap = initial_norm - evolved_norm
+            if raise_on_violation and self._last_contractivity_gap < -5 * self.atol:
+                raise ValueError(
+                    "Contractivity violated: Frobenius norm increased beyond tolerance."
+                )
+        else:
+            self._last_contractivity_gap = float("nan")
+
+        return evolved
+
+    def evolve(
+        self,
+        density: Sequence[Sequence[complex]] | np.ndarray | Any,
+        *,
+        steps: int,
+        dt: float = 1.0,
+        normalize_trace: bool = True,
+        enforce_contractivity: bool = True,
+        raise_on_violation: bool = False,
+        symmetrize: bool = True,
+    ) -> Any:
+        """Return trajectory of density operators for the contractive semigroup."""
+
+        if steps < 0:
+            raise ValueError("steps must be non-negative.")
+
+        current = ensure_array(density, dtype=np.complex128, backend=self.backend)
+        dim = self.hilbert_space.dimension
+        if current.shape != (dim, dim):
+            raise ValueError(
+                "Density operator dimension mismatch: "
+                f"expected {(dim, dim)}, received {current.shape!r}."
+            )
+
+        trajectory: list[Any] = [current]
+        for _ in range(steps):
+            current = self.step(
+                current,
+                dt=dt,
+                normalize_trace=normalize_trace,
+                enforce_contractivity=enforce_contractivity,
+                raise_on_violation=raise_on_violation,
+                symmetrize=symmetrize,
+            )
+            trajectory.append(current)
+        return self.backend.stack(trajectory, axis=0)
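The following is a minimal usage sketch for the two engines in the added module; it is not code from the package. It assumes HilbertSpace (imported from tnfr/mathematics/spaces.py, whose contents are not reproduced in this diff) can be constructed as HilbertSpace(dimension=2), and that either the active backend provides matrix_exp or SciPy is installed so the fallback applies. The Pauli-Z generator and the diagonal dephasing superoperator are illustrative choices, not part of the release.

    import numpy as np

    from tnfr.mathematics.dynamics import (
        ContractiveDynamicsEngine,
        MathematicalDynamicsEngine,
    )
    from tnfr.mathematics.spaces import HilbertSpace

    # Assumed constructor: the real HilbertSpace signature is not shown in this diff.
    space = HilbertSpace(dimension=2)

    # Unitary engine: Hermitian 2x2 generator (Pauli-Z), norm-preserving steps.
    sigma_z = np.array([[1.0, 0.0], [0.0, -1.0]], dtype=np.complex128)
    unitary_engine = MathematicalDynamicsEngine(sigma_z, space)
    psi0 = np.array([1.0, 1.0], dtype=np.complex128) / np.sqrt(2.0)
    trajectory = unitary_engine.evolve(psi0, steps=10, dt=0.1)  # shape (11, 2)

    # Contractive engine: pure-dephasing Lindblad superoperator acting on
    # vectorised 2x2 densities (4x4 matrix, eigenvalue real parts <= 0).
    gamma = 0.5
    dephasing = np.diag([0.0, -gamma, -gamma, 0.0]).astype(np.complex128)
    contractive_engine = ContractiveDynamicsEngine(dephasing, space)
    rho0 = np.array([[0.5, 0.5], [0.5, 0.5]], dtype=np.complex128)
    rho1 = contractive_engine.step(rho0, dt=0.2)
    gap = contractive_engine.last_contractivity_gap  # >= 0 for a genuine contraction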
tnfr/mathematics/epi.py
ADDED
@@ -0,0 +1,254 @@
+"""EPI elements and algebraic helpers for the TNFR Banach space."""
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Callable, Mapping, Sequence
+
+import numpy as np
+
+__all__ = [
+    "BEPIElement",
+    "CoherenceEvaluation",
+    "evaluate_coherence_transform",
+]
+
+
+class _EPIValidators:
+    """Shared validation helpers for EPI Banach constructions."""
+
+    _complex_dtype = np.complex128
+
+    @staticmethod
+    def _as_array(values: Sequence[complex] | np.ndarray, *, dtype: np.dtype) -> np.ndarray:
+        array = np.asarray(values, dtype=dtype)
+        if array.ndim != 1:
+            raise ValueError("Inputs must be one-dimensional arrays.")
+        if not np.all(np.isfinite(array)):
+            raise ValueError("Inputs must not contain NaNs or infinities.")
+        return array
+
+    @classmethod
+    def _validate_grid(cls, grid: Sequence[float] | np.ndarray, expected_size: int) -> np.ndarray:
+        array = np.asarray(grid, dtype=float)
+        if array.ndim != 1:
+            raise ValueError("x_grid must be one-dimensional.")
+        if array.size != expected_size:
+            raise ValueError("x_grid length must match continuous component.")
+        if array.size < 2:
+            raise ValueError("x_grid must contain at least two points.")
+        if not np.all(np.isfinite(array)):
+            raise ValueError("x_grid must not contain NaNs or infinities.")
+
+        spacings = np.diff(array)
+        if np.any(spacings <= 0):
+            raise ValueError("x_grid must be strictly increasing.")
+        if not np.allclose(spacings, spacings[0], rtol=1e-9, atol=1e-12):
+            raise ValueError("x_grid must be uniform for finite-difference stability.")
+        return array
+
+    @classmethod
+    def validate_domain(
+        cls,
+        f_continuous: Sequence[complex] | np.ndarray,
+        a_discrete: Sequence[complex] | np.ndarray,
+        x_grid: Sequence[float] | np.ndarray | None = None,
+    ) -> tuple[np.ndarray, np.ndarray, np.ndarray | None]:
+        """Validate dimensionality and sampling grid compatibility."""
+
+        f_array = cls._as_array(f_continuous, dtype=cls._complex_dtype)
+        a_array = cls._as_array(a_discrete, dtype=cls._complex_dtype)
+
+        if x_grid is None:
+            return f_array, a_array, None
+
+        grid_array = cls._validate_grid(x_grid, f_array.size)
+        return f_array, a_array, grid_array
+
+
+@dataclass(frozen=True)
+class BEPIElement(_EPIValidators):
+    """Concrete :math:`C^0([0,1]) \oplus \ell^2` element with TNFR operations."""
+
+    f_continuous: Sequence[complex] | np.ndarray
+    a_discrete: Sequence[complex] | np.ndarray
+    x_grid: Sequence[float] | np.ndarray
+
+    def __post_init__(self) -> None:
+        f_array, a_array, grid = self.validate_domain(self.f_continuous, self.a_discrete, self.x_grid)
+        if grid is None:
+            raise ValueError("x_grid is mandatory for BEPIElement instances.")
+        object.__setattr__(self, "f_continuous", f_array)
+        object.__setattr__(self, "a_discrete", a_array)
+        object.__setattr__(self, "x_grid", grid)
+
+    def _assert_compatible(self, other: BEPIElement) -> None:
+        if self.f_continuous.shape != other.f_continuous.shape:
+            raise ValueError("Continuous components must share shape for direct sums.")
+        if self.a_discrete.shape != other.a_discrete.shape:
+            raise ValueError("Discrete tails must share shape for direct sums.")
+        if not np.allclose(self.x_grid, other.x_grid, rtol=1e-12, atol=1e-12):
+            raise ValueError("x_grid must match to combine EPI elements.")
+
+    def direct_sum(self, other: BEPIElement) -> BEPIElement:
+        """Return the algebraic direct sum ``self ⊕ other``."""
+
+        self._assert_compatible(other)
+        return BEPIElement(self.f_continuous + other.f_continuous, self.a_discrete + other.a_discrete, self.x_grid)
+
+    def tensor(self, vector: Sequence[complex] | np.ndarray) -> np.ndarray:
+        """Return the tensor product between the discrete tail and a Hilbert vector."""
+
+        hilbert_vector = self._as_array(vector, dtype=self._complex_dtype)
+        return np.outer(self.a_discrete, hilbert_vector)
+
+    def adjoint(self) -> BEPIElement:
+        """Return the conjugate element representing the ``*`` operation."""
+
+        return BEPIElement(np.conjugate(self.f_continuous), np.conjugate(self.a_discrete), self.x_grid)
+
+    @staticmethod
+    def _apply_transform(transform: Callable[[np.ndarray], np.ndarray], values: np.ndarray) -> np.ndarray:
+        result = np.asarray(transform(values), dtype=np.complex128)
+        if result.shape != values.shape:
+            raise ValueError("Transforms must preserve the element shape.")
+        if not np.all(np.isfinite(result)):
+            raise ValueError("Transforms must return finite values.")
+        return result
+
+    def compose(
+        self,
+        transform: Callable[[np.ndarray], np.ndarray],
+        *,
+        spectral_transform: Callable[[np.ndarray], np.ndarray] | None = None,
+    ) -> BEPIElement:
+        """Compose the element with linear transforms on both components."""
+
+        new_f = self._apply_transform(transform, self.f_continuous)
+        spectral_fn = spectral_transform or transform
+        new_a = self._apply_transform(spectral_fn, self.a_discrete)
+        return BEPIElement(new_f, new_a, self.x_grid)
+
+    def _max_magnitude(self) -> float:
+        mags = []
+        if self.f_continuous.size:
+            mags.append(float(np.max(np.abs(self.f_continuous))))
+        if self.a_discrete.size:
+            mags.append(float(np.max(np.abs(self.a_discrete))))
+        return float(max(mags)) if mags else 0.0
+
+    def __float__(self) -> float:
+        return self._max_magnitude()
+
+    def __abs__(self) -> float:
+        return self._max_magnitude()
+
+
+@dataclass(frozen=True)
+class CoherenceEvaluation:
+    """Container describing the outcome of a coherence transform evaluation."""
+
+    element: BEPIElement
+    transformed: BEPIElement
+    coherence_before: float
+    coherence_after: float
+    kappa: float
+    tolerance: float
+    satisfied: bool
+    required: float
+    deficit: float
+    ratio: float
+
+
+def evaluate_coherence_transform(
+    element: BEPIElement,
+    transform: Callable[[BEPIElement], BEPIElement],
+    *,
+    kappa: float = 1.0,
+    tolerance: float = 1e-9,
+    space: "BanachSpaceEPI" | None = None,
+    norm_kwargs: Mapping[str, float] | None = None,
+) -> CoherenceEvaluation:
+    """Apply ``transform`` to ``element`` and verify a coherence inequality.
+
+    Parameters
+    ----------
+    element:
+        The :class:`BEPIElement` subject to the transformation.
+    transform:
+        Callable receiving ``element`` and returning the transformed
+        :class:`BEPIElement`. The callable is expected to preserve the
+        structural sampling grid and dimensionality of the element.
+    kappa:
+        Factor on the right-hand side of the inequality ``C(T(EPI)) ≥ κ·C(EPI)``.
+    tolerance:
+        Non-negative slack applied to the inequality. When
+        ``C(T(EPI)) + tolerance`` exceeds ``κ·C(EPI)`` the check succeeds.
+    space:
+        Optional :class:`~tnfr.mathematics.spaces.BanachSpaceEPI` instance used
+        to compute the coherence norm. When omitted, a local instance is
+        constructed to avoid circular imports at module import time.
+    norm_kwargs:
+        Optional keyword arguments forwarded to
+        :meth:`BanachSpaceEPI.coherence_norm`.
+
+    Returns
+    -------
+    CoherenceEvaluation
+        Dataclass capturing the before/after coherence values together with the
+        inequality verdict.
+    """
+
+    if kappa < 0:
+        raise ValueError("kappa must be non-negative.")
+    if tolerance < 0:
+        raise ValueError("tolerance must be non-negative.")
+
+    if norm_kwargs is None:
+        norm_kwargs = {}
+
+    from .spaces import BanachSpaceEPI  # Local import to avoid circular dependency
+
+    working_space = space if space is not None else BanachSpaceEPI()
+
+    coherence_before = working_space.coherence_norm(
+        element.f_continuous,
+        element.a_discrete,
+        x_grid=element.x_grid,
+        **norm_kwargs,
+    )
+
+    transformed = transform(element)
+    if not isinstance(transformed, BEPIElement):
+        raise TypeError("transform must return a BEPIElement instance.")
+
+    coherence_after = working_space.coherence_norm(
+        transformed.f_continuous,
+        transformed.a_discrete,
+        x_grid=transformed.x_grid,
+        **norm_kwargs,
+    )
+
+    required = kappa * coherence_before
+    satisfied = coherence_after + tolerance >= required
+    deficit = max(0.0, required - coherence_after)
+
+    if coherence_before > 0:
+        ratio = coherence_after / coherence_before
+    elif coherence_after > tolerance:
+        ratio = float("inf")
+    else:
+        ratio = 1.0
+
+    return CoherenceEvaluation(
+        element=element,
+        transformed=transformed,
+        coherence_before=coherence_before,
+        coherence_after=coherence_after,
+        kappa=kappa,
+        tolerance=tolerance,
+        satisfied=satisfied,
+        required=required,
+        deficit=deficit,
+        ratio=ratio,
+    )
+
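As a companion to the added module above, here is a minimal, hypothetical sketch of the BEPIElement / evaluate_coherence_transform API; it is not code from the package. The coherence values come from BanachSpaceEPI.coherence_norm, which lives in tnfr/mathematics/spaces.py and is not reproduced in this diff, so the expected ratio of roughly 0.5 assumes that norm scales linearly; the halve transform is purely illustrative.

    import numpy as np

    from tnfr.mathematics.epi import BEPIElement, evaluate_coherence_transform

    # Uniform, strictly increasing grid, as required by the grid validator.
    x_grid = np.linspace(0.0, 1.0, 64)
    f_cont = np.sin(2.0 * np.pi * x_grid).astype(np.complex128)  # continuous component
    a_disc = np.array([1.0, 0.5, 0.25], dtype=np.complex128)     # discrete tail
    element = BEPIElement(f_cont, a_disc, x_grid)

    def halve(values: np.ndarray) -> np.ndarray:
        # Linear, shape-preserving transform applied to both components.
        return 0.5 * values

    result = evaluate_coherence_transform(
        element,
        lambda epi: epi.compose(halve),
        kappa=0.4,  # require C(T(EPI)) >= 0.4 * C(EPI), up to the tolerance slack
    )
    print(result.satisfied, result.ratio)  # ratio ~ 0.5 if the coherence norm is homogeneous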