tnfr-6.0.0-py3-none-any.whl → tnfr-7.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of tnfr might be problematic.
Files changed (176)
  1. tnfr/__init__.py +50 -5
  2. tnfr/__init__.pyi +0 -7
  3. tnfr/_compat.py +0 -1
  4. tnfr/_generated_version.py +34 -0
  5. tnfr/_version.py +44 -2
  6. tnfr/alias.py +14 -13
  7. tnfr/alias.pyi +5 -37
  8. tnfr/cache.py +9 -729
  9. tnfr/cache.pyi +8 -224
  10. tnfr/callback_utils.py +16 -31
  11. tnfr/callback_utils.pyi +3 -29
  12. tnfr/cli/__init__.py +17 -11
  13. tnfr/cli/__init__.pyi +0 -21
  14. tnfr/cli/arguments.py +175 -14
  15. tnfr/cli/arguments.pyi +5 -11
  16. tnfr/cli/execution.py +434 -48
  17. tnfr/cli/execution.pyi +14 -24
  18. tnfr/cli/utils.py +20 -3
  19. tnfr/cli/utils.pyi +5 -5
  20. tnfr/config/__init__.py +2 -1
  21. tnfr/config/__init__.pyi +2 -0
  22. tnfr/config/feature_flags.py +83 -0
  23. tnfr/config/init.py +1 -1
  24. tnfr/config/operator_names.py +1 -14
  25. tnfr/config/presets.py +6 -26
  26. tnfr/constants/__init__.py +10 -13
  27. tnfr/constants/__init__.pyi +10 -22
  28. tnfr/constants/aliases.py +31 -0
  29. tnfr/constants/core.py +4 -3
  30. tnfr/constants/init.py +1 -1
  31. tnfr/constants/metric.py +3 -3
  32. tnfr/dynamics/__init__.py +64 -10
  33. tnfr/dynamics/__init__.pyi +3 -4
  34. tnfr/dynamics/adaptation.py +79 -13
  35. tnfr/dynamics/aliases.py +10 -9
  36. tnfr/dynamics/coordination.py +77 -35
  37. tnfr/dynamics/dnfr.py +575 -274
  38. tnfr/dynamics/dnfr.pyi +1 -10
  39. tnfr/dynamics/integrators.py +47 -33
  40. tnfr/dynamics/integrators.pyi +0 -1
  41. tnfr/dynamics/runtime.py +489 -129
  42. tnfr/dynamics/sampling.py +2 -0
  43. tnfr/dynamics/selectors.py +101 -62
  44. tnfr/execution.py +15 -8
  45. tnfr/execution.pyi +5 -25
  46. tnfr/flatten.py +7 -3
  47. tnfr/flatten.pyi +1 -8
  48. tnfr/gamma.py +22 -26
  49. tnfr/gamma.pyi +0 -6
  50. tnfr/glyph_history.py +37 -26
  51. tnfr/glyph_history.pyi +1 -19
  52. tnfr/glyph_runtime.py +16 -0
  53. tnfr/glyph_runtime.pyi +9 -0
  54. tnfr/immutable.py +20 -15
  55. tnfr/immutable.pyi +4 -7
  56. tnfr/initialization.py +5 -7
  57. tnfr/initialization.pyi +1 -9
  58. tnfr/io.py +6 -305
  59. tnfr/io.pyi +13 -8
  60. tnfr/mathematics/__init__.py +81 -0
  61. tnfr/mathematics/backend.py +426 -0
  62. tnfr/mathematics/dynamics.py +398 -0
  63. tnfr/mathematics/epi.py +254 -0
  64. tnfr/mathematics/generators.py +222 -0
  65. tnfr/mathematics/metrics.py +119 -0
  66. tnfr/mathematics/operators.py +233 -0
  67. tnfr/mathematics/operators_factory.py +71 -0
  68. tnfr/mathematics/projection.py +78 -0
  69. tnfr/mathematics/runtime.py +173 -0
  70. tnfr/mathematics/spaces.py +247 -0
  71. tnfr/mathematics/transforms.py +292 -0
  72. tnfr/metrics/__init__.py +10 -10
  73. tnfr/metrics/coherence.py +123 -94
  74. tnfr/metrics/common.py +22 -13
  75. tnfr/metrics/common.pyi +42 -11
  76. tnfr/metrics/core.py +72 -14
  77. tnfr/metrics/diagnosis.py +48 -57
  78. tnfr/metrics/diagnosis.pyi +3 -7
  79. tnfr/metrics/export.py +3 -5
  80. tnfr/metrics/glyph_timing.py +41 -31
  81. tnfr/metrics/reporting.py +13 -6
  82. tnfr/metrics/sense_index.py +884 -114
  83. tnfr/metrics/trig.py +167 -11
  84. tnfr/metrics/trig.pyi +1 -0
  85. tnfr/metrics/trig_cache.py +112 -15
  86. tnfr/node.py +400 -17
  87. tnfr/node.pyi +55 -38
  88. tnfr/observers.py +111 -8
  89. tnfr/observers.pyi +0 -15
  90. tnfr/ontosim.py +9 -6
  91. tnfr/ontosim.pyi +0 -5
  92. tnfr/operators/__init__.py +529 -42
  93. tnfr/operators/__init__.pyi +14 -0
  94. tnfr/operators/definitions.py +350 -18
  95. tnfr/operators/definitions.pyi +0 -14
  96. tnfr/operators/grammar.py +760 -0
  97. tnfr/operators/jitter.py +28 -22
  98. tnfr/operators/registry.py +7 -12
  99. tnfr/operators/registry.pyi +0 -2
  100. tnfr/operators/remesh.py +38 -61
  101. tnfr/rng.py +17 -300
  102. tnfr/schemas/__init__.py +8 -0
  103. tnfr/schemas/grammar.json +94 -0
  104. tnfr/selector.py +3 -4
  105. tnfr/selector.pyi +1 -1
  106. tnfr/sense.py +22 -24
  107. tnfr/sense.pyi +0 -7
  108. tnfr/structural.py +504 -21
  109. tnfr/structural.pyi +41 -18
  110. tnfr/telemetry/__init__.py +23 -1
  111. tnfr/telemetry/cache_metrics.py +226 -0
  112. tnfr/telemetry/nu_f.py +423 -0
  113. tnfr/telemetry/nu_f.pyi +123 -0
  114. tnfr/tokens.py +1 -4
  115. tnfr/tokens.pyi +1 -6
  116. tnfr/trace.py +20 -53
  117. tnfr/trace.pyi +9 -37
  118. tnfr/types.py +244 -15
  119. tnfr/types.pyi +200 -14
  120. tnfr/units.py +69 -0
  121. tnfr/units.pyi +16 -0
  122. tnfr/utils/__init__.py +107 -48
  123. tnfr/utils/__init__.pyi +80 -11
  124. tnfr/utils/cache.py +1705 -65
  125. tnfr/utils/cache.pyi +370 -58
  126. tnfr/utils/chunks.py +104 -0
  127. tnfr/utils/chunks.pyi +21 -0
  128. tnfr/utils/data.py +95 -5
  129. tnfr/utils/data.pyi +8 -17
  130. tnfr/utils/graph.py +2 -4
  131. tnfr/utils/init.py +31 -7
  132. tnfr/utils/init.pyi +4 -11
  133. tnfr/utils/io.py +313 -14
  134. tnfr/{helpers → utils}/numeric.py +50 -24
  135. tnfr/utils/numeric.pyi +21 -0
  136. tnfr/validation/__init__.py +92 -4
  137. tnfr/validation/__init__.pyi +77 -17
  138. tnfr/validation/compatibility.py +79 -43
  139. tnfr/validation/compatibility.pyi +4 -6
  140. tnfr/validation/grammar.py +55 -133
  141. tnfr/validation/grammar.pyi +37 -8
  142. tnfr/validation/graph.py +138 -0
  143. tnfr/validation/graph.pyi +17 -0
  144. tnfr/validation/rules.py +161 -74
  145. tnfr/validation/rules.pyi +55 -18
  146. tnfr/validation/runtime.py +263 -0
  147. tnfr/validation/runtime.pyi +31 -0
  148. tnfr/validation/soft_filters.py +170 -0
  149. tnfr/validation/soft_filters.pyi +37 -0
  150. tnfr/validation/spectral.py +159 -0
  151. tnfr/validation/spectral.pyi +46 -0
  152. tnfr/validation/syntax.py +28 -139
  153. tnfr/validation/syntax.pyi +7 -4
  154. tnfr/validation/window.py +39 -0
  155. tnfr/validation/window.pyi +1 -0
  156. tnfr/viz/__init__.py +9 -0
  157. tnfr/viz/matplotlib.py +246 -0
  158. {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/METADATA +63 -19
  159. tnfr-7.0.0.dist-info/RECORD +185 -0
  160. {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/licenses/LICENSE.md +1 -1
  161. tnfr/constants_glyphs.py +0 -16
  162. tnfr/constants_glyphs.pyi +0 -12
  163. tnfr/grammar.py +0 -25
  164. tnfr/grammar.pyi +0 -13
  165. tnfr/helpers/__init__.py +0 -151
  166. tnfr/helpers/__init__.pyi +0 -66
  167. tnfr/helpers/numeric.pyi +0 -12
  168. tnfr/presets.py +0 -15
  169. tnfr/presets.pyi +0 -7
  170. tnfr/utils/io.pyi +0 -10
  171. tnfr/utils/validators.py +0 -130
  172. tnfr/utils/validators.pyi +0 -19
  173. tnfr-6.0.0.dist-info/RECORD +0 -157
  174. {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/WHEEL +0 -0
  175. {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/entry_points.txt +0 -0
  176. {tnfr-6.0.0.dist-info → tnfr-7.0.0.dist-info}/top_level.txt +0 -0
tnfr/mathematics/projection.py
@@ -0,0 +1,78 @@
+"""Projection helpers constructing TNFR state vectors."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Protocol
+
+import numpy as np
+
+if TYPE_CHECKING:  # pragma: no cover - typing hook when numpy.typing is available
+    import numpy.typing as npt
+
+    ComplexVector = npt.NDArray[np.complexfloating[np.float64, np.float64]]
+else:  # pragma: no cover - runtime fallback without numpy.typing
+    ComplexVector = np.ndarray  # type: ignore[assignment]
+
+__all__ = ["StateProjector", "BasicStateProjector"]
+
+
+class StateProjector(Protocol):
+    """Protocol describing state projection callables."""
+
+    def __call__(
+        self,
+        epi: float,
+        nu_f: float,
+        theta: float,
+        dim: int,
+        rng: np.random.Generator | None = None,
+    ) -> ComplexVector:
+        """Return a normalised TNFR state vector for the provided parameters."""
+
+
+@dataclass(slots=True)
+class BasicStateProjector:
+    """Canonical projector building deterministic TNFR state vectors.
+
+    The projector maps the structural scalars of a node—its EPI magnitude,
+    structural frequency ``νf`` and phase ``θ``—onto the canonical Hilbert
+    basis. The resulting vector encodes a coherent amplitude envelope derived
+    from the structural intensity while the complex exponential captures the
+    phase progression across the local modes. Optional stochastic excitation is
+    injected via a :class:`numpy.random.Generator` to model controlled
+    dissonance while preserving determinism when a seed is provided.
+    """
+
+    dtype: np.dtype[np.complexfloating[np.float64, np.float64]] = np.dtype(np.complex128)
+    atol: float = 1e-12
+
+    def __call__(
+        self,
+        epi: float,
+        nu_f: float,
+        theta: float,
+        dim: int,
+        rng: np.random.Generator | None = None,
+    ) -> ComplexVector:
+        if dim <= 0:
+            raise ValueError("State dimension must be a positive integer.")
+
+        indices = np.arange(1, dim + 1, dtype=float)
+        phase_progression = theta + (nu_f + 1.0) * indices / max(dim, 1)
+        envelope = np.abs(epi) + 0.5 * indices / dim + 1.0
+        base_vector = envelope * np.exp(1j * phase_progression)
+
+        if rng is not None:
+            noise_scale = (np.abs(epi) + np.abs(nu_f) + 1.0) * 0.05
+            real_noise = rng.standard_normal(dim)
+            imag_noise = rng.standard_normal(dim)
+            stochastic = noise_scale * (real_noise + 1j * imag_noise)
+            base_vector = base_vector + stochastic
+
+        norm = np.linalg.norm(base_vector)
+        if np.isclose(norm, 0.0, atol=self.atol):
+            raise ValueError("Cannot normalise a null state vector.")
+
+        normalised = base_vector / norm
+        return np.asarray(normalised, dtype=self.dtype)
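As a quick orientation for reviewers, here is a minimal usage sketch of the new projector. The import path follows the module added in this diff; the seed and the scalar inputs are illustrative assumptions, not package fixtures:

    import numpy as np
    from tnfr.mathematics.projection import BasicStateProjector

    projector = BasicStateProjector()
    rng = np.random.default_rng(7)  # seeded, so the optional "controlled dissonance" stays deterministic
    state = projector(epi=0.8, nu_f=1.2, theta=0.3, dim=4, rng=rng)
    print(np.linalg.norm(state))  # ~1.0: the projector normalises the vector before returning it

Omitting ``rng`` yields the purely deterministic envelope-times-phase vector described in the docstring.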
tnfr/mathematics/runtime.py
@@ -0,0 +1,173 @@
+"""Runtime helpers capturing TNFR spectral performance metrics."""
+from __future__ import annotations
+
+from typing import Any, Sequence
+
+import numpy as np
+
+from ..config import get_flags
+from ..utils import get_logger
+from .backend import ensure_array, ensure_numpy, get_backend
+from .operators import CoherenceOperator, FrequencyOperator
+from .spaces import HilbertSpace
+
+__all__ = [
+    "normalized",
+    "coherence",
+    "frequency_positive",
+    "stable_unitary",
+    "coherence_expectation",
+    "frequency_expectation",
+]
+
+
+LOGGER = get_logger(__name__)
+
+
+def _as_vector(
+    state: Sequence[complex] | np.ndarray,
+    *,
+    dimension: int,
+    backend=None,
+) -> Any:
+    resolved_backend = backend or get_backend()
+    vector = ensure_array(state, dtype=np.complex128, backend=resolved_backend)
+    if getattr(vector, "ndim", len(getattr(vector, "shape", ()))) != 1 or vector.shape[0] != dimension:
+        raise ValueError(
+            "State vector dimension mismatch: "
+            f"expected ({dimension},), received {vector.shape!r}."
+        )
+    return vector
+
+
+def _resolve_operator_backend(operator: CoherenceOperator) -> tuple[Any, Any]:
+    backend = getattr(operator, "backend", None) or get_backend()
+    matrix_backend = getattr(operator, "_matrix_backend", None)
+    if matrix_backend is None:
+        matrix_backend = ensure_array(operator.matrix, dtype=np.complex128, backend=backend)
+    return backend, matrix_backend
+
+
+def _maybe_log(metric: str, payload: dict[str, object]) -> None:
+    if not get_flags().log_performance:
+        return
+    LOGGER.debug("%s: %s", metric, payload)
+
+
+def normalized(
+    state: Sequence[complex] | np.ndarray,
+    hilbert_space: HilbertSpace,
+    *,
+    atol: float = 1e-9,
+    label: str = "state",
+) -> tuple[bool, float]:
+    """Return normalization status and norm for ``state``."""
+
+    backend = get_backend()
+    vector = _as_vector(state, dimension=hilbert_space.dimension, backend=backend)
+    norm_backend = backend.norm(vector)
+    norm = float(np.asarray(ensure_numpy(norm_backend, backend=backend)))
+    passed = bool(np.isclose(norm, 1.0, atol=atol))
+    _maybe_log("normalized", {"label": label, "norm": norm, "passed": passed})
+    return passed, float(norm)
+
+
+def coherence_expectation(
+    state: Sequence[complex] | np.ndarray,
+    operator: CoherenceOperator,
+    *,
+    normalise: bool = True,
+    atol: float = 1e-9,
+) -> float:
+    """Return the coherence expectation value for ``state``."""
+
+    return float(operator.expectation(state, normalise=normalise, atol=atol))
+
+
+def coherence(
+    state: Sequence[complex] | np.ndarray,
+    operator: CoherenceOperator,
+    threshold: float,
+    *,
+    normalise: bool = True,
+    atol: float = 1e-9,
+    label: str = "state",
+) -> tuple[bool, float]:
+    """Evaluate coherence expectation against ``threshold``."""
+
+    value = coherence_expectation(state, operator, normalise=normalise, atol=atol)
+    passed = bool(value + atol >= threshold)
+    _maybe_log(
+        "coherence",
+        {"label": label, "value": value, "threshold": threshold, "passed": passed},
+    )
+    return passed, value
+
+
+def frequency_expectation(
+    state: Sequence[complex] | np.ndarray,
+    operator: FrequencyOperator,
+    *,
+    normalise: bool = True,
+    atol: float = 1e-9,
+) -> float:
+    """Return the structural frequency projection for ``state``."""
+
+    return float(operator.project_frequency(state, normalise=normalise, atol=atol))
+
+
+def frequency_positive(
+    state: Sequence[complex] | np.ndarray,
+    operator: FrequencyOperator,
+    *,
+    normalise: bool = True,
+    enforce: bool = True,
+    atol: float = 1e-9,
+    label: str = "state",
+) -> dict[str, float | bool]:
+    """Return summary ensuring structural frequency remains non-negative."""

+    spectrum = operator.spectrum()
+    spectrum_psd = bool(operator.is_positive_semidefinite(atol=atol))
+    value = frequency_expectation(state, operator, normalise=normalise, atol=atol)
+    projection_ok = bool(value + atol >= 0.0)
+    passed = bool(spectrum_psd and (projection_ok or not enforce))
+    summary = {
+        "passed": passed,
+        "value": value,
+        "enforce": enforce,
+        "spectrum_psd": spectrum_psd,
+        "spectrum_min": float(np.min(spectrum)) if spectrum.size else float("inf"),
+        "projection_passed": projection_ok,
+    }
+    _maybe_log("frequency_positive", {"label": label, **summary})
+    return summary
+
+
+def stable_unitary(
+    state: Sequence[complex] | np.ndarray,
+    operator: CoherenceOperator,
+    hilbert_space: HilbertSpace,
+    *,
+    normalise: bool = True,
+    atol: float = 1e-9,
+    label: str = "state",
+) -> tuple[bool, float]:
+    """Return whether a one-step unitary preserves the Hilbert norm."""
+
+    backend, matrix_backend = _resolve_operator_backend(operator)
+    vector = _as_vector(state, dimension=hilbert_space.dimension, backend=backend)
+    if normalise:
+        norm_backend = backend.norm(vector)
+        norm = float(np.asarray(ensure_numpy(norm_backend, backend=backend)))
+        if np.isclose(norm, 0.0, atol=atol):
+            raise ValueError("Cannot normalise a null state vector.")
+        vector = vector / norm
+    generator = -1j * matrix_backend
+    unitary = backend.matrix_exp(generator)
+    evolved_backend = backend.matmul(unitary, vector[..., None]).reshape((hilbert_space.dimension,))
+    evolved = np.asarray(ensure_numpy(evolved_backend, backend=backend))
+    norm_after = hilbert_space.norm(evolved)
+    passed = bool(np.isclose(norm_after, 1.0, atol=atol))
+    _maybe_log("stable_unitary", {"label": label, "norm_after": norm_after, "passed": passed})
+    return passed, float(norm_after)
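A sketch of how the simplest of these checks, ``normalized``, might be invoked. It assumes the default math backend resolves to NumPy and uses the ``HilbertSpace`` constructor from ``spaces.py`` below; the vector is illustrative only:

    import numpy as np
    from tnfr.mathematics.spaces import HilbertSpace
    from tnfr.mathematics.runtime import normalized

    space = HilbertSpace(dimension=3)
    state = np.array([1.0, 1.0j, 0.0], dtype=np.complex128) / np.sqrt(2.0)
    passed, norm = normalized(state, space, label="demo")
    # passed is True when the backend-computed norm is within the default tolerance of 1.0;
    # the _maybe_log debug record fires only when get_flags().log_performance is enabled.

The operator-based checks (``coherence``, ``frequency_positive``, ``stable_unitary``) additionally require a ``CoherenceOperator`` or ``FrequencyOperator`` from ``tnfr.mathematics.operators``, whose constructors are not shown in this hunk.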
tnfr/mathematics/spaces.py
@@ -0,0 +1,247 @@
+"""Mathematical spaces supporting the TNFR canonical paradigm."""
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Callable, Sequence
+
+import numpy as np
+
+from .epi import BEPIElement, _EPIValidators
+
+
+@dataclass(frozen=True)
+class HilbertSpace:
+    """Finite section of :math:`\ell^2(\mathbb{N}) \otimes L^2(\mathbb{R})`.
+
+    The space models the discrete spectral component of the TNFR paradigm. The
+    canonical orthonormal basis corresponds to the standard coordinate vectors
+    and the inner product is sesquilinear, implemented through
+    :func:`numpy.vdot`. Projection returns expansion coefficients for any
+    supplied orthonormal basis.
+    """
+
+    dimension: int
+    dtype: np.dtype = np.complex128
+
+    def __post_init__(self) -> None:
+        if self.dimension <= 0:
+            raise ValueError("Hilbert spaces require a positive dimension.")
+
+    @property
+    def basis(self) -> np.ndarray:
+        """Return the canonical orthonormal basis as identity vectors."""
+
+        return np.eye(self.dimension, dtype=self.dtype)
+
+    def _as_vector(self, value: Sequence[complex] | np.ndarray) -> np.ndarray:
+        vector = np.asarray(value, dtype=self.dtype)
+        if vector.shape != (self.dimension,):
+            raise ValueError(
+                f"Vector must have shape ({self.dimension},), got {vector.shape!r}."
+            )
+        return vector
+
+    def inner_product(
+        self, vector_a: Sequence[complex] | np.ndarray, vector_b: Sequence[complex] | np.ndarray
+    ) -> complex:
+        """Compute the sesquilinear inner product ``⟨a, b⟩``."""
+
+        vec_a = self._as_vector(vector_a)
+        vec_b = self._as_vector(vector_b)
+        return np.vdot(vec_a, vec_b)
+
+    def norm(self, vector: Sequence[complex] | np.ndarray) -> float:
+        """Return the Hilbert norm induced by the inner product."""
+
+        value = self.inner_product(vector, vector)
+        magnitude = max(value.real, 0.0)
+        return float(np.sqrt(magnitude))
+
+    def is_normalized(
+        self, vector: Sequence[complex] | np.ndarray, *, atol: float = 1e-9
+    ) -> bool:
+        """Check whether a vector has unit norm within a tolerance."""
+
+        return np.isclose(self.norm(vector), 1.0, atol=atol)
+
+    def _validate_basis(self, basis: Sequence[Sequence[complex] | np.ndarray]) -> np.ndarray:
+        basis_list = list(basis)
+        if len(basis_list) == 0:
+            raise ValueError("An orthonormal basis must contain at least one vector.")
+
+        basis_vectors = [self._as_vector(vector) for vector in basis_list]
+        matrix = np.vstack(basis_vectors)
+        gram = matrix @ matrix.conj().T
+        identity = np.eye(matrix.shape[0], dtype=self.dtype)
+        if not np.allclose(gram, identity, atol=1e-10):
+            raise ValueError("Provided basis is not orthonormal within tolerance.")
+        return matrix
+
+    def project(
+        self,
+        vector: Sequence[complex] | np.ndarray,
+        basis: Sequence[Sequence[complex] | np.ndarray] | None = None,
+    ) -> np.ndarray:
+        """Return coefficients ``⟨b_k|ψ⟩`` for the chosen orthonormal basis."""
+
+        vec = self._as_vector(vector)
+        if basis is None:
+            return vec.astype(self.dtype, copy=True)
+
+        basis_matrix = self._validate_basis(basis)
+        coefficients = basis_matrix.conj() @ vec
+        return coefficients.astype(self.dtype, copy=False)
+
+
+class BanachSpaceEPI(_EPIValidators):
+    """Banach space for :math:`C^0([0, 1],\mathbb{C}) \oplus \ell^2(\mathbb{N})`.
+
+    Elements are represented by a pair ``(f, a)`` where ``f`` samples the
+    continuous field over a uniform grid ``x_grid`` and ``a`` is the discrete
+    spectral tail. The coherence norm combines the supremum of ``f``, the
+    :math:`\ell^2` norm of ``a`` and a derivative-based functional capturing
+    the local stability of ``f``.
+    """
+
+    def element(
+        self,
+        f_continuous: Sequence[complex] | np.ndarray,
+        a_discrete: Sequence[complex] | np.ndarray,
+        *,
+        x_grid: Sequence[float] | np.ndarray,
+    ) -> BEPIElement:
+        """Create a :class:`~tnfr.mathematics.epi.BEPIElement` with validated data."""
+
+        self.validate_domain(f_continuous, a_discrete, x_grid)
+        return BEPIElement(f_continuous, a_discrete, x_grid)
+
+    def zero_element(
+        self,
+        *,
+        continuous_size: int,
+        discrete_size: int,
+        x_grid: Sequence[float] | np.ndarray | None = None,
+    ) -> BEPIElement:
+        """Return the neutral element for the direct sum."""
+
+        if continuous_size < 2:
+            raise ValueError("continuous_size must be at least two samples.")
+        grid = (
+            np.asarray(x_grid, dtype=float)
+            if x_grid is not None
+            else np.linspace(0.0, 1.0, continuous_size, dtype=float)
+        )
+        zeros_f = np.zeros(continuous_size, dtype=np.complex128)
+        zeros_a = np.zeros(discrete_size, dtype=np.complex128)
+        return self.element(zeros_f, zeros_a, x_grid=grid)
+
+    def canonical_basis(
+        self,
+        *,
+        continuous_size: int,
+        discrete_size: int,
+        continuous_index: int = 0,
+        discrete_index: int = 0,
+        x_grid: Sequence[float] | np.ndarray | None = None,
+    ) -> BEPIElement:
+        """Generate a canonical basis element for the Banach space."""
+
+        if continuous_size < 2:
+            raise ValueError("continuous_size must be at least two samples.")
+        if not (0 <= continuous_index < continuous_size):
+            raise ValueError("continuous_index out of range.")
+        if not (0 <= discrete_index < discrete_size):
+            raise ValueError("discrete_index out of range.")
+
+        grid = (
+            np.asarray(x_grid, dtype=float)
+            if x_grid is not None
+            else np.linspace(0.0, 1.0, continuous_size, dtype=float)
+        )
+
+        f_vector = np.zeros(continuous_size, dtype=np.complex128)
+        a_vector = np.zeros(discrete_size, dtype=np.complex128)
+        f_vector[continuous_index] = 1.0 + 0.0j
+        a_vector[discrete_index] = 1.0 + 0.0j
+        return self.element(f_vector, a_vector, x_grid=grid)
+
+    def direct_sum(self, left: BEPIElement, right: BEPIElement) -> BEPIElement:
+        """Delegate direct sums to the underlying EPI element."""
+
+        return left.direct_sum(right)
+
+    def adjoint(self, element: BEPIElement) -> BEPIElement:
+        """Return the adjoint element of the supplied operand."""
+
+        return element.adjoint()
+
+    def compose(
+        self,
+        element: BEPIElement,
+        transform: Callable[[np.ndarray], np.ndarray],
+        *,
+        spectral_transform: Callable[[np.ndarray], np.ndarray] | None = None,
+    ) -> BEPIElement:
+        """Compose an element with the provided transforms."""
+
+        return element.compose(transform, spectral_transform=spectral_transform)
+
+    def tensor_with_hilbert(
+        self,
+        element: BEPIElement,
+        hilbert_space: HilbertSpace,
+        vector: Sequence[complex] | np.ndarray | None = None,
+    ) -> np.ndarray:
+        """Compute the tensor product against a :class:`HilbertSpace` vector."""
+
+        raw_vector = hilbert_space.basis[0] if vector is None else vector
+        hilbert_vector = hilbert_space._as_vector(raw_vector)  # pylint: disable=protected-access
+        return element.tensor(hilbert_vector)
+
+    def compute_coherence_functional(
+        self,
+        f_continuous: Sequence[complex] | np.ndarray,
+        x_grid: Sequence[float] | np.ndarray,
+    ) -> float:
+        """Approximate :math:`\int |f'|^2 dx / (1 + \int |f|^2 dx)`."""
+
+        f_array, _, grid = self.validate_domain(f_continuous, np.array([0.0], dtype=np.complex128), x_grid)
+        if grid is None:
+            raise ValueError("x_grid must be provided for coherence evaluations.")
+
+        derivative = np.gradient(
+            f_array,
+            grid,
+            edge_order=2 if f_array.size > 2 else 1,
+        )
+        numerator = np.trapz(np.abs(derivative) ** 2, grid)
+        denominator = 1.0 + np.trapz(np.abs(f_array) ** 2, grid)
+        if denominator <= 0:
+            raise ValueError("Denominator of coherence functional must be positive.")
+        return float(np.real_if_close(numerator / denominator))
+
+    def coherence_norm(
+        self,
+        f_continuous: Sequence[complex] | np.ndarray,
+        a_discrete: Sequence[complex] | np.ndarray,
+        *,
+        x_grid: Sequence[float] | np.ndarray,
+        alpha: float = 1.0,
+        beta: float = 1.0,
+        gamma: float = 1.0,
+    ) -> float:
+        """Return ``α‖f‖_∞ + β‖a‖_2 + γ CF(f)`` for positive weights."""
+
+        if alpha <= 0 or beta <= 0 or gamma <= 0:
+            raise ValueError("alpha, beta and gamma must be strictly positive.")
+
+        f_array, a_array, grid = self.validate_domain(f_continuous, a_discrete, x_grid)
+        if grid is None:
+            raise ValueError("x_grid must be supplied when evaluating the norm.")
+
+        sup_norm = float(np.max(np.abs(f_array))) if f_array.size else 0.0
+        l2_norm = float(np.linalg.norm(a_array))
+        coherence_functional = self.compute_coherence_functional(f_array, grid)
+
+        value = alpha * sup_norm + beta * l2_norm + gamma * coherence_functional
+        return float(np.real_if_close(value))
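A short sketch of the ``HilbertSpace`` API added above. The Hadamard-style basis and the printed coefficients are illustrative assumptions; ``BanachSpaceEPI`` is omitted because it depends on ``BEPIElement`` and ``_EPIValidators`` from ``epi.py``, which this hunk does not show:

    import numpy as np
    from tnfr.mathematics.spaces import HilbertSpace

    space = HilbertSpace(dimension=2)
    psi = np.array([1.0, 1.0j], dtype=np.complex128) / np.sqrt(2.0)

    space.norm(psi)           # 1.0, induced by the sesquilinear np.vdot inner product
    space.is_normalized(psi)  # True
    basis = [
        np.array([1.0, 1.0], dtype=np.complex128) / np.sqrt(2.0),
        np.array([1.0, -1.0], dtype=np.complex128) / np.sqrt(2.0),
    ]
    space.project(psi, basis)  # expansion coefficients <b_k|psi>: [0.5+0.5j, 0.5-0.5j]

Passing a basis that fails the Gram-matrix check in ``_validate_basis`` raises ``ValueError`` rather than silently returning skewed coefficients.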