tnfr-4.5.2-py3-none-any.whl → tnfr-8.5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of tnfr might be problematic.

Files changed (365)
  1. tnfr/__init__.py +334 -50
  2. tnfr/__init__.pyi +33 -0
  3. tnfr/_compat.py +10 -0
  4. tnfr/_generated_version.py +34 -0
  5. tnfr/_version.py +49 -0
  6. tnfr/_version.pyi +7 -0
  7. tnfr/alias.py +214 -37
  8. tnfr/alias.pyi +108 -0
  9. tnfr/backends/__init__.py +354 -0
  10. tnfr/backends/jax_backend.py +173 -0
  11. tnfr/backends/numpy_backend.py +238 -0
  12. tnfr/backends/optimized_numpy.py +420 -0
  13. tnfr/backends/torch_backend.py +408 -0
  14. tnfr/cache.py +149 -556
  15. tnfr/cache.pyi +13 -0
  16. tnfr/cli/__init__.py +51 -16
  17. tnfr/cli/__init__.pyi +26 -0
  18. tnfr/cli/arguments.py +344 -32
  19. tnfr/cli/arguments.pyi +29 -0
  20. tnfr/cli/execution.py +676 -50
  21. tnfr/cli/execution.pyi +70 -0
  22. tnfr/cli/interactive_validator.py +614 -0
  23. tnfr/cli/utils.py +18 -3
  24. tnfr/cli/utils.pyi +7 -0
  25. tnfr/cli/validate.py +236 -0
  26. tnfr/compat/__init__.py +85 -0
  27. tnfr/compat/dataclass.py +136 -0
  28. tnfr/compat/jsonschema_stub.py +61 -0
  29. tnfr/compat/matplotlib_stub.py +73 -0
  30. tnfr/compat/numpy_stub.py +155 -0
  31. tnfr/config/__init__.py +224 -0
  32. tnfr/config/__init__.pyi +10 -0
  33. tnfr/{constants_glyphs.py → config/constants.py} +26 -20
  34. tnfr/config/constants.pyi +12 -0
  35. tnfr/config/defaults.py +54 -0
  36. tnfr/{constants/core.py → config/defaults_core.py} +59 -6
  37. tnfr/config/defaults_init.py +33 -0
  38. tnfr/config/defaults_metric.py +104 -0
  39. tnfr/config/feature_flags.py +81 -0
  40. tnfr/config/feature_flags.pyi +16 -0
  41. tnfr/config/glyph_constants.py +31 -0
  42. tnfr/config/init.py +77 -0
  43. tnfr/config/init.pyi +8 -0
  44. tnfr/config/operator_names.py +254 -0
  45. tnfr/config/operator_names.pyi +36 -0
  46. tnfr/config/physics_derivation.py +354 -0
  47. tnfr/config/presets.py +83 -0
  48. tnfr/config/presets.pyi +7 -0
  49. tnfr/config/security.py +927 -0
  50. tnfr/config/thresholds.py +114 -0
  51. tnfr/config/tnfr_config.py +498 -0
  52. tnfr/constants/__init__.py +51 -133
  53. tnfr/constants/__init__.pyi +92 -0
  54. tnfr/constants/aliases.py +33 -0
  55. tnfr/constants/aliases.pyi +27 -0
  56. tnfr/constants/init.py +3 -1
  57. tnfr/constants/init.pyi +12 -0
  58. tnfr/constants/metric.py +9 -15
  59. tnfr/constants/metric.pyi +19 -0
  60. tnfr/core/__init__.py +33 -0
  61. tnfr/core/container.py +226 -0
  62. tnfr/core/default_implementations.py +329 -0
  63. tnfr/core/interfaces.py +279 -0
  64. tnfr/dynamics/__init__.py +213 -633
  65. tnfr/dynamics/__init__.pyi +83 -0
  66. tnfr/dynamics/adaptation.py +267 -0
  67. tnfr/dynamics/adaptation.pyi +7 -0
  68. tnfr/dynamics/adaptive_sequences.py +189 -0
  69. tnfr/dynamics/adaptive_sequences.pyi +14 -0
  70. tnfr/dynamics/aliases.py +23 -0
  71. tnfr/dynamics/aliases.pyi +19 -0
  72. tnfr/dynamics/bifurcation.py +232 -0
  73. tnfr/dynamics/canonical.py +229 -0
  74. tnfr/dynamics/canonical.pyi +48 -0
  75. tnfr/dynamics/coordination.py +385 -0
  76. tnfr/dynamics/coordination.pyi +25 -0
  77. tnfr/dynamics/dnfr.py +2699 -398
  78. tnfr/dynamics/dnfr.pyi +26 -0
  79. tnfr/dynamics/dynamic_limits.py +225 -0
  80. tnfr/dynamics/feedback.py +252 -0
  81. tnfr/dynamics/feedback.pyi +24 -0
  82. tnfr/dynamics/fused_dnfr.py +454 -0
  83. tnfr/dynamics/homeostasis.py +157 -0
  84. tnfr/dynamics/homeostasis.pyi +14 -0
  85. tnfr/dynamics/integrators.py +496 -102
  86. tnfr/dynamics/integrators.pyi +36 -0
  87. tnfr/dynamics/learning.py +310 -0
  88. tnfr/dynamics/learning.pyi +33 -0
  89. tnfr/dynamics/metabolism.py +254 -0
  90. tnfr/dynamics/nbody.py +796 -0
  91. tnfr/dynamics/nbody_tnfr.py +783 -0
  92. tnfr/dynamics/propagation.py +326 -0
  93. tnfr/dynamics/runtime.py +908 -0
  94. tnfr/dynamics/runtime.pyi +77 -0
  95. tnfr/dynamics/sampling.py +10 -5
  96. tnfr/dynamics/sampling.pyi +7 -0
  97. tnfr/dynamics/selectors.py +711 -0
  98. tnfr/dynamics/selectors.pyi +85 -0
  99. tnfr/dynamics/structural_clip.py +207 -0
  100. tnfr/errors/__init__.py +37 -0
  101. tnfr/errors/contextual.py +492 -0
  102. tnfr/execution.py +77 -55
  103. tnfr/execution.pyi +45 -0
  104. tnfr/extensions/__init__.py +205 -0
  105. tnfr/extensions/__init__.pyi +18 -0
  106. tnfr/extensions/base.py +173 -0
  107. tnfr/extensions/base.pyi +35 -0
  108. tnfr/extensions/business/__init__.py +71 -0
  109. tnfr/extensions/business/__init__.pyi +11 -0
  110. tnfr/extensions/business/cookbook.py +88 -0
  111. tnfr/extensions/business/cookbook.pyi +8 -0
  112. tnfr/extensions/business/health_analyzers.py +202 -0
  113. tnfr/extensions/business/health_analyzers.pyi +9 -0
  114. tnfr/extensions/business/patterns.py +183 -0
  115. tnfr/extensions/business/patterns.pyi +8 -0
  116. tnfr/extensions/medical/__init__.py +73 -0
  117. tnfr/extensions/medical/__init__.pyi +11 -0
  118. tnfr/extensions/medical/cookbook.py +88 -0
  119. tnfr/extensions/medical/cookbook.pyi +8 -0
  120. tnfr/extensions/medical/health_analyzers.py +181 -0
  121. tnfr/extensions/medical/health_analyzers.pyi +9 -0
  122. tnfr/extensions/medical/patterns.py +163 -0
  123. tnfr/extensions/medical/patterns.pyi +8 -0
  124. tnfr/flatten.py +29 -50
  125. tnfr/flatten.pyi +21 -0
  126. tnfr/gamma.py +66 -53
  127. tnfr/gamma.pyi +36 -0
  128. tnfr/glyph_history.py +144 -57
  129. tnfr/glyph_history.pyi +35 -0
  130. tnfr/glyph_runtime.py +19 -0
  131. tnfr/glyph_runtime.pyi +8 -0
  132. tnfr/immutable.py +70 -30
  133. tnfr/immutable.pyi +36 -0
  134. tnfr/initialization.py +22 -16
  135. tnfr/initialization.pyi +65 -0
  136. tnfr/io.py +5 -241
  137. tnfr/io.pyi +13 -0
  138. tnfr/locking.pyi +7 -0
  139. tnfr/mathematics/__init__.py +79 -0
  140. tnfr/mathematics/backend.py +453 -0
  141. tnfr/mathematics/backend.pyi +99 -0
  142. tnfr/mathematics/dynamics.py +408 -0
  143. tnfr/mathematics/dynamics.pyi +90 -0
  144. tnfr/mathematics/epi.py +391 -0
  145. tnfr/mathematics/epi.pyi +65 -0
  146. tnfr/mathematics/generators.py +242 -0
  147. tnfr/mathematics/generators.pyi +29 -0
  148. tnfr/mathematics/metrics.py +119 -0
  149. tnfr/mathematics/metrics.pyi +16 -0
  150. tnfr/mathematics/operators.py +239 -0
  151. tnfr/mathematics/operators.pyi +59 -0
  152. tnfr/mathematics/operators_factory.py +124 -0
  153. tnfr/mathematics/operators_factory.pyi +11 -0
  154. tnfr/mathematics/projection.py +87 -0
  155. tnfr/mathematics/projection.pyi +33 -0
  156. tnfr/mathematics/runtime.py +182 -0
  157. tnfr/mathematics/runtime.pyi +64 -0
  158. tnfr/mathematics/spaces.py +256 -0
  159. tnfr/mathematics/spaces.pyi +83 -0
  160. tnfr/mathematics/transforms.py +305 -0
  161. tnfr/mathematics/transforms.pyi +62 -0
  162. tnfr/metrics/__init__.py +47 -9
  163. tnfr/metrics/__init__.pyi +20 -0
  164. tnfr/metrics/buffer_cache.py +163 -0
  165. tnfr/metrics/buffer_cache.pyi +24 -0
  166. tnfr/metrics/cache_utils.py +214 -0
  167. tnfr/metrics/coherence.py +1510 -330
  168. tnfr/metrics/coherence.pyi +129 -0
  169. tnfr/metrics/common.py +23 -16
  170. tnfr/metrics/common.pyi +35 -0
  171. tnfr/metrics/core.py +251 -36
  172. tnfr/metrics/core.pyi +13 -0
  173. tnfr/metrics/diagnosis.py +709 -110
  174. tnfr/metrics/diagnosis.pyi +86 -0
  175. tnfr/metrics/emergence.py +245 -0
  176. tnfr/metrics/export.py +60 -18
  177. tnfr/metrics/export.pyi +7 -0
  178. tnfr/metrics/glyph_timing.py +233 -43
  179. tnfr/metrics/glyph_timing.pyi +81 -0
  180. tnfr/metrics/learning_metrics.py +280 -0
  181. tnfr/metrics/learning_metrics.pyi +21 -0
  182. tnfr/metrics/phase_coherence.py +351 -0
  183. tnfr/metrics/phase_compatibility.py +349 -0
  184. tnfr/metrics/reporting.py +63 -28
  185. tnfr/metrics/reporting.pyi +25 -0
  186. tnfr/metrics/sense_index.py +1126 -43
  187. tnfr/metrics/sense_index.pyi +9 -0
  188. tnfr/metrics/trig.py +215 -23
  189. tnfr/metrics/trig.pyi +13 -0
  190. tnfr/metrics/trig_cache.py +148 -24
  191. tnfr/metrics/trig_cache.pyi +10 -0
  192. tnfr/multiscale/__init__.py +32 -0
  193. tnfr/multiscale/hierarchical.py +517 -0
  194. tnfr/node.py +646 -140
  195. tnfr/node.pyi +139 -0
  196. tnfr/observers.py +160 -45
  197. tnfr/observers.pyi +31 -0
  198. tnfr/ontosim.py +23 -19
  199. tnfr/ontosim.pyi +28 -0
  200. tnfr/operators/__init__.py +1358 -106
  201. tnfr/operators/__init__.pyi +31 -0
  202. tnfr/operators/algebra.py +277 -0
  203. tnfr/operators/canonical_patterns.py +420 -0
  204. tnfr/operators/cascade.py +267 -0
  205. tnfr/operators/cycle_detection.py +358 -0
  206. tnfr/operators/definitions.py +4108 -0
  207. tnfr/operators/definitions.pyi +78 -0
  208. tnfr/operators/grammar.py +1164 -0
  209. tnfr/operators/grammar.pyi +140 -0
  210. tnfr/operators/hamiltonian.py +710 -0
  211. tnfr/operators/health_analyzer.py +809 -0
  212. tnfr/operators/jitter.py +107 -38
  213. tnfr/operators/jitter.pyi +11 -0
  214. tnfr/operators/lifecycle.py +314 -0
  215. tnfr/operators/metabolism.py +618 -0
  216. tnfr/operators/metrics.py +2138 -0
  217. tnfr/operators/network_analysis/__init__.py +27 -0
  218. tnfr/operators/network_analysis/source_detection.py +186 -0
  219. tnfr/operators/nodal_equation.py +395 -0
  220. tnfr/operators/pattern_detection.py +660 -0
  221. tnfr/operators/patterns.py +669 -0
  222. tnfr/operators/postconditions/__init__.py +38 -0
  223. tnfr/operators/postconditions/mutation.py +236 -0
  224. tnfr/operators/preconditions/__init__.py +1226 -0
  225. tnfr/operators/preconditions/coherence.py +305 -0
  226. tnfr/operators/preconditions/dissonance.py +236 -0
  227. tnfr/operators/preconditions/emission.py +128 -0
  228. tnfr/operators/preconditions/mutation.py +580 -0
  229. tnfr/operators/preconditions/reception.py +125 -0
  230. tnfr/operators/preconditions/resonance.py +364 -0
  231. tnfr/operators/registry.py +74 -0
  232. tnfr/operators/registry.pyi +9 -0
  233. tnfr/operators/remesh.py +1415 -91
  234. tnfr/operators/remesh.pyi +26 -0
  235. tnfr/operators/structural_units.py +268 -0
  236. tnfr/operators/unified_grammar.py +105 -0
  237. tnfr/parallel/__init__.py +54 -0
  238. tnfr/parallel/auto_scaler.py +234 -0
  239. tnfr/parallel/distributed.py +384 -0
  240. tnfr/parallel/engine.py +238 -0
  241. tnfr/parallel/gpu_engine.py +420 -0
  242. tnfr/parallel/monitoring.py +248 -0
  243. tnfr/parallel/partitioner.py +459 -0
  244. tnfr/py.typed +0 -0
  245. tnfr/recipes/__init__.py +22 -0
  246. tnfr/recipes/cookbook.py +743 -0
  247. tnfr/rng.py +75 -151
  248. tnfr/rng.pyi +26 -0
  249. tnfr/schemas/__init__.py +8 -0
  250. tnfr/schemas/grammar.json +94 -0
  251. tnfr/sdk/__init__.py +107 -0
  252. tnfr/sdk/__init__.pyi +19 -0
  253. tnfr/sdk/adaptive_system.py +173 -0
  254. tnfr/sdk/adaptive_system.pyi +21 -0
  255. tnfr/sdk/builders.py +370 -0
  256. tnfr/sdk/builders.pyi +51 -0
  257. tnfr/sdk/fluent.py +1121 -0
  258. tnfr/sdk/fluent.pyi +74 -0
  259. tnfr/sdk/templates.py +342 -0
  260. tnfr/sdk/templates.pyi +41 -0
  261. tnfr/sdk/utils.py +341 -0
  262. tnfr/secure_config.py +46 -0
  263. tnfr/security/__init__.py +70 -0
  264. tnfr/security/database.py +514 -0
  265. tnfr/security/subprocess.py +503 -0
  266. tnfr/security/validation.py +290 -0
  267. tnfr/selector.py +59 -22
  268. tnfr/selector.pyi +19 -0
  269. tnfr/sense.py +92 -67
  270. tnfr/sense.pyi +23 -0
  271. tnfr/services/__init__.py +17 -0
  272. tnfr/services/orchestrator.py +325 -0
  273. tnfr/sparse/__init__.py +39 -0
  274. tnfr/sparse/representations.py +492 -0
  275. tnfr/structural.py +639 -263
  276. tnfr/structural.pyi +83 -0
  277. tnfr/telemetry/__init__.py +35 -0
  278. tnfr/telemetry/cache_metrics.py +226 -0
  279. tnfr/telemetry/cache_metrics.pyi +64 -0
  280. tnfr/telemetry/nu_f.py +422 -0
  281. tnfr/telemetry/nu_f.pyi +108 -0
  282. tnfr/telemetry/verbosity.py +36 -0
  283. tnfr/telemetry/verbosity.pyi +15 -0
  284. tnfr/tokens.py +2 -4
  285. tnfr/tokens.pyi +36 -0
  286. tnfr/tools/__init__.py +20 -0
  287. tnfr/tools/domain_templates.py +478 -0
  288. tnfr/tools/sequence_generator.py +846 -0
  289. tnfr/topology/__init__.py +13 -0
  290. tnfr/topology/asymmetry.py +151 -0
  291. tnfr/trace.py +300 -126
  292. tnfr/trace.pyi +42 -0
  293. tnfr/tutorials/__init__.py +38 -0
  294. tnfr/tutorials/autonomous_evolution.py +285 -0
  295. tnfr/tutorials/interactive.py +1576 -0
  296. tnfr/tutorials/structural_metabolism.py +238 -0
  297. tnfr/types.py +743 -12
  298. tnfr/types.pyi +357 -0
  299. tnfr/units.py +68 -0
  300. tnfr/units.pyi +13 -0
  301. tnfr/utils/__init__.py +282 -0
  302. tnfr/utils/__init__.pyi +215 -0
  303. tnfr/utils/cache.py +4223 -0
  304. tnfr/utils/cache.pyi +470 -0
  305. tnfr/{callback_utils.py → utils/callbacks.py} +26 -39
  306. tnfr/utils/callbacks.pyi +49 -0
  307. tnfr/utils/chunks.py +108 -0
  308. tnfr/utils/chunks.pyi +22 -0
  309. tnfr/utils/data.py +428 -0
  310. tnfr/utils/data.pyi +74 -0
  311. tnfr/utils/graph.py +85 -0
  312. tnfr/utils/graph.pyi +10 -0
  313. tnfr/utils/init.py +821 -0
  314. tnfr/utils/init.pyi +80 -0
  315. tnfr/utils/io.py +559 -0
  316. tnfr/utils/io.pyi +66 -0
  317. tnfr/{helpers → utils}/numeric.py +51 -24
  318. tnfr/utils/numeric.pyi +21 -0
  319. tnfr/validation/__init__.py +257 -0
  320. tnfr/validation/__init__.pyi +85 -0
  321. tnfr/validation/compatibility.py +460 -0
  322. tnfr/validation/compatibility.pyi +6 -0
  323. tnfr/validation/config.py +73 -0
  324. tnfr/validation/graph.py +139 -0
  325. tnfr/validation/graph.pyi +18 -0
  326. tnfr/validation/input_validation.py +755 -0
  327. tnfr/validation/invariants.py +712 -0
  328. tnfr/validation/rules.py +253 -0
  329. tnfr/validation/rules.pyi +44 -0
  330. tnfr/validation/runtime.py +279 -0
  331. tnfr/validation/runtime.pyi +28 -0
  332. tnfr/validation/sequence_validator.py +162 -0
  333. tnfr/validation/soft_filters.py +170 -0
  334. tnfr/validation/soft_filters.pyi +32 -0
  335. tnfr/validation/spectral.py +164 -0
  336. tnfr/validation/spectral.pyi +42 -0
  337. tnfr/validation/validator.py +1266 -0
  338. tnfr/validation/window.py +39 -0
  339. tnfr/validation/window.pyi +1 -0
  340. tnfr/visualization/__init__.py +98 -0
  341. tnfr/visualization/cascade_viz.py +256 -0
  342. tnfr/visualization/hierarchy.py +284 -0
  343. tnfr/visualization/sequence_plotter.py +784 -0
  344. tnfr/viz/__init__.py +60 -0
  345. tnfr/viz/matplotlib.py +278 -0
  346. tnfr/viz/matplotlib.pyi +35 -0
  347. tnfr-8.5.0.dist-info/METADATA +573 -0
  348. tnfr-8.5.0.dist-info/RECORD +353 -0
  349. {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/entry_points.txt +1 -0
  350. {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/licenses/LICENSE.md +1 -1
  351. tnfr/collections_utils.py +0 -300
  352. tnfr/config.py +0 -32
  353. tnfr/grammar.py +0 -344
  354. tnfr/graph_utils.py +0 -84
  355. tnfr/helpers/__init__.py +0 -71
  356. tnfr/import_utils.py +0 -228
  357. tnfr/json_utils.py +0 -162
  358. tnfr/logging_utils.py +0 -116
  359. tnfr/presets.py +0 -60
  360. tnfr/validators.py +0 -84
  361. tnfr/value_utils.py +0 -59
  362. tnfr-4.5.2.dist-info/METADATA +0 -379
  363. tnfr-4.5.2.dist-info/RECORD +0 -67
  364. {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/WHEEL +0 -0
  365. {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/top_level.txt +0 -0
tnfr/mathematics/epi.py
@@ -0,0 +1,391 @@
+ """EPI elements and algebraic helpers for the TNFR Banach space."""
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from typing import TYPE_CHECKING, Callable, Mapping, Sequence
+
+ import numpy as np
+
+ if TYPE_CHECKING:
+     from .spaces import BanachSpaceEPI
+
+ __all__ = [
+     "BEPIElement",
+     "CoherenceEvaluation",
+     "evaluate_coherence_transform",
+ ]
+
+
+ class _EPIValidators:
+     """Shared validation helpers for EPI Banach constructions."""
+
+     _complex_dtype = np.complex128
+
+     @staticmethod
+     def _as_array(
+         values: Sequence[complex] | np.ndarray, *, dtype: np.dtype
+     ) -> np.ndarray:
+         array = np.asarray(values, dtype=dtype)
+         if array.ndim != 1:
+             raise ValueError("Inputs must be one-dimensional arrays.")
+         if not np.all(np.isfinite(array)):
+             raise ValueError("Inputs must not contain NaNs or infinities.")
+         return array
+
+     @classmethod
+     def _validate_grid(
+         cls, grid: Sequence[float] | np.ndarray, expected_size: int
+     ) -> np.ndarray:
+         array = np.asarray(grid, dtype=float)
+         if array.ndim != 1:
+             raise ValueError("x_grid must be one-dimensional.")
+         if array.size != expected_size:
+             raise ValueError("x_grid length must match continuous component.")
+         if array.size < 2:
+             raise ValueError("x_grid must contain at least two points.")
+         if not np.all(np.isfinite(array)):
+             raise ValueError("x_grid must not contain NaNs or infinities.")
+
+         spacings = np.diff(array)
+         if np.any(spacings <= 0):
+             raise ValueError("x_grid must be strictly increasing.")
+         if not np.allclose(spacings, spacings[0], rtol=1e-9, atol=1e-12):
+             raise ValueError("x_grid must be uniform for finite-difference stability.")
+         return array
+
+     @classmethod
+     def validate_domain(
+         cls,
+         f_continuous: Sequence[complex] | np.ndarray,
+         a_discrete: Sequence[complex] | np.ndarray,
+         x_grid: Sequence[float] | np.ndarray | None = None,
+     ) -> tuple[np.ndarray, np.ndarray, np.ndarray | None]:
+         """Validate dimensionality and sampling grid compatibility."""
+
+         f_array = cls._as_array(f_continuous, dtype=cls._complex_dtype)
+         a_array = cls._as_array(a_discrete, dtype=cls._complex_dtype)
+
+         if x_grid is None:
+             return f_array, a_array, None
+
+         grid_array = cls._validate_grid(x_grid, f_array.size)
+         return f_array, a_array, grid_array
+
+
+ @dataclass(frozen=True)
+ class BEPIElement(_EPIValidators):
+     r"""Concrete :math:`C^0([0,1]) \oplus \ell^2` element with TNFR operations."""
+
+     f_continuous: Sequence[complex] | np.ndarray
+     a_discrete: Sequence[complex] | np.ndarray
+     x_grid: Sequence[float] | np.ndarray
+
+     def __post_init__(self) -> None:
+         f_array, a_array, grid = self.validate_domain(
+             self.f_continuous, self.a_discrete, self.x_grid
+         )
+         if grid is None:
+             raise ValueError("x_grid is mandatory for BEPIElement instances.")
+         object.__setattr__(self, "f_continuous", f_array)
+         object.__setattr__(self, "a_discrete", a_array)
+         object.__setattr__(self, "x_grid", grid)
+
+     def _assert_compatible(self, other: BEPIElement) -> None:
+         if self.f_continuous.shape != other.f_continuous.shape:
+             raise ValueError("Continuous components must share shape for direct sums.")
+         if self.a_discrete.shape != other.a_discrete.shape:
+             raise ValueError("Discrete tails must share shape for direct sums.")
+         if not np.allclose(self.x_grid, other.x_grid, rtol=1e-12, atol=1e-12):
+             raise ValueError("x_grid must match to combine EPI elements.")
+
+     def direct_sum(self, other: BEPIElement) -> BEPIElement:
+         """Return the algebraic direct sum ``self ⊕ other``."""
+
+         self._assert_compatible(other)
+         return BEPIElement(
+             self.f_continuous + other.f_continuous,
+             self.a_discrete + other.a_discrete,
+             self.x_grid,
+         )
+
+     def tensor(self, vector: Sequence[complex] | np.ndarray) -> np.ndarray:
+         """Return the tensor product between the discrete tail and a Hilbert vector."""
+
+         hilbert_vector = self._as_array(vector, dtype=self._complex_dtype)
+         return np.outer(self.a_discrete, hilbert_vector)
+
+     def adjoint(self) -> BEPIElement:
+         """Return the conjugate element representing the ``*`` operation."""
+
+         return BEPIElement(
+             np.conjugate(self.f_continuous), np.conjugate(self.a_discrete), self.x_grid
+         )
+
+     @staticmethod
+     def _apply_transform(
+         transform: Callable[[np.ndarray], np.ndarray], values: np.ndarray
+     ) -> np.ndarray:
+         result = np.asarray(transform(values), dtype=np.complex128)
+         if result.shape != values.shape:
+             raise ValueError("Transforms must preserve the element shape.")
+         if not np.all(np.isfinite(result)):
+             raise ValueError("Transforms must return finite values.")
+         return result
+
+     def compose(
+         self,
+         transform: Callable[[np.ndarray], np.ndarray],
+         *,
+         spectral_transform: Callable[[np.ndarray], np.ndarray] | None = None,
+     ) -> BEPIElement:
+         """Compose the element with linear transforms on both components."""
+
+         new_f = self._apply_transform(transform, self.f_continuous)
+         spectral_fn = spectral_transform or transform
+         new_a = self._apply_transform(spectral_fn, self.a_discrete)
+         return BEPIElement(new_f, new_a, self.x_grid)
+
+     def _max_magnitude(self) -> float:
+         mags = []
+         if self.f_continuous.size:
+             mags.append(float(np.max(np.abs(self.f_continuous))))
+         if self.a_discrete.size:
+             mags.append(float(np.max(np.abs(self.a_discrete))))
+         return float(max(mags)) if mags else 0.0
+
+     def __float__(self) -> float:
+         return self._max_magnitude()
+
+     def __abs__(self) -> float:
+         return self._max_magnitude()
+
+     def __getstate__(self) -> dict[str, tuple[complex, ...] | tuple[float, ...]]:
+         """Serialize BEPIElement to a JSON-compatible dict with real/imag pairs.
+
+         This method enables pickle, JSON, and YAML serialization while preserving
+         TNFR invariant #1 (EPI as coherent form) and #7 (operational fractality).
+         """
+         # Convert numpy arrays to lists for serialization
+         continuous = self.f_continuous.tolist()
+         discrete = self.a_discrete.tolist()
+         grid = self.x_grid.tolist()
+
+         return {
+             "continuous": tuple(continuous),
+             "discrete": tuple(discrete),
+             "grid": tuple(grid),
+         }
+
+     def __setstate__(
+         self, state: dict[str, tuple[complex, ...] | tuple[float, ...]]
+     ) -> None:
+         """Deserialize BEPIElement from a dict representation.
+
+         Restores the structural integrity by validating and converting back to numpy arrays.
+         """
+         f_array, a_array, grid = self.validate_domain(
+             state["continuous"], state["discrete"], state["grid"]
+         )
+         if grid is None:
+             raise ValueError("x_grid is mandatory for BEPIElement instances.")
+         object.__setattr__(self, "f_continuous", f_array)
+         object.__setattr__(self, "a_discrete", a_array)
+         object.__setattr__(self, "x_grid", grid)
+
+     def __add__(self, other: BEPIElement | float | int) -> BEPIElement:
+         """Add a scalar or another BEPIElement to this element."""
+         if isinstance(other, (int, float)):
+             # Scalar addition: broadcast to all components
+             scalar = complex(other)
+             return BEPIElement(
+                 self.f_continuous + scalar, self.a_discrete + scalar, self.x_grid
+             )
+         elif isinstance(other, BEPIElement):
+             # Element addition: use direct_sum
+             return self.direct_sum(other)
+         return NotImplemented
+
+     def __radd__(self, other: float | int) -> BEPIElement:
+         """Support reversed addition (scalar + BEPIElement)."""
+         return self.__add__(other)
+
+     def __sub__(self, other: BEPIElement | float | int) -> BEPIElement:
+         """Subtract a scalar or another BEPIElement from this element."""
+         if isinstance(other, (int, float)):
+             scalar = complex(other)
+             return BEPIElement(
+                 self.f_continuous - scalar, self.a_discrete - scalar, self.x_grid
+             )
+         elif isinstance(other, BEPIElement):
+             self._assert_compatible(other)
+             return BEPIElement(
+                 self.f_continuous - other.f_continuous,
+                 self.a_discrete - other.a_discrete,
+                 self.x_grid,
+             )
+         return NotImplemented
+
+     def __rsub__(self, other: float | int) -> BEPIElement:
+         """Support reversed subtraction (scalar - BEPIElement)."""
+         if isinstance(other, (int, float)):
+             scalar = complex(other)
+             return BEPIElement(
+                 scalar - self.f_continuous, scalar - self.a_discrete, self.x_grid
+             )
+         return NotImplemented
+
+     def __mul__(self, other: float | int) -> BEPIElement:
+         """Multiply this element by a scalar."""
+         if isinstance(other, (int, float)):
+             scalar = complex(other)
+             return BEPIElement(
+                 self.f_continuous * scalar, self.a_discrete * scalar, self.x_grid
+             )
+         return NotImplemented
+
+     def __rmul__(self, other: float | int) -> BEPIElement:
+         """Support reversed multiplication (scalar * BEPIElement)."""
+         return self.__mul__(other)
+
+     def __truediv__(self, other: float | int) -> BEPIElement:
+         """Divide this element by a scalar."""
+         if isinstance(other, (int, float)):
+             scalar = complex(other)
+             if scalar == 0:
+                 raise ZeroDivisionError("Cannot divide BEPIElement by zero")
+             return BEPIElement(
+                 self.f_continuous / scalar, self.a_discrete / scalar, self.x_grid
+             )
+         return NotImplemented
+
+     def __eq__(self, other: object) -> bool:
+         """Check equality with another BEPIElement or numeric value.
+
+         When comparing to a numeric value, compares with the maximum magnitude.
+         """
+         if isinstance(other, BEPIElement):
+             return (
+                 np.allclose(
+                     self.f_continuous, other.f_continuous, rtol=1e-12, atol=1e-12
+                 )
+                 and np.allclose(
+                     self.a_discrete, other.a_discrete, rtol=1e-12, atol=1e-12
+                 )
+                 and np.allclose(self.x_grid, other.x_grid, rtol=1e-12, atol=1e-12)
+             )
+         elif isinstance(other, (int, float)):
+             # Compare with maximum magnitude for numeric comparisons
+             # Use consistent tolerance with element comparisons
+             return abs(self._max_magnitude() - float(other)) < 1e-12
+         return NotImplemented
+
+
+ @dataclass(frozen=True)
+ class CoherenceEvaluation:
+     """Container describing the outcome of a coherence transform evaluation."""
+
+     element: BEPIElement
+     transformed: BEPIElement
+     coherence_before: float
+     coherence_after: float
+     kappa: float
+     tolerance: float
+     satisfied: bool
+     required: float
+     deficit: float
+     ratio: float
+
+
+ def evaluate_coherence_transform(
+     element: BEPIElement,
+     transform: Callable[[BEPIElement], BEPIElement],
+     *,
+     kappa: float = 1.0,
+     tolerance: float = 1e-9,
+     space: "BanachSpaceEPI" | None = None,
+     norm_kwargs: Mapping[str, float] | None = None,
+ ) -> CoherenceEvaluation:
+     """Apply ``transform`` to ``element`` and verify a coherence inequality.
+
+     Parameters
+     ----------
+     element:
+         The :class:`BEPIElement` subject to the transformation.
+     transform:
+         Callable receiving ``element`` and returning the transformed
+         :class:`BEPIElement`. The callable is expected to preserve the
+         structural sampling grid and dimensionality of the element.
+     kappa:
+         Factor on the right-hand side of the inequality ``C(T(EPI)) ≥ κ·C(EPI)``.
+     tolerance:
+         Non-negative slack applied to the inequality. When
+         ``C(T(EPI)) + tolerance`` exceeds ``κ·C(EPI)`` the check succeeds.
+     space:
+         Optional :class:`~tnfr.mathematics.spaces.BanachSpaceEPI` instance used
+         to compute the coherence norm. When omitted, a local instance is
+         constructed to avoid circular imports at module import time.
+     norm_kwargs:
+         Optional keyword arguments forwarded to
+         :meth:`BanachSpaceEPI.coherence_norm`.
+
+     Returns
+     -------
+     CoherenceEvaluation
+         Dataclass capturing the before/after coherence values together with the
+         inequality verdict.
+     """
+
+     if kappa < 0:
+         raise ValueError("kappa must be non-negative.")
+     if tolerance < 0:
+         raise ValueError("tolerance must be non-negative.")
+
+     if norm_kwargs is None:
+         norm_kwargs = {}
+
+     from .spaces import BanachSpaceEPI  # Local import to avoid circular dependency
+
+     working_space = space if space is not None else BanachSpaceEPI()
+
+     coherence_before = working_space.coherence_norm(
+         element.f_continuous,
+         element.a_discrete,
+         x_grid=element.x_grid,
+         **norm_kwargs,
+     )
+
+     transformed = transform(element)
+     if not isinstance(transformed, BEPIElement):
+         raise TypeError("transform must return a BEPIElement instance.")
+
+     coherence_after = working_space.coherence_norm(
+         transformed.f_continuous,
+         transformed.a_discrete,
+         x_grid=transformed.x_grid,
+         **norm_kwargs,
+     )
+
+     required = kappa * coherence_before
+     satisfied = coherence_after + tolerance >= required
+     deficit = max(0.0, required - coherence_after)
+
+     if coherence_before > 0:
+         ratio = coherence_after / coherence_before
+     elif coherence_after > tolerance:
+         ratio = float("inf")
+     else:
+         ratio = 1.0
+
+     return CoherenceEvaluation(
+         element=element,
+         transformed=transformed,
+         coherence_before=coherence_before,
+         coherence_after=coherence_after,
+         kappa=kappa,
+         tolerance=tolerance,
+         satisfied=satisfied,
+         required=required,
+         deficit=deficit,
+         ratio=ratio,
+     )
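
For orientation, the sketch below (not part of the package diff) exercises the BEPIElement algebra and the coherence check added in tnfr/mathematics/epi.py. It assumes tnfr 8.5.0 is installed with NumPy available and that tnfr.mathematics.spaces.BanachSpaceEPI provides the coherence_norm called by evaluate_coherence_transform; the grid, the halving transform, and the kappa threshold are illustrative choices, and the expected ratio near 0.5 holds only if the coherence norm is positively homogeneous.

import numpy as np

from tnfr.mathematics.epi import BEPIElement, evaluate_coherence_transform

# Build an element: a uniform grid on [0, 1], a continuous sample, and a discrete l2 tail.
x_grid = np.linspace(0.0, 1.0, 8)
f_continuous = np.exp(2j * np.pi * x_grid)
a_discrete = np.array([1.0, 0.5j, 0.25])
element = BEPIElement(f_continuous, a_discrete, x_grid)

# Scalar algebra and the direct sum both return new, validated BEPIElement instances.
doubled = 2 * element
combined = element.direct_sum(doubled)

# Apply a contraction and check C(T(EPI)) >= kappa * C(EPI) with the default slack.
report = evaluate_coherence_transform(element, lambda epi: epi * 0.5, kappa=0.4)
print(report.satisfied, report.ratio, report.deficit)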
tnfr/mathematics/epi.pyi
@@ -0,0 +1,65 @@
+ from __future__ import annotations
+
+ import numpy as np
+ from dataclasses import dataclass
+ from typing import Callable, Mapping, Sequence
+
+ __all__ = ["BEPIElement", "CoherenceEvaluation", "evaluate_coherence_transform"]
+
+ class _EPIValidators:
+     @classmethod
+     def validate_domain(
+         cls,
+         f_continuous: Sequence[complex] | np.ndarray,
+         a_discrete: Sequence[complex] | np.ndarray,
+         x_grid: Sequence[float] | np.ndarray | None = None,
+     ) -> tuple[np.ndarray, np.ndarray, np.ndarray | None]: ...
+
+ @dataclass(frozen=True)
+ class BEPIElement(_EPIValidators):
+     f_continuous: Sequence[complex] | np.ndarray
+     a_discrete: Sequence[complex] | np.ndarray
+     x_grid: Sequence[float] | np.ndarray
+     def __post_init__(self) -> None: ...
+     def direct_sum(self, other: BEPIElement) -> BEPIElement: ...
+     def tensor(self, vector: Sequence[complex] | np.ndarray) -> np.ndarray: ...
+     def adjoint(self) -> BEPIElement: ...
+     def compose(
+         self,
+         transform: Callable[[np.ndarray], np.ndarray],
+         *,
+         spectral_transform: Callable[[np.ndarray], np.ndarray] | None = None,
+     ) -> BEPIElement: ...
+     def __float__(self) -> float: ...
+     def __abs__(self) -> float: ...
+     def __add__(self, other: BEPIElement | float | int) -> BEPIElement: ...
+     def __radd__(self, other: float | int) -> BEPIElement: ...
+     def __sub__(self, other: BEPIElement | float | int) -> BEPIElement: ...
+     def __rsub__(self, other: float | int) -> BEPIElement: ...
+     def __mul__(self, other: float | int) -> BEPIElement: ...
+     def __rmul__(self, other: float | int) -> BEPIElement: ...
+     def __truediv__(self, other: float | int) -> BEPIElement: ...
+     def __eq__(self, other: object) -> bool: ...
+
+ @dataclass(frozen=True)
+ class CoherenceEvaluation:
+     element: BEPIElement
+     transformed: BEPIElement
+     coherence_before: float
+     coherence_after: float
+     kappa: float
+     tolerance: float
+     satisfied: bool
+     required: float
+     deficit: float
+     ratio: float
+
+ def evaluate_coherence_transform(
+     element: BEPIElement,
+     transform: Callable[[BEPIElement], BEPIElement],
+     *,
+     kappa: float = 1.0,
+     tolerance: float = 1e-09,
+     space: BanachSpaceEPI | None = None,
+     norm_kwargs: Mapping[str, float] | None = None,
+ ) -> CoherenceEvaluation: ...
tnfr/mathematics/generators.py
@@ -0,0 +1,242 @@
+ """ΔNFR generator construction utilities."""
+
+ from __future__ import annotations
+
+ from typing import Final, Sequence
+
+ import numpy as np
+ from numpy.random import Generator
+
+ from .backend import ensure_array, ensure_numpy, get_backend
+
+ __all__ = ["build_delta_nfr", "build_lindblad_delta_nfr"]
+
+ _TOPOLOGIES: Final[set[str]] = {"laplacian", "adjacency"}
+
+
+ def _ring_adjacency(dim: int) -> np.ndarray:
+     """Return the adjacency matrix for a coherent ring topology."""
+
+     adjacency: np.ndarray = np.zeros((dim, dim), dtype=float)
+     if dim == 1:
+         return adjacency
+
+     indices = np.arange(dim)
+     adjacency[indices, (indices + 1) % dim] = 1.0
+     adjacency[(indices + 1) % dim, indices] = 1.0
+     return adjacency
+
+
+ def _laplacian_from_adjacency(adjacency: np.ndarray) -> np.ndarray:
+     """Construct a Laplacian operator from an adjacency matrix."""
+
+     degrees = adjacency.sum(axis=1)
+     laplacian = np.diag(degrees) - adjacency
+     return laplacian
+
+
+ def _hermitian_noise(dim: int, rng: Generator) -> np.ndarray:
+     """Generate a Hermitian noise matrix with reproducible statistics."""
+
+     real = rng.standard_normal((dim, dim))
+     imag = rng.standard_normal((dim, dim))
+     noise = real + 1j * imag
+     return 0.5 * (noise + noise.conj().T)
+
+
+ def _as_square_matrix(
+     matrix: Sequence[Sequence[complex]] | np.ndarray,
+     *,
+     expected_dim: int | None = None,
+     label: str = "matrix",
+ ) -> np.ndarray:
+     """Return ``matrix`` as a square :class:`numpy.ndarray` with validation."""
+
+     array = np.asarray(matrix, dtype=np.complex128)
+     if array.ndim != 2 or array.shape[0] != array.shape[1]:
+         raise ValueError(f"{label} must be a square matrix.")
+     if expected_dim is not None and array.shape[0] != expected_dim:
+         raise ValueError(
+             f"{label} dimension mismatch: expected {expected_dim}, received {array.shape[0]}."
+         )
+     return array
+
+
+ def build_delta_nfr(
+     dim: int,
+     *,
+     topology: str = "laplacian",
+     nu_f: float = 1.0,
+     scale: float = 1.0,
+     rng: Generator | None = None,
+ ) -> np.ndarray:
+     """Construct a Hermitian ΔNFR generator using canonical TNFR topologies.
+
+     Parameters
+     ----------
+     dim:
+         Dimensionality of the Hilbert space supporting the ΔNFR operator.
+     topology:
+         Requested canonical topology. Supported values are ``"laplacian"``
+         and ``"adjacency"``.
+     nu_f:
+         Structural frequency scaling applied to the resulting operator.
+     scale:
+         Additional scaling applied uniformly to the operator amplitude.
+     rng:
+         Optional NumPy :class:`~numpy.random.Generator` used to inject
+         reproducible Hermitian noise.
+     """
+
+     if dim <= 0:
+         raise ValueError("ΔNFR generators require a positive dimensionality.")
+
+     if topology not in _TOPOLOGIES:
+         allowed = ", ".join(sorted(_TOPOLOGIES))
+         raise ValueError(
+             f"Unknown ΔNFR topology: {topology}. Expected one of: {allowed}."
+         )
+
+     adjacency = _ring_adjacency(dim)
+     if topology == "laplacian":
+         base = _laplacian_from_adjacency(adjacency)
+     else:
+         base = adjacency
+
+     matrix: np.ndarray = base.astype(np.complex128, copy=False)
+
+     if rng is not None:
+         noise = _hermitian_noise(dim, rng)
+         matrix = matrix + (1.0 / np.sqrt(dim)) * noise
+
+     matrix *= nu_f * scale
+     hermitian = 0.5 * (matrix + matrix.conj().T)
+     backend = get_backend()
+     return np.asarray(
+         ensure_numpy(ensure_array(hermitian, backend=backend), backend=backend),
+         dtype=np.complex128,
+     )
+
+
+ def build_lindblad_delta_nfr(
+     *,
+     hamiltonian: Sequence[Sequence[complex]] | np.ndarray | None = None,
+     collapse_operators: (
+         Sequence[Sequence[Sequence[complex]] | np.ndarray] | None
+     ) = None,
+     dim: int | None = None,
+     nu_f: float = 1.0,
+     scale: float = 1.0,
+     ensure_trace_preserving: bool = True,
+     ensure_contractive: bool = True,
+     atol: float = 1e-9,
+ ) -> np.ndarray:
+     """Construct a Lindblad ΔNFR generator in Liouville space.
+
+     The resulting matrix acts on vectorised density operators using the
+     canonical column-major flattening. The construction follows the standard
+     Gorini–Kossakowski–Sudarshan–Lindblad prescription while exposing TNFR
+     semantics through ``ν_f`` and ``scale``.
+
+     Parameters
+     ----------
+     hamiltonian:
+         Optional coherent component. When ``None`` a null Hamiltonian is
+         assumed.
+     collapse_operators:
+         Iterable with the dissipative operators driving the contractive
+         semigroup. Each entry must be square with the same dimension as the
+         Hamiltonian. When ``None`` the generator reduces to the coherent part.
+     dim:
+         Explicit Hilbert-space dimension. Only required if neither
+         ``hamiltonian`` nor ``collapse_operators`` are provided. When supplied,
+         it must match the dimension inferred from the Hamiltonian and collapse
+         operators.
+     nu_f, scale:
+         Structural frequency scaling applied uniformly to the final generator.
+     ensure_trace_preserving:
+         When ``True`` (default) the resulting superoperator is validated to
+         leave the identity invariant.
+     ensure_contractive:
+         When ``True`` (default) the spectrum is required to have non-positive
+         real parts within ``atol``.
+     atol:
+         Absolute tolerance used for Hermiticity, trace and spectral checks.
+     """
+
+     operators = list(collapse_operators or [])
+
+     inferred_dim: int | None = dim
+     if hamiltonian is not None:
+         hermitian = _as_square_matrix(hamiltonian, label="hamiltonian")
+         inferred_dim = hermitian.shape[0]
+     elif operators:
+         inferred_dim = _as_square_matrix(
+             operators[0], label="collapse operator[0]"
+         ).shape[0]
+
+     if inferred_dim is None:
+         raise ValueError("dim must be supplied when no operators are provided.")
+
+     if inferred_dim <= 0:
+         raise ValueError("ΔNFR generators require a positive dimension.")
+
+     dimension = inferred_dim
+
+     if dim is not None and dim != dimension:
+         raise ValueError(
+             "Provided dim is inconsistent with the supplied operators: "
+             f"expected {dimension}, received {dim}."
+         )
+
+     if hamiltonian is None:
+         hermitian = np.zeros((dimension, dimension), dtype=np.complex128)
+     else:
+         hermitian = _as_square_matrix(
+             hamiltonian, expected_dim=dimension, label="hamiltonian"
+         )
+         if not np.allclose(hermitian, hermitian.conj().T, atol=atol):
+             raise ValueError(
+                 "Hamiltonian component must be Hermitian within tolerance."
+             )
+
+     dissipators = [
+         _as_square_matrix(
+             operator, expected_dim=dimension, label=f"collapse operator[{index}]"
+         )
+         for index, operator in enumerate(operators)
+     ]
+
+     identity = np.eye(dimension, dtype=np.complex128)
+     liouvillian = -1j * (np.kron(identity, hermitian) - np.kron(hermitian.T, identity))
+
+     for operator in dissipators:
+         adjoint_product = operator.conj().T @ operator
+         liouvillian += np.kron(operator.conj(), operator)
+         liouvillian -= 0.5 * np.kron(identity, adjoint_product)
+         liouvillian -= 0.5 * np.kron(adjoint_product.T, identity)
+
+     liouvillian *= nu_f * scale
+
+     if ensure_trace_preserving:
+         identity_vec = identity.reshape(dimension * dimension, order="F")
+         left_residual = identity_vec.conj().T @ liouvillian
+         if not np.allclose(left_residual, np.zeros_like(left_residual), atol=10 * atol):
+             raise ValueError(
+                 "Lindblad generator must preserve the trace of density operators."
+             )
+
+     backend = get_backend()
+     liouvillian_backend = ensure_array(liouvillian, backend=backend)
+
+     if ensure_contractive:
+         eigenvalues_backend, _ = backend.eig(liouvillian_backend)
+         eigenvalues = ensure_numpy(eigenvalues_backend, backend=backend)
+         if np.max(eigenvalues.real) > atol:
+             raise ValueError(
+                 "Lindblad generator is not contractive: spectrum has positive real components."
+             )
+
+     return np.asarray(
+         ensure_numpy(liouvillian_backend, backend=backend), dtype=np.complex128
+     )
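
As a closing orientation note, the sketch below (not part of the package diff) builds the two ΔNFR generators exported by tnfr/mathematics/generators.py. It assumes tnfr 8.5.0 is installed with the default NumPy mathematics backend active; the dimension, seed, drive, and decay rate are illustrative values chosen here, not library defaults.

import numpy as np

from tnfr.mathematics.generators import build_delta_nfr, build_lindblad_delta_nfr

# Hermitian ring-Laplacian generator with reproducible Hermitian noise.
rng = np.random.default_rng(42)
delta_nfr = build_delta_nfr(4, topology="laplacian", nu_f=2.0, rng=rng)
assert np.allclose(delta_nfr, delta_nfr.conj().T)  # Hermitian by construction

# Lindblad generator for a driven, decaying qubit (sigma_x drive, sigma_minus collapse).
sigma_x = np.array([[0.0, 1.0], [1.0, 0.0]], dtype=complex)
sigma_minus = np.array([[0.0, 1.0], [0.0, 0.0]], dtype=complex)
liouvillian = build_lindblad_delta_nfr(
    hamiltonian=sigma_x,
    collapse_operators=[np.sqrt(0.1) * sigma_minus],
)
print(liouvillian.shape)  # (4, 4): acts on column-major vectorised 2x2 density matrices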