tnfr 3.0.3__py3-none-any.whl → 8.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of tnfr might be problematic. Click here for more details.

Files changed (360) hide show
  1. tnfr/__init__.py +375 -56
  2. tnfr/__init__.pyi +33 -0
  3. tnfr/_compat.py +10 -0
  4. tnfr/_generated_version.py +34 -0
  5. tnfr/_version.py +49 -0
  6. tnfr/_version.pyi +7 -0
  7. tnfr/alias.py +723 -0
  8. tnfr/alias.pyi +108 -0
  9. tnfr/backends/__init__.py +354 -0
  10. tnfr/backends/jax_backend.py +173 -0
  11. tnfr/backends/numpy_backend.py +238 -0
  12. tnfr/backends/optimized_numpy.py +420 -0
  13. tnfr/backends/torch_backend.py +408 -0
  14. tnfr/cache.py +171 -0
  15. tnfr/cache.pyi +13 -0
  16. tnfr/cli/__init__.py +110 -0
  17. tnfr/cli/__init__.pyi +26 -0
  18. tnfr/cli/arguments.py +489 -0
  19. tnfr/cli/arguments.pyi +29 -0
  20. tnfr/cli/execution.py +914 -0
  21. tnfr/cli/execution.pyi +70 -0
  22. tnfr/cli/interactive_validator.py +614 -0
  23. tnfr/cli/utils.py +51 -0
  24. tnfr/cli/utils.pyi +7 -0
  25. tnfr/cli/validate.py +236 -0
  26. tnfr/compat/__init__.py +85 -0
  27. tnfr/compat/dataclass.py +136 -0
  28. tnfr/compat/jsonschema_stub.py +61 -0
  29. tnfr/compat/matplotlib_stub.py +73 -0
  30. tnfr/compat/numpy_stub.py +155 -0
  31. tnfr/config/__init__.py +224 -0
  32. tnfr/config/__init__.pyi +10 -0
  33. tnfr/config/constants.py +104 -0
  34. tnfr/config/constants.pyi +12 -0
  35. tnfr/config/defaults.py +54 -0
  36. tnfr/config/defaults_core.py +212 -0
  37. tnfr/config/defaults_init.py +33 -0
  38. tnfr/config/defaults_metric.py +104 -0
  39. tnfr/config/feature_flags.py +81 -0
  40. tnfr/config/feature_flags.pyi +16 -0
  41. tnfr/config/glyph_constants.py +31 -0
  42. tnfr/config/init.py +77 -0
  43. tnfr/config/init.pyi +8 -0
  44. tnfr/config/operator_names.py +254 -0
  45. tnfr/config/operator_names.pyi +36 -0
  46. tnfr/config/physics_derivation.py +354 -0
  47. tnfr/config/presets.py +83 -0
  48. tnfr/config/presets.pyi +7 -0
  49. tnfr/config/security.py +927 -0
  50. tnfr/config/thresholds.py +114 -0
  51. tnfr/config/tnfr_config.py +498 -0
  52. tnfr/constants/__init__.py +92 -0
  53. tnfr/constants/__init__.pyi +92 -0
  54. tnfr/constants/aliases.py +33 -0
  55. tnfr/constants/aliases.pyi +27 -0
  56. tnfr/constants/init.py +33 -0
  57. tnfr/constants/init.pyi +12 -0
  58. tnfr/constants/metric.py +104 -0
  59. tnfr/constants/metric.pyi +19 -0
  60. tnfr/core/__init__.py +33 -0
  61. tnfr/core/container.py +226 -0
  62. tnfr/core/default_implementations.py +329 -0
  63. tnfr/core/interfaces.py +279 -0
  64. tnfr/dynamics/__init__.py +238 -0
  65. tnfr/dynamics/__init__.pyi +83 -0
  66. tnfr/dynamics/adaptation.py +267 -0
  67. tnfr/dynamics/adaptation.pyi +7 -0
  68. tnfr/dynamics/adaptive_sequences.py +189 -0
  69. tnfr/dynamics/adaptive_sequences.pyi +14 -0
  70. tnfr/dynamics/aliases.py +23 -0
  71. tnfr/dynamics/aliases.pyi +19 -0
  72. tnfr/dynamics/bifurcation.py +232 -0
  73. tnfr/dynamics/canonical.py +229 -0
  74. tnfr/dynamics/canonical.pyi +48 -0
  75. tnfr/dynamics/coordination.py +385 -0
  76. tnfr/dynamics/coordination.pyi +25 -0
  77. tnfr/dynamics/dnfr.py +3034 -0
  78. tnfr/dynamics/dnfr.pyi +26 -0
  79. tnfr/dynamics/dynamic_limits.py +225 -0
  80. tnfr/dynamics/feedback.py +252 -0
  81. tnfr/dynamics/feedback.pyi +24 -0
  82. tnfr/dynamics/fused_dnfr.py +454 -0
  83. tnfr/dynamics/homeostasis.py +157 -0
  84. tnfr/dynamics/homeostasis.pyi +14 -0
  85. tnfr/dynamics/integrators.py +661 -0
  86. tnfr/dynamics/integrators.pyi +36 -0
  87. tnfr/dynamics/learning.py +310 -0
  88. tnfr/dynamics/learning.pyi +33 -0
  89. tnfr/dynamics/metabolism.py +254 -0
  90. tnfr/dynamics/nbody.py +796 -0
  91. tnfr/dynamics/nbody_tnfr.py +783 -0
  92. tnfr/dynamics/propagation.py +326 -0
  93. tnfr/dynamics/runtime.py +908 -0
  94. tnfr/dynamics/runtime.pyi +77 -0
  95. tnfr/dynamics/sampling.py +36 -0
  96. tnfr/dynamics/sampling.pyi +7 -0
  97. tnfr/dynamics/selectors.py +711 -0
  98. tnfr/dynamics/selectors.pyi +85 -0
  99. tnfr/dynamics/structural_clip.py +207 -0
  100. tnfr/errors/__init__.py +37 -0
  101. tnfr/errors/contextual.py +492 -0
  102. tnfr/execution.py +223 -0
  103. tnfr/execution.pyi +45 -0
  104. tnfr/extensions/__init__.py +205 -0
  105. tnfr/extensions/__init__.pyi +18 -0
  106. tnfr/extensions/base.py +173 -0
  107. tnfr/extensions/base.pyi +35 -0
  108. tnfr/extensions/business/__init__.py +71 -0
  109. tnfr/extensions/business/__init__.pyi +11 -0
  110. tnfr/extensions/business/cookbook.py +88 -0
  111. tnfr/extensions/business/cookbook.pyi +8 -0
  112. tnfr/extensions/business/health_analyzers.py +202 -0
  113. tnfr/extensions/business/health_analyzers.pyi +9 -0
  114. tnfr/extensions/business/patterns.py +183 -0
  115. tnfr/extensions/business/patterns.pyi +8 -0
  116. tnfr/extensions/medical/__init__.py +73 -0
  117. tnfr/extensions/medical/__init__.pyi +11 -0
  118. tnfr/extensions/medical/cookbook.py +88 -0
  119. tnfr/extensions/medical/cookbook.pyi +8 -0
  120. tnfr/extensions/medical/health_analyzers.py +181 -0
  121. tnfr/extensions/medical/health_analyzers.pyi +9 -0
  122. tnfr/extensions/medical/patterns.py +163 -0
  123. tnfr/extensions/medical/patterns.pyi +8 -0
  124. tnfr/flatten.py +262 -0
  125. tnfr/flatten.pyi +21 -0
  126. tnfr/gamma.py +354 -0
  127. tnfr/gamma.pyi +36 -0
  128. tnfr/glyph_history.py +377 -0
  129. tnfr/glyph_history.pyi +35 -0
  130. tnfr/glyph_runtime.py +19 -0
  131. tnfr/glyph_runtime.pyi +8 -0
  132. tnfr/immutable.py +218 -0
  133. tnfr/immutable.pyi +36 -0
  134. tnfr/initialization.py +203 -0
  135. tnfr/initialization.pyi +65 -0
  136. tnfr/io.py +10 -0
  137. tnfr/io.pyi +13 -0
  138. tnfr/locking.py +37 -0
  139. tnfr/locking.pyi +7 -0
  140. tnfr/mathematics/__init__.py +79 -0
  141. tnfr/mathematics/backend.py +453 -0
  142. tnfr/mathematics/backend.pyi +99 -0
  143. tnfr/mathematics/dynamics.py +408 -0
  144. tnfr/mathematics/dynamics.pyi +90 -0
  145. tnfr/mathematics/epi.py +391 -0
  146. tnfr/mathematics/epi.pyi +65 -0
  147. tnfr/mathematics/generators.py +242 -0
  148. tnfr/mathematics/generators.pyi +29 -0
  149. tnfr/mathematics/metrics.py +119 -0
  150. tnfr/mathematics/metrics.pyi +16 -0
  151. tnfr/mathematics/operators.py +239 -0
  152. tnfr/mathematics/operators.pyi +59 -0
  153. tnfr/mathematics/operators_factory.py +124 -0
  154. tnfr/mathematics/operators_factory.pyi +11 -0
  155. tnfr/mathematics/projection.py +87 -0
  156. tnfr/mathematics/projection.pyi +33 -0
  157. tnfr/mathematics/runtime.py +182 -0
  158. tnfr/mathematics/runtime.pyi +64 -0
  159. tnfr/mathematics/spaces.py +256 -0
  160. tnfr/mathematics/spaces.pyi +83 -0
  161. tnfr/mathematics/transforms.py +305 -0
  162. tnfr/mathematics/transforms.pyi +62 -0
  163. tnfr/metrics/__init__.py +79 -0
  164. tnfr/metrics/__init__.pyi +20 -0
  165. tnfr/metrics/buffer_cache.py +163 -0
  166. tnfr/metrics/buffer_cache.pyi +24 -0
  167. tnfr/metrics/cache_utils.py +214 -0
  168. tnfr/metrics/coherence.py +2009 -0
  169. tnfr/metrics/coherence.pyi +129 -0
  170. tnfr/metrics/common.py +158 -0
  171. tnfr/metrics/common.pyi +35 -0
  172. tnfr/metrics/core.py +316 -0
  173. tnfr/metrics/core.pyi +13 -0
  174. tnfr/metrics/diagnosis.py +833 -0
  175. tnfr/metrics/diagnosis.pyi +86 -0
  176. tnfr/metrics/emergence.py +245 -0
  177. tnfr/metrics/export.py +179 -0
  178. tnfr/metrics/export.pyi +7 -0
  179. tnfr/metrics/glyph_timing.py +379 -0
  180. tnfr/metrics/glyph_timing.pyi +81 -0
  181. tnfr/metrics/learning_metrics.py +280 -0
  182. tnfr/metrics/learning_metrics.pyi +21 -0
  183. tnfr/metrics/phase_coherence.py +351 -0
  184. tnfr/metrics/phase_compatibility.py +349 -0
  185. tnfr/metrics/reporting.py +183 -0
  186. tnfr/metrics/reporting.pyi +25 -0
  187. tnfr/metrics/sense_index.py +1203 -0
  188. tnfr/metrics/sense_index.pyi +9 -0
  189. tnfr/metrics/trig.py +373 -0
  190. tnfr/metrics/trig.pyi +13 -0
  191. tnfr/metrics/trig_cache.py +233 -0
  192. tnfr/metrics/trig_cache.pyi +10 -0
  193. tnfr/multiscale/__init__.py +32 -0
  194. tnfr/multiscale/hierarchical.py +517 -0
  195. tnfr/node.py +763 -0
  196. tnfr/node.pyi +139 -0
  197. tnfr/observers.py +255 -130
  198. tnfr/observers.pyi +31 -0
  199. tnfr/ontosim.py +144 -137
  200. tnfr/ontosim.pyi +28 -0
  201. tnfr/operators/__init__.py +1672 -0
  202. tnfr/operators/__init__.pyi +31 -0
  203. tnfr/operators/algebra.py +277 -0
  204. tnfr/operators/canonical_patterns.py +420 -0
  205. tnfr/operators/cascade.py +267 -0
  206. tnfr/operators/cycle_detection.py +358 -0
  207. tnfr/operators/definitions.py +4108 -0
  208. tnfr/operators/definitions.pyi +78 -0
  209. tnfr/operators/grammar.py +1164 -0
  210. tnfr/operators/grammar.pyi +140 -0
  211. tnfr/operators/hamiltonian.py +710 -0
  212. tnfr/operators/health_analyzer.py +809 -0
  213. tnfr/operators/jitter.py +272 -0
  214. tnfr/operators/jitter.pyi +11 -0
  215. tnfr/operators/lifecycle.py +314 -0
  216. tnfr/operators/metabolism.py +618 -0
  217. tnfr/operators/metrics.py +2138 -0
  218. tnfr/operators/network_analysis/__init__.py +27 -0
  219. tnfr/operators/network_analysis/source_detection.py +186 -0
  220. tnfr/operators/nodal_equation.py +395 -0
  221. tnfr/operators/pattern_detection.py +660 -0
  222. tnfr/operators/patterns.py +669 -0
  223. tnfr/operators/postconditions/__init__.py +38 -0
  224. tnfr/operators/postconditions/mutation.py +236 -0
  225. tnfr/operators/preconditions/__init__.py +1226 -0
  226. tnfr/operators/preconditions/coherence.py +305 -0
  227. tnfr/operators/preconditions/dissonance.py +236 -0
  228. tnfr/operators/preconditions/emission.py +128 -0
  229. tnfr/operators/preconditions/mutation.py +580 -0
  230. tnfr/operators/preconditions/reception.py +125 -0
  231. tnfr/operators/preconditions/resonance.py +364 -0
  232. tnfr/operators/registry.py +74 -0
  233. tnfr/operators/registry.pyi +9 -0
  234. tnfr/operators/remesh.py +1809 -0
  235. tnfr/operators/remesh.pyi +26 -0
  236. tnfr/operators/structural_units.py +268 -0
  237. tnfr/operators/unified_grammar.py +105 -0
  238. tnfr/parallel/__init__.py +54 -0
  239. tnfr/parallel/auto_scaler.py +234 -0
  240. tnfr/parallel/distributed.py +384 -0
  241. tnfr/parallel/engine.py +238 -0
  242. tnfr/parallel/gpu_engine.py +420 -0
  243. tnfr/parallel/monitoring.py +248 -0
  244. tnfr/parallel/partitioner.py +459 -0
  245. tnfr/py.typed +0 -0
  246. tnfr/recipes/__init__.py +22 -0
  247. tnfr/recipes/cookbook.py +743 -0
  248. tnfr/rng.py +178 -0
  249. tnfr/rng.pyi +26 -0
  250. tnfr/schemas/__init__.py +8 -0
  251. tnfr/schemas/grammar.json +94 -0
  252. tnfr/sdk/__init__.py +107 -0
  253. tnfr/sdk/__init__.pyi +19 -0
  254. tnfr/sdk/adaptive_system.py +173 -0
  255. tnfr/sdk/adaptive_system.pyi +21 -0
  256. tnfr/sdk/builders.py +370 -0
  257. tnfr/sdk/builders.pyi +51 -0
  258. tnfr/sdk/fluent.py +1121 -0
  259. tnfr/sdk/fluent.pyi +74 -0
  260. tnfr/sdk/templates.py +342 -0
  261. tnfr/sdk/templates.pyi +41 -0
  262. tnfr/sdk/utils.py +341 -0
  263. tnfr/secure_config.py +46 -0
  264. tnfr/security/__init__.py +70 -0
  265. tnfr/security/database.py +514 -0
  266. tnfr/security/subprocess.py +503 -0
  267. tnfr/security/validation.py +290 -0
  268. tnfr/selector.py +247 -0
  269. tnfr/selector.pyi +19 -0
  270. tnfr/sense.py +378 -0
  271. tnfr/sense.pyi +23 -0
  272. tnfr/services/__init__.py +17 -0
  273. tnfr/services/orchestrator.py +325 -0
  274. tnfr/sparse/__init__.py +39 -0
  275. tnfr/sparse/representations.py +492 -0
  276. tnfr/structural.py +705 -0
  277. tnfr/structural.pyi +83 -0
  278. tnfr/telemetry/__init__.py +35 -0
  279. tnfr/telemetry/cache_metrics.py +226 -0
  280. tnfr/telemetry/cache_metrics.pyi +64 -0
  281. tnfr/telemetry/nu_f.py +422 -0
  282. tnfr/telemetry/nu_f.pyi +108 -0
  283. tnfr/telemetry/verbosity.py +36 -0
  284. tnfr/telemetry/verbosity.pyi +15 -0
  285. tnfr/tokens.py +58 -0
  286. tnfr/tokens.pyi +36 -0
  287. tnfr/tools/__init__.py +20 -0
  288. tnfr/tools/domain_templates.py +478 -0
  289. tnfr/tools/sequence_generator.py +846 -0
  290. tnfr/topology/__init__.py +13 -0
  291. tnfr/topology/asymmetry.py +151 -0
  292. tnfr/trace.py +543 -0
  293. tnfr/trace.pyi +42 -0
  294. tnfr/tutorials/__init__.py +38 -0
  295. tnfr/tutorials/autonomous_evolution.py +285 -0
  296. tnfr/tutorials/interactive.py +1576 -0
  297. tnfr/tutorials/structural_metabolism.py +238 -0
  298. tnfr/types.py +775 -0
  299. tnfr/types.pyi +357 -0
  300. tnfr/units.py +68 -0
  301. tnfr/units.pyi +13 -0
  302. tnfr/utils/__init__.py +282 -0
  303. tnfr/utils/__init__.pyi +215 -0
  304. tnfr/utils/cache.py +4223 -0
  305. tnfr/utils/cache.pyi +470 -0
  306. tnfr/utils/callbacks.py +375 -0
  307. tnfr/utils/callbacks.pyi +49 -0
  308. tnfr/utils/chunks.py +108 -0
  309. tnfr/utils/chunks.pyi +22 -0
  310. tnfr/utils/data.py +428 -0
  311. tnfr/utils/data.pyi +74 -0
  312. tnfr/utils/graph.py +85 -0
  313. tnfr/utils/graph.pyi +10 -0
  314. tnfr/utils/init.py +821 -0
  315. tnfr/utils/init.pyi +80 -0
  316. tnfr/utils/io.py +559 -0
  317. tnfr/utils/io.pyi +66 -0
  318. tnfr/utils/numeric.py +114 -0
  319. tnfr/utils/numeric.pyi +21 -0
  320. tnfr/validation/__init__.py +257 -0
  321. tnfr/validation/__init__.pyi +85 -0
  322. tnfr/validation/compatibility.py +460 -0
  323. tnfr/validation/compatibility.pyi +6 -0
  324. tnfr/validation/config.py +73 -0
  325. tnfr/validation/graph.py +139 -0
  326. tnfr/validation/graph.pyi +18 -0
  327. tnfr/validation/input_validation.py +755 -0
  328. tnfr/validation/invariants.py +712 -0
  329. tnfr/validation/rules.py +253 -0
  330. tnfr/validation/rules.pyi +44 -0
  331. tnfr/validation/runtime.py +279 -0
  332. tnfr/validation/runtime.pyi +28 -0
  333. tnfr/validation/sequence_validator.py +162 -0
  334. tnfr/validation/soft_filters.py +170 -0
  335. tnfr/validation/soft_filters.pyi +32 -0
  336. tnfr/validation/spectral.py +164 -0
  337. tnfr/validation/spectral.pyi +42 -0
  338. tnfr/validation/validator.py +1266 -0
  339. tnfr/validation/window.py +39 -0
  340. tnfr/validation/window.pyi +1 -0
  341. tnfr/visualization/__init__.py +98 -0
  342. tnfr/visualization/cascade_viz.py +256 -0
  343. tnfr/visualization/hierarchy.py +284 -0
  344. tnfr/visualization/sequence_plotter.py +784 -0
  345. tnfr/viz/__init__.py +60 -0
  346. tnfr/viz/matplotlib.py +278 -0
  347. tnfr/viz/matplotlib.pyi +35 -0
  348. tnfr-8.5.0.dist-info/METADATA +573 -0
  349. tnfr-8.5.0.dist-info/RECORD +353 -0
  350. tnfr-8.5.0.dist-info/entry_points.txt +3 -0
  351. tnfr-3.0.3.dist-info/licenses/LICENSE.txt → tnfr-8.5.0.dist-info/licenses/LICENSE.md +1 -1
  352. tnfr/constants.py +0 -183
  353. tnfr/dynamics.py +0 -543
  354. tnfr/helpers.py +0 -198
  355. tnfr/main.py +0 -37
  356. tnfr/operators.py +0 -296
  357. tnfr-3.0.3.dist-info/METADATA +0 -35
  358. tnfr-3.0.3.dist-info/RECORD +0 -13
  359. {tnfr-3.0.3.dist-info → tnfr-8.5.0.dist-info}/WHEEL +0 -0
  360. {tnfr-3.0.3.dist-info → tnfr-8.5.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,420 @@
1
+ """GPU acceleration for TNFR computations.
2
+
3
+ Optional module providing JAX and CuPy integration for GPU-accelerated
4
+ vectorized operations. Requires installation of optional dependencies:
5
+ pip install tnfr[jax] # or
6
+ pip install tnfr[cupy]
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ from typing import Any, Optional
12
+
13
# Optional GPU backends.  Availability flags let the engine degrade
# gracefully to the NumPy CPU implementation when neither CuPy nor JAX
# is installed.
try:
    import cupy as cp

    HAS_CUPY = True
except ImportError:
    HAS_CUPY = False
    cp = None  # type: ignore

try:
    import jax
    import jax.numpy as jnp
    from jax import jit

    HAS_JAX = True
except ImportError:
    HAS_JAX = False
    jax = None  # type: ignore
    jnp = None  # type: ignore
    jit = None  # type: ignore


class TNFRGPUEngine:
    """GPU acceleration engine for TNFR computations.

    Provides vectorized implementations of ΔNFR and other TNFR operations
    using JAX or CuPy backends, with a NumPy CPU fallback.

    Parameters
    ----------
    backend : {"auto", "jax", "cupy", "numpy"}, default="auto"
        GPU backend to use. "auto" prefers JAX, then CuPy, then NumPy fallback.

    Raises
    ------
    ImportError
        If an explicitly requested GPU backend is not installed.

    Examples
    --------
    >>> # Requires JAX or CuPy installation
    >>> try:
    ...     from tnfr.parallel import TNFRGPUEngine
    ...     engine = TNFRGPUEngine(backend="auto")
    ...     # engine.backend in ["jax", "cupy", "numpy"]
    ... except ImportError:
    ...     pass  # Optional dependency not installed

    Notes
    -----
    GPU acceleration provides significant speedup for large dense networks
    but requires compatible hardware and drivers. For sparse networks or
    small graphs, multiprocessing may be more efficient.
    """

    # Canonical gradient mixing weights: topological structure is
    # emphasized (0.7) over phase synchronization (0.3).
    _TOPO_WEIGHT = 0.7
    _PHASE_WEIGHT = 0.3

    def __init__(self, backend: str = "auto"):
        self.backend = self._select_gpu_backend(backend)

    def _select_gpu_backend(self, backend: str) -> str:
        """Resolve *backend* to an available backend name.

        "auto" picks the best available backend; an explicit "jax" or
        "cupy" request raises ImportError when the library is missing.
        Any other value is returned unchanged (the compute dispatch
        treats unrecognized names as the NumPy fallback).
        """
        if backend == "auto":
            if HAS_JAX:
                return "jax"
            if HAS_CUPY:
                return "cupy"
            return "numpy"  # CPU fallback

        if backend == "jax" and not HAS_JAX:
            raise ImportError("JAX not available. Install with: pip install jax[cuda]")
        if backend == "cupy" and not HAS_CUPY:
            raise ImportError("CuPy not available. Install with: pip install cupy")

        return backend

    @staticmethod
    def _delta_nfr_kernel(xp: Any, adj: Any, epi: Any, vf: Any, phase: Any) -> Any:
        """Array-namespace-agnostic ΔNFR kernel.

        *xp* is the array module (numpy, cupy, or jax.numpy); all other
        arguments must already be arrays of that namespace.

        ΔNFR is computed from:
        - a topological gradient (EPI differences with neighbors),
        - a phase gradient (sine of phase differences with neighbors),
        each normalized by node degree, combined with the canonical
        0.7/0.3 weights and finally modulated by the structural
        frequency νf (nodal equation ∂EPI/∂t = νf · ΔNFR(t)).
        """
        # epi_diff[i, j] = epi[j] - epi[i]
        epi_diff = epi[None, :] - epi[:, None]  # (N, N)
        topo_gradient = xp.sum(adj * epi_diff, axis=1)  # (N,)

        # phase_diff[i, j] = sin(phase[j] - phase[i])
        phase_diff = xp.sin(phase[None, :] - phase[:, None])  # (N, N)
        phase_gradient = xp.sum(adj * phase_diff, axis=1)  # (N,)

        # Normalize by degree; isolated nodes get a safe divisor of 1
        # (their gradients are already zero).
        degree = xp.sum(adj, axis=1)
        degree_safe = xp.where(degree > 0, degree, 1.0)

        combined_gradient = TNFRGPUEngine._TOPO_WEIGHT * (
            topo_gradient / degree_safe
        ) + TNFRGPUEngine._PHASE_WEIGHT * (phase_gradient / degree_safe)

        # Apply structural frequency modulation (canonical equation).
        return vf * combined_gradient

    def compute_delta_nfr_gpu(
        self,
        adjacency_matrix: Any,
        epi_vector: Any,
        vf_vector: Any,
        phase_vector: Any,
    ) -> Any:
        """Compute ΔNFR using vectorized operations on the selected backend.

        Parameters
        ----------
        adjacency_matrix : array-like
            Network adjacency matrix (N x N)
        epi_vector : array-like
            EPI values for all nodes (N,)
        vf_vector : array-like
            Structural frequencies νf for all nodes (N,)
        phase_vector : array-like
            Phase values θ for all nodes (N,)

        Returns
        -------
        array-like
            ΔNFR values for all nodes (N,), in the backend's array type.
        """
        if self.backend == "jax" and HAS_JAX:
            return self._compute_delta_nfr_jax(
                adjacency_matrix, epi_vector, vf_vector, phase_vector
            )
        if self.backend == "cupy" and HAS_CUPY:
            return self._compute_delta_nfr_cupy(
                adjacency_matrix, epi_vector, vf_vector, phase_vector
            )
        return self._compute_delta_nfr_numpy(
            adjacency_matrix, epi_vector, vf_vector, phase_vector
        )

    def _compute_delta_nfr_jax(
        self, adj_matrix: Any, epi_vec: Any, vf_vec: Any, phase_vec: Any
    ) -> Any:
        """JAX implementation with JIT compilation for GPU acceleration.

        Parameters
        ----------
        adj_matrix : array-like
            Adjacency matrix (N x N)
        epi_vec : array-like
            EPI values (N,)
        vf_vec : array-like
            Structural frequencies (N,)
        phase_vec : array-like
            Phase values (N,)

        Returns
        -------
        jax.numpy.ndarray
            ΔNFR values for all nodes
        """
        if not HAS_JAX:
            raise ImportError("JAX required for GPU acceleration")

        adj = jnp.asarray(adj_matrix)
        epi = jnp.asarray(epi_vec)
        vf = jnp.asarray(vf_vec)
        phase = jnp.asarray(phase_vec)

        # JIT-compile the shared kernel bound to the jax.numpy namespace
        # (runs on GPU automatically when one is available).
        kernel = jit(
            lambda a, e, v, p: TNFRGPUEngine._delta_nfr_kernel(jnp, a, e, v, p)
        )
        return kernel(adj, epi, vf, phase)

    def _compute_delta_nfr_cupy(
        self, adj_matrix: Any, epi_vec: Any, vf_vec: Any, phase_vec: Any
    ) -> Any:
        """CuPy implementation for CUDA GPUs.

        Parameters
        ----------
        adj_matrix : array-like
            Adjacency matrix (N x N)
        epi_vec : array-like
            EPI values (N,)
        vf_vec : array-like
            Structural frequencies (N,)
        phase_vec : array-like
            Phase values (N,)

        Returns
        -------
        cupy.ndarray
            ΔNFR values for all nodes (on GPU)
        """
        if not HAS_CUPY:
            raise ImportError("CuPy required for CUDA GPU acceleration")

        # Transfer to GPU, then run the shared kernel on-device.
        adj = cp.asarray(adj_matrix)
        epi = cp.asarray(epi_vec)
        vf = cp.asarray(vf_vec)
        phase = cp.asarray(phase_vec)
        return self._delta_nfr_kernel(cp, adj, epi, vf, phase)

    def _compute_delta_nfr_numpy(
        self, adj_matrix: Any, epi_vec: Any, vf_vec: Any, phase_vec: Any
    ) -> Any:
        """NumPy fallback implementation (CPU-only).

        Parameters
        ----------
        adj_matrix : array-like
            Adjacency matrix (N x N)
        epi_vec : array-like
            EPI values (N,)
        vf_vec : array-like
            Structural frequencies (N,)
        phase_vec : array-like
            Phase values (N,)

        Returns
        -------
        numpy.ndarray
            ΔNFR values for all nodes
        """
        try:
            import numpy as np
        except ImportError as exc:
            raise ImportError("NumPy required for CPU computation") from exc

        adj = np.asarray(adj_matrix)
        epi = np.asarray(epi_vec)
        vf = np.asarray(vf_vec)
        phase = np.asarray(phase_vec)
        return self._delta_nfr_kernel(np, adj, epi, vf, phase)

    def compute_delta_nfr_from_graph(self, graph: Any) -> dict[Any, float]:
        """Compute ΔNFR directly from a TNFR graph using GPU acceleration.

        Convenience method that extracts matrices from *graph* and
        dispatches to the configured backend.

        Parameters
        ----------
        graph : TNFRGraph
            Network graph with TNFR attributes

        Returns
        -------
        dict[Any, float]
            Mapping from node IDs to ΔNFR values

        Examples
        --------
        >>> import networkx as nx
        >>> from tnfr.parallel import TNFRGPUEngine
        >>> G = nx.Graph([(0, 1), (1, 2)])
        >>> for node in G.nodes():
        ...     G.nodes[node]['epi'] = 0.5
        ...     G.nodes[node]['nu_f'] = 1.0
        ...     G.nodes[node]['phase'] = 0.0
        >>> engine = TNFRGPUEngine(backend="numpy")  # Use numpy for testing
        >>> result = engine.compute_delta_nfr_from_graph(G)
        >>> len(result) == 3
        True
        """
        try:
            import numpy as np
        except ImportError as exc:
            raise ImportError("NumPy required for graph processing") from exc

        # Node list fixes the row/column ordering of all arrays.
        nodes = list(graph.nodes())
        node_to_idx = {node: idx for idx, node in enumerate(nodes)}

        # Build a symmetric (undirected) dense adjacency matrix.
        n = len(nodes)
        adj_matrix = np.zeros((n, n))
        for u, v in graph.edges():
            idx_u = node_to_idx[u]
            idx_v = node_to_idx[v]
            adj_matrix[idx_u, idx_v] = 1.0
            adj_matrix[idx_v, idx_u] = 1.0  # Undirected

        def get_attr(node: Any, attr_names: Any, default: float) -> float:
            """Return the first present attribute among *attr_names*, else *default*."""
            names = (
                attr_names if isinstance(attr_names, (list, tuple)) else [attr_names]
            )
            for name in names:
                if name in graph.nodes[node]:
                    return float(graph.nodes[node][name])
            return default

        epi_vec = np.array([get_attr(node, ["epi", "EPI"], 0.5) for node in nodes])
        vf_vec = np.array([get_attr(node, ["nu_f", "vf", "νf"], 1.0) for node in nodes])
        phase_vec = np.array(
            [get_attr(node, ["phase", "theta"], 0.0) for node in nodes]
        )

        delta_nfr_array = self.compute_delta_nfr_gpu(
            adj_matrix, epi_vec, vf_vec, phase_vec
        )

        # Bring the result back to host memory as a plain numpy array.
        if self.backend == "cupy" and HAS_CUPY:
            delta_nfr_array = cp.asnumpy(delta_nfr_array)  # Transfer from GPU
        elif self.backend == "jax" and HAS_JAX:
            delta_nfr_array = np.array(delta_nfr_array)  # Convert from JAX

        return {node: float(delta_nfr_array[idx]) for idx, node in enumerate(nodes)}

    @property
    def is_gpu_available(self) -> bool:
        """Check if GPU acceleration is actually available."""
        if self.backend == "jax" and HAS_JAX:
            try:
                # Check if JAX has a GPU device attached.
                return len(jax.devices("gpu")) > 0
            except Exception:
                return False
        elif self.backend == "cupy" and HAS_CUPY:
            try:
                # Check if CuPy can access a CUDA device.
                return cp.cuda.runtime.getDeviceCount() > 0
            except Exception:
                return False
        return False
@@ -0,0 +1,248 @@
1
+ """Performance monitoring for parallel TNFR computations.
2
+
3
+ Tracks execution metrics to enable optimization and auto-scaling decisions.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ import time
9
+ from dataclasses import dataclass
10
+ from typing import Any, Dict, List, Optional
11
+
12
try:
    import psutil

    HAS_PSUTIL = True
except ImportError:
    HAS_PSUTIL = False


@dataclass
class PerformanceMetrics:
    """Performance metrics for parallel TNFR execution.

    Attributes
    ----------
    start_time : float
        Unix timestamp when execution started
    end_time : float
        Unix timestamp when execution completed
    duration_seconds : float
        Total execution time in seconds
    peak_memory_mb : float
        Peak memory usage in megabytes
    avg_cpu_percent : float
        Average CPU utilization percentage
    workers_used : int
        Number of parallel workers employed
    nodes_processed : int
        Total number of nodes processed
    operations_per_second : float
        Throughput metric (nodes/second)
    coherence_improvement : float
        Change in global coherence C(t)
    parallelization_efficiency : float
        Actual speedup / theoretical speedup ratio
    memory_efficiency : float
        Useful work / total memory ratio
    """

    start_time: float
    end_time: float
    duration_seconds: float
    peak_memory_mb: float
    avg_cpu_percent: float
    workers_used: int
    nodes_processed: int
    operations_per_second: float
    coherence_improvement: float
    parallelization_efficiency: float
    memory_efficiency: float


class ParallelExecutionMonitor:
    """Real-time monitoring for parallel TNFR execution.

    Tracks resource usage, throughput, and efficiency metrics during parallel
    computation to enable dynamic optimization and post-execution analysis.

    Examples
    --------
    >>> from tnfr.parallel import ParallelExecutionMonitor
    >>> monitor = ParallelExecutionMonitor()
    >>> monitor.start_monitoring(expected_nodes=100, workers=2)
    >>> # ... perform computation ...
    >>> metrics = monitor.stop_monitoring(
    ...     final_coherence=0.85,
    ...     initial_coherence=0.75
    ... )
    >>> metrics.nodes_processed
    100
    >>> metrics.workers_used
    2
    """

    def __init__(self):
        self._metrics_history: List[PerformanceMetrics] = []
        self._current_metrics: Optional[Dict[str, Any]] = None
        # psutil.Process handle for resource sampling; None when psutil
        # is unavailable or process introspection fails.
        self._process = None
        if HAS_PSUTIL:
            try:
                self._process = psutil.Process()
            except Exception:
                self._process = None

    def _sample_resources(self) -> None:
        """Append one memory (MB) and CPU (%) sample to the current run.

        Best-effort: sampling failures are swallowed so that monitoring
        never breaks the computation it observes.
        """
        if self._process is None or self._current_metrics is None:
            return
        try:
            mem_info = self._process.memory_info()
            self._current_metrics["memory_samples"].append(
                mem_info.rss / 1024 / 1024
            )
            self._current_metrics["cpu_samples"].append(self._process.cpu_percent())
        except Exception:
            pass

    def start_monitoring(self, expected_nodes: int, workers: int) -> None:
        """Start monitoring execution.

        Parameters
        ----------
        expected_nodes : int
            Expected number of nodes to process
        workers : int
            Number of parallel workers
        """
        self._current_metrics = {
            "start_time": time.time(),
            "expected_nodes": expected_nodes,
            "workers": workers,
            "memory_samples": [],
            "cpu_samples": [],
        }
        # Take initial resource snapshot.
        self._sample_resources()

    def stop_monitoring(
        self, final_coherence: float, initial_coherence: float
    ) -> PerformanceMetrics:
        """Stop monitoring and compute final metrics.

        Parameters
        ----------
        final_coherence : float
            Final network coherence C(t)
        initial_coherence : float
            Initial network coherence C(t)

        Returns
        -------
        PerformanceMetrics
            Complete performance metrics for the execution

        Raises
        ------
        RuntimeError
            If called without a preceding ``start_monitoring``.
        """
        if self._current_metrics is None:
            raise RuntimeError("Monitoring not started")

        end_time = time.time()
        duration = end_time - self._current_metrics["start_time"]

        # Take final resource snapshot.
        self._sample_resources()

        # Aggregate the collected samples (empty when psutil is absent).
        memory_samples = self._current_metrics.get("memory_samples", [])
        cpu_samples = self._current_metrics.get("cpu_samples", [])
        peak_memory = max(memory_samples) if memory_samples else 0.0
        avg_cpu = sum(cpu_samples) / len(cpu_samples) if cpu_samples else 0.0

        nodes = self._current_metrics["expected_nodes"]
        workers = self._current_metrics["workers"]

        # Parallelization efficiency.
        # NOTE: This is a heuristic approximation. True efficiency requires:
        # - Baseline sequential measurement
        # - Accounting for Amdahl's law (sequential portions)
        # - Consideration of communication overhead
        # Current approach: estimate actual speedup from CPU utilization
        # (N workers ideally yield ~N * 100% CPU).
        theoretical_speedup = workers
        expected_cpu = workers * 100.0
        actual_speedup = (avg_cpu / 100.0) if expected_cpu > 0 else 1.0
        parallelization_eff = (
            min(1.0, actual_speedup / theoretical_speedup)
            if theoretical_speedup > 0
            else 0.0
        )

        # Memory efficiency: nodes processed per MB of peak memory.
        memory_eff = nodes / peak_memory if peak_memory > 0 else 0.0

        metrics = PerformanceMetrics(
            start_time=self._current_metrics["start_time"],
            end_time=end_time,
            duration_seconds=duration,
            peak_memory_mb=peak_memory,
            avg_cpu_percent=avg_cpu,
            workers_used=workers,
            nodes_processed=nodes,
            operations_per_second=nodes / duration if duration > 0 else 0.0,
            coherence_improvement=final_coherence - initial_coherence,
            parallelization_efficiency=parallelization_eff,
            memory_efficiency=memory_eff,
        )

        self._metrics_history.append(metrics)
        self._current_metrics = None

        return metrics

    def get_optimization_suggestions(self) -> List[str]:
        """Generate optimization suggestions based on execution history.

        Returns
        -------
        List[str]
            List of actionable suggestions for improving performance
        """
        if not self._metrics_history:
            return ["No execution history available"]

        latest = self._metrics_history[-1]
        suggestions = []

        if latest.parallelization_efficiency < 0.5:
            suggestions.append(
                "⚡ Low parallelization efficiency - consider reducing "
                "worker count or increasing chunk size"
            )

        if latest.memory_efficiency < 0.1:
            suggestions.append(
                "💾 High memory usage - consider distributed execution "
                "or memory optimization"
            )

        if latest.operations_per_second < 100:
            suggestions.append(
                "📈 Low throughput - consider GPU backend or algorithm "
                "optimization"
            )

        if not suggestions:
            suggestions.append("✨ Performance looks optimal!")

        return suggestions

    @property
    def history(self) -> List[PerformanceMetrics]:
        """Get a copy of the execution history."""
        return self._metrics_history.copy()