tnfr-4.5.2-py3-none-any.whl → tnfr-8.5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of tnfr might be problematic.

Files changed (365)
  1. tnfr/__init__.py +334 -50
  2. tnfr/__init__.pyi +33 -0
  3. tnfr/_compat.py +10 -0
  4. tnfr/_generated_version.py +34 -0
  5. tnfr/_version.py +49 -0
  6. tnfr/_version.pyi +7 -0
  7. tnfr/alias.py +214 -37
  8. tnfr/alias.pyi +108 -0
  9. tnfr/backends/__init__.py +354 -0
  10. tnfr/backends/jax_backend.py +173 -0
  11. tnfr/backends/numpy_backend.py +238 -0
  12. tnfr/backends/optimized_numpy.py +420 -0
  13. tnfr/backends/torch_backend.py +408 -0
  14. tnfr/cache.py +149 -556
  15. tnfr/cache.pyi +13 -0
  16. tnfr/cli/__init__.py +51 -16
  17. tnfr/cli/__init__.pyi +26 -0
  18. tnfr/cli/arguments.py +344 -32
  19. tnfr/cli/arguments.pyi +29 -0
  20. tnfr/cli/execution.py +676 -50
  21. tnfr/cli/execution.pyi +70 -0
  22. tnfr/cli/interactive_validator.py +614 -0
  23. tnfr/cli/utils.py +18 -3
  24. tnfr/cli/utils.pyi +7 -0
  25. tnfr/cli/validate.py +236 -0
  26. tnfr/compat/__init__.py +85 -0
  27. tnfr/compat/dataclass.py +136 -0
  28. tnfr/compat/jsonschema_stub.py +61 -0
  29. tnfr/compat/matplotlib_stub.py +73 -0
  30. tnfr/compat/numpy_stub.py +155 -0
  31. tnfr/config/__init__.py +224 -0
  32. tnfr/config/__init__.pyi +10 -0
  33. tnfr/{constants_glyphs.py → config/constants.py} +26 -20
  34. tnfr/config/constants.pyi +12 -0
  35. tnfr/config/defaults.py +54 -0
  36. tnfr/{constants/core.py → config/defaults_core.py} +59 -6
  37. tnfr/config/defaults_init.py +33 -0
  38. tnfr/config/defaults_metric.py +104 -0
  39. tnfr/config/feature_flags.py +81 -0
  40. tnfr/config/feature_flags.pyi +16 -0
  41. tnfr/config/glyph_constants.py +31 -0
  42. tnfr/config/init.py +77 -0
  43. tnfr/config/init.pyi +8 -0
  44. tnfr/config/operator_names.py +254 -0
  45. tnfr/config/operator_names.pyi +36 -0
  46. tnfr/config/physics_derivation.py +354 -0
  47. tnfr/config/presets.py +83 -0
  48. tnfr/config/presets.pyi +7 -0
  49. tnfr/config/security.py +927 -0
  50. tnfr/config/thresholds.py +114 -0
  51. tnfr/config/tnfr_config.py +498 -0
  52. tnfr/constants/__init__.py +51 -133
  53. tnfr/constants/__init__.pyi +92 -0
  54. tnfr/constants/aliases.py +33 -0
  55. tnfr/constants/aliases.pyi +27 -0
  56. tnfr/constants/init.py +3 -1
  57. tnfr/constants/init.pyi +12 -0
  58. tnfr/constants/metric.py +9 -15
  59. tnfr/constants/metric.pyi +19 -0
  60. tnfr/core/__init__.py +33 -0
  61. tnfr/core/container.py +226 -0
  62. tnfr/core/default_implementations.py +329 -0
  63. tnfr/core/interfaces.py +279 -0
  64. tnfr/dynamics/__init__.py +213 -633
  65. tnfr/dynamics/__init__.pyi +83 -0
  66. tnfr/dynamics/adaptation.py +267 -0
  67. tnfr/dynamics/adaptation.pyi +7 -0
  68. tnfr/dynamics/adaptive_sequences.py +189 -0
  69. tnfr/dynamics/adaptive_sequences.pyi +14 -0
  70. tnfr/dynamics/aliases.py +23 -0
  71. tnfr/dynamics/aliases.pyi +19 -0
  72. tnfr/dynamics/bifurcation.py +232 -0
  73. tnfr/dynamics/canonical.py +229 -0
  74. tnfr/dynamics/canonical.pyi +48 -0
  75. tnfr/dynamics/coordination.py +385 -0
  76. tnfr/dynamics/coordination.pyi +25 -0
  77. tnfr/dynamics/dnfr.py +2699 -398
  78. tnfr/dynamics/dnfr.pyi +26 -0
  79. tnfr/dynamics/dynamic_limits.py +225 -0
  80. tnfr/dynamics/feedback.py +252 -0
  81. tnfr/dynamics/feedback.pyi +24 -0
  82. tnfr/dynamics/fused_dnfr.py +454 -0
  83. tnfr/dynamics/homeostasis.py +157 -0
  84. tnfr/dynamics/homeostasis.pyi +14 -0
  85. tnfr/dynamics/integrators.py +496 -102
  86. tnfr/dynamics/integrators.pyi +36 -0
  87. tnfr/dynamics/learning.py +310 -0
  88. tnfr/dynamics/learning.pyi +33 -0
  89. tnfr/dynamics/metabolism.py +254 -0
  90. tnfr/dynamics/nbody.py +796 -0
  91. tnfr/dynamics/nbody_tnfr.py +783 -0
  92. tnfr/dynamics/propagation.py +326 -0
  93. tnfr/dynamics/runtime.py +908 -0
  94. tnfr/dynamics/runtime.pyi +77 -0
  95. tnfr/dynamics/sampling.py +10 -5
  96. tnfr/dynamics/sampling.pyi +7 -0
  97. tnfr/dynamics/selectors.py +711 -0
  98. tnfr/dynamics/selectors.pyi +85 -0
  99. tnfr/dynamics/structural_clip.py +207 -0
  100. tnfr/errors/__init__.py +37 -0
  101. tnfr/errors/contextual.py +492 -0
  102. tnfr/execution.py +77 -55
  103. tnfr/execution.pyi +45 -0
  104. tnfr/extensions/__init__.py +205 -0
  105. tnfr/extensions/__init__.pyi +18 -0
  106. tnfr/extensions/base.py +173 -0
  107. tnfr/extensions/base.pyi +35 -0
  108. tnfr/extensions/business/__init__.py +71 -0
  109. tnfr/extensions/business/__init__.pyi +11 -0
  110. tnfr/extensions/business/cookbook.py +88 -0
  111. tnfr/extensions/business/cookbook.pyi +8 -0
  112. tnfr/extensions/business/health_analyzers.py +202 -0
  113. tnfr/extensions/business/health_analyzers.pyi +9 -0
  114. tnfr/extensions/business/patterns.py +183 -0
  115. tnfr/extensions/business/patterns.pyi +8 -0
  116. tnfr/extensions/medical/__init__.py +73 -0
  117. tnfr/extensions/medical/__init__.pyi +11 -0
  118. tnfr/extensions/medical/cookbook.py +88 -0
  119. tnfr/extensions/medical/cookbook.pyi +8 -0
  120. tnfr/extensions/medical/health_analyzers.py +181 -0
  121. tnfr/extensions/medical/health_analyzers.pyi +9 -0
  122. tnfr/extensions/medical/patterns.py +163 -0
  123. tnfr/extensions/medical/patterns.pyi +8 -0
  124. tnfr/flatten.py +29 -50
  125. tnfr/flatten.pyi +21 -0
  126. tnfr/gamma.py +66 -53
  127. tnfr/gamma.pyi +36 -0
  128. tnfr/glyph_history.py +144 -57
  129. tnfr/glyph_history.pyi +35 -0
  130. tnfr/glyph_runtime.py +19 -0
  131. tnfr/glyph_runtime.pyi +8 -0
  132. tnfr/immutable.py +70 -30
  133. tnfr/immutable.pyi +36 -0
  134. tnfr/initialization.py +22 -16
  135. tnfr/initialization.pyi +65 -0
  136. tnfr/io.py +5 -241
  137. tnfr/io.pyi +13 -0
  138. tnfr/locking.pyi +7 -0
  139. tnfr/mathematics/__init__.py +79 -0
  140. tnfr/mathematics/backend.py +453 -0
  141. tnfr/mathematics/backend.pyi +99 -0
  142. tnfr/mathematics/dynamics.py +408 -0
  143. tnfr/mathematics/dynamics.pyi +90 -0
  144. tnfr/mathematics/epi.py +391 -0
  145. tnfr/mathematics/epi.pyi +65 -0
  146. tnfr/mathematics/generators.py +242 -0
  147. tnfr/mathematics/generators.pyi +29 -0
  148. tnfr/mathematics/metrics.py +119 -0
  149. tnfr/mathematics/metrics.pyi +16 -0
  150. tnfr/mathematics/operators.py +239 -0
  151. tnfr/mathematics/operators.pyi +59 -0
  152. tnfr/mathematics/operators_factory.py +124 -0
  153. tnfr/mathematics/operators_factory.pyi +11 -0
  154. tnfr/mathematics/projection.py +87 -0
  155. tnfr/mathematics/projection.pyi +33 -0
  156. tnfr/mathematics/runtime.py +182 -0
  157. tnfr/mathematics/runtime.pyi +64 -0
  158. tnfr/mathematics/spaces.py +256 -0
  159. tnfr/mathematics/spaces.pyi +83 -0
  160. tnfr/mathematics/transforms.py +305 -0
  161. tnfr/mathematics/transforms.pyi +62 -0
  162. tnfr/metrics/__init__.py +47 -9
  163. tnfr/metrics/__init__.pyi +20 -0
  164. tnfr/metrics/buffer_cache.py +163 -0
  165. tnfr/metrics/buffer_cache.pyi +24 -0
  166. tnfr/metrics/cache_utils.py +214 -0
  167. tnfr/metrics/coherence.py +1510 -330
  168. tnfr/metrics/coherence.pyi +129 -0
  169. tnfr/metrics/common.py +23 -16
  170. tnfr/metrics/common.pyi +35 -0
  171. tnfr/metrics/core.py +251 -36
  172. tnfr/metrics/core.pyi +13 -0
  173. tnfr/metrics/diagnosis.py +709 -110
  174. tnfr/metrics/diagnosis.pyi +86 -0
  175. tnfr/metrics/emergence.py +245 -0
  176. tnfr/metrics/export.py +60 -18
  177. tnfr/metrics/export.pyi +7 -0
  178. tnfr/metrics/glyph_timing.py +233 -43
  179. tnfr/metrics/glyph_timing.pyi +81 -0
  180. tnfr/metrics/learning_metrics.py +280 -0
  181. tnfr/metrics/learning_metrics.pyi +21 -0
  182. tnfr/metrics/phase_coherence.py +351 -0
  183. tnfr/metrics/phase_compatibility.py +349 -0
  184. tnfr/metrics/reporting.py +63 -28
  185. tnfr/metrics/reporting.pyi +25 -0
  186. tnfr/metrics/sense_index.py +1126 -43
  187. tnfr/metrics/sense_index.pyi +9 -0
  188. tnfr/metrics/trig.py +215 -23
  189. tnfr/metrics/trig.pyi +13 -0
  190. tnfr/metrics/trig_cache.py +148 -24
  191. tnfr/metrics/trig_cache.pyi +10 -0
  192. tnfr/multiscale/__init__.py +32 -0
  193. tnfr/multiscale/hierarchical.py +517 -0
  194. tnfr/node.py +646 -140
  195. tnfr/node.pyi +139 -0
  196. tnfr/observers.py +160 -45
  197. tnfr/observers.pyi +31 -0
  198. tnfr/ontosim.py +23 -19
  199. tnfr/ontosim.pyi +28 -0
  200. tnfr/operators/__init__.py +1358 -106
  201. tnfr/operators/__init__.pyi +31 -0
  202. tnfr/operators/algebra.py +277 -0
  203. tnfr/operators/canonical_patterns.py +420 -0
  204. tnfr/operators/cascade.py +267 -0
  205. tnfr/operators/cycle_detection.py +358 -0
  206. tnfr/operators/definitions.py +4108 -0
  207. tnfr/operators/definitions.pyi +78 -0
  208. tnfr/operators/grammar.py +1164 -0
  209. tnfr/operators/grammar.pyi +140 -0
  210. tnfr/operators/hamiltonian.py +710 -0
  211. tnfr/operators/health_analyzer.py +809 -0
  212. tnfr/operators/jitter.py +107 -38
  213. tnfr/operators/jitter.pyi +11 -0
  214. tnfr/operators/lifecycle.py +314 -0
  215. tnfr/operators/metabolism.py +618 -0
  216. tnfr/operators/metrics.py +2138 -0
  217. tnfr/operators/network_analysis/__init__.py +27 -0
  218. tnfr/operators/network_analysis/source_detection.py +186 -0
  219. tnfr/operators/nodal_equation.py +395 -0
  220. tnfr/operators/pattern_detection.py +660 -0
  221. tnfr/operators/patterns.py +669 -0
  222. tnfr/operators/postconditions/__init__.py +38 -0
  223. tnfr/operators/postconditions/mutation.py +236 -0
  224. tnfr/operators/preconditions/__init__.py +1226 -0
  225. tnfr/operators/preconditions/coherence.py +305 -0
  226. tnfr/operators/preconditions/dissonance.py +236 -0
  227. tnfr/operators/preconditions/emission.py +128 -0
  228. tnfr/operators/preconditions/mutation.py +580 -0
  229. tnfr/operators/preconditions/reception.py +125 -0
  230. tnfr/operators/preconditions/resonance.py +364 -0
  231. tnfr/operators/registry.py +74 -0
  232. tnfr/operators/registry.pyi +9 -0
  233. tnfr/operators/remesh.py +1415 -91
  234. tnfr/operators/remesh.pyi +26 -0
  235. tnfr/operators/structural_units.py +268 -0
  236. tnfr/operators/unified_grammar.py +105 -0
  237. tnfr/parallel/__init__.py +54 -0
  238. tnfr/parallel/auto_scaler.py +234 -0
  239. tnfr/parallel/distributed.py +384 -0
  240. tnfr/parallel/engine.py +238 -0
  241. tnfr/parallel/gpu_engine.py +420 -0
  242. tnfr/parallel/monitoring.py +248 -0
  243. tnfr/parallel/partitioner.py +459 -0
  244. tnfr/py.typed +0 -0
  245. tnfr/recipes/__init__.py +22 -0
  246. tnfr/recipes/cookbook.py +743 -0
  247. tnfr/rng.py +75 -151
  248. tnfr/rng.pyi +26 -0
  249. tnfr/schemas/__init__.py +8 -0
  250. tnfr/schemas/grammar.json +94 -0
  251. tnfr/sdk/__init__.py +107 -0
  252. tnfr/sdk/__init__.pyi +19 -0
  253. tnfr/sdk/adaptive_system.py +173 -0
  254. tnfr/sdk/adaptive_system.pyi +21 -0
  255. tnfr/sdk/builders.py +370 -0
  256. tnfr/sdk/builders.pyi +51 -0
  257. tnfr/sdk/fluent.py +1121 -0
  258. tnfr/sdk/fluent.pyi +74 -0
  259. tnfr/sdk/templates.py +342 -0
  260. tnfr/sdk/templates.pyi +41 -0
  261. tnfr/sdk/utils.py +341 -0
  262. tnfr/secure_config.py +46 -0
  263. tnfr/security/__init__.py +70 -0
  264. tnfr/security/database.py +514 -0
  265. tnfr/security/subprocess.py +503 -0
  266. tnfr/security/validation.py +290 -0
  267. tnfr/selector.py +59 -22
  268. tnfr/selector.pyi +19 -0
  269. tnfr/sense.py +92 -67
  270. tnfr/sense.pyi +23 -0
  271. tnfr/services/__init__.py +17 -0
  272. tnfr/services/orchestrator.py +325 -0
  273. tnfr/sparse/__init__.py +39 -0
  274. tnfr/sparse/representations.py +492 -0
  275. tnfr/structural.py +639 -263
  276. tnfr/structural.pyi +83 -0
  277. tnfr/telemetry/__init__.py +35 -0
  278. tnfr/telemetry/cache_metrics.py +226 -0
  279. tnfr/telemetry/cache_metrics.pyi +64 -0
  280. tnfr/telemetry/nu_f.py +422 -0
  281. tnfr/telemetry/nu_f.pyi +108 -0
  282. tnfr/telemetry/verbosity.py +36 -0
  283. tnfr/telemetry/verbosity.pyi +15 -0
  284. tnfr/tokens.py +2 -4
  285. tnfr/tokens.pyi +36 -0
  286. tnfr/tools/__init__.py +20 -0
  287. tnfr/tools/domain_templates.py +478 -0
  288. tnfr/tools/sequence_generator.py +846 -0
  289. tnfr/topology/__init__.py +13 -0
  290. tnfr/topology/asymmetry.py +151 -0
  291. tnfr/trace.py +300 -126
  292. tnfr/trace.pyi +42 -0
  293. tnfr/tutorials/__init__.py +38 -0
  294. tnfr/tutorials/autonomous_evolution.py +285 -0
  295. tnfr/tutorials/interactive.py +1576 -0
  296. tnfr/tutorials/structural_metabolism.py +238 -0
  297. tnfr/types.py +743 -12
  298. tnfr/types.pyi +357 -0
  299. tnfr/units.py +68 -0
  300. tnfr/units.pyi +13 -0
  301. tnfr/utils/__init__.py +282 -0
  302. tnfr/utils/__init__.pyi +215 -0
  303. tnfr/utils/cache.py +4223 -0
  304. tnfr/utils/cache.pyi +470 -0
  305. tnfr/{callback_utils.py → utils/callbacks.py} +26 -39
  306. tnfr/utils/callbacks.pyi +49 -0
  307. tnfr/utils/chunks.py +108 -0
  308. tnfr/utils/chunks.pyi +22 -0
  309. tnfr/utils/data.py +428 -0
  310. tnfr/utils/data.pyi +74 -0
  311. tnfr/utils/graph.py +85 -0
  312. tnfr/utils/graph.pyi +10 -0
  313. tnfr/utils/init.py +821 -0
  314. tnfr/utils/init.pyi +80 -0
  315. tnfr/utils/io.py +559 -0
  316. tnfr/utils/io.pyi +66 -0
  317. tnfr/{helpers → utils}/numeric.py +51 -24
  318. tnfr/utils/numeric.pyi +21 -0
  319. tnfr/validation/__init__.py +257 -0
  320. tnfr/validation/__init__.pyi +85 -0
  321. tnfr/validation/compatibility.py +460 -0
  322. tnfr/validation/compatibility.pyi +6 -0
  323. tnfr/validation/config.py +73 -0
  324. tnfr/validation/graph.py +139 -0
  325. tnfr/validation/graph.pyi +18 -0
  326. tnfr/validation/input_validation.py +755 -0
  327. tnfr/validation/invariants.py +712 -0
  328. tnfr/validation/rules.py +253 -0
  329. tnfr/validation/rules.pyi +44 -0
  330. tnfr/validation/runtime.py +279 -0
  331. tnfr/validation/runtime.pyi +28 -0
  332. tnfr/validation/sequence_validator.py +162 -0
  333. tnfr/validation/soft_filters.py +170 -0
  334. tnfr/validation/soft_filters.pyi +32 -0
  335. tnfr/validation/spectral.py +164 -0
  336. tnfr/validation/spectral.pyi +42 -0
  337. tnfr/validation/validator.py +1266 -0
  338. tnfr/validation/window.py +39 -0
  339. tnfr/validation/window.pyi +1 -0
  340. tnfr/visualization/__init__.py +98 -0
  341. tnfr/visualization/cascade_viz.py +256 -0
  342. tnfr/visualization/hierarchy.py +284 -0
  343. tnfr/visualization/sequence_plotter.py +784 -0
  344. tnfr/viz/__init__.py +60 -0
  345. tnfr/viz/matplotlib.py +278 -0
  346. tnfr/viz/matplotlib.pyi +35 -0
  347. tnfr-8.5.0.dist-info/METADATA +573 -0
  348. tnfr-8.5.0.dist-info/RECORD +353 -0
  349. {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/entry_points.txt +1 -0
  350. {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/licenses/LICENSE.md +1 -1
  351. tnfr/collections_utils.py +0 -300
  352. tnfr/config.py +0 -32
  353. tnfr/grammar.py +0 -344
  354. tnfr/graph_utils.py +0 -84
  355. tnfr/helpers/__init__.py +0 -71
  356. tnfr/import_utils.py +0 -228
  357. tnfr/json_utils.py +0 -162
  358. tnfr/logging_utils.py +0 -116
  359. tnfr/presets.py +0 -60
  360. tnfr/validators.py +0 -84
  361. tnfr/value_utils.py +0 -59
  362. tnfr-4.5.2.dist-info/METADATA +0 -379
  363. tnfr-4.5.2.dist-info/RECORD +0 -67
  364. {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/WHEEL +0 -0
  365. {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/top_level.txt +0 -0
tnfr/backends/torch_backend.py (new file)
@@ -0,0 +1,408 @@
+"""PyTorch-based GPU-accelerated backend for TNFR computations (Experimental).
+
+This module provides a PyTorch implementation of TNFR computational kernels
+with support for:
+
+- GPU acceleration via CUDA/ROCm
+- Automatic differentiation with autograd
+- Optimized tensor operations
+- Mixed precision training support
+
+**Status**: Experimental - API may change in future releases.
+
+The Torch backend currently delegates to the NumPy implementation but provides
+infrastructure for future GPU-optimized kernels.
+
+Examples
+--------
+>>> from tnfr.backends import get_backend
+>>> backend = get_backend("torch")  # doctest: +SKIP
+>>> backend.supports_gpu  # doctest: +SKIP
+True
+"""
+
+from __future__ import annotations
+
+from typing import Any, MutableMapping
+
+from . import TNFRBackend
+from ..types import TNFRGraph
+
+
+class TorchBackend(TNFRBackend):
+    """PyTorch GPU-accelerated implementation of TNFR kernels (Experimental).
+
+    This backend provides a foundation for GPU-accelerated TNFR computations
+    using PyTorch. Current implementation delegates to NumPy backend while
+    maintaining interface compatibility for future GPU implementations.
+
+    Future optimizations planned:
+    - GPU-accelerated ΔNFR computation using torch tensors
+    - Sparse tensor operations for large-scale graphs
+    - Mixed precision support (FP16/BF16) for memory efficiency
+    - Automatic device placement (CPU/CUDA/ROCm)
+    - Integration with PyTorch Geometric for graph operations
+
+    Attributes
+    ----------
+    name : str
+        Returns "torch"
+    supports_gpu : bool
+        True (PyTorch supports GPU acceleration)
+    supports_jit : bool
+        False (TorchScript not yet integrated)
+
+    Notes
+    -----
+    Requires PyTorch to be installed: `pip install torch`
+
+    For GPU support, install PyTorch with CUDA:
+    `pip install torch --index-url https://download.pytorch.org/whl/cu118`
+    """
+
+    def __init__(self) -> None:
+        """Initialize PyTorch backend."""
+        try:
+            import torch
+
+            self._torch = torch
+            self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        except ImportError as exc:
+            raise RuntimeError(
+                "PyTorch backend requires torch to be installed. "
+                "Install with: pip install torch"
+            ) from exc
+
+    @property
+    def name(self) -> str:
+        """Return the backend identifier."""
+        return "torch"
+
+    @property
+    def supports_gpu(self) -> bool:
+        """PyTorch supports GPU acceleration."""
+        return True
+
+    @property
+    def supports_jit(self) -> bool:
+        """TorchScript not yet integrated."""
+        return False
+
+    @property
+    def device(self) -> Any:
+        """Return the current PyTorch device (CPU or CUDA)."""
+        return self._device
+
+    def compute_delta_nfr(
+        self,
+        graph: TNFRGraph,
+        *,
+        cache_size: int | None = 1,
+        n_jobs: int | None = None,
+        profile: MutableMapping[str, float] | None = None,
+    ) -> None:
+        """Compute ΔNFR using PyTorch backend with GPU acceleration.
+
+        Implements vectorized ΔNFR computation using PyTorch tensors with
+        automatic device placement (CPU/CUDA). For large graphs (>1000 nodes),
+        uses GPU if available for significant speedup.
+
+        Parameters
+        ----------
+        graph : TNFRGraph
+            NetworkX graph with TNFR node attributes
+        cache_size : int or None, optional
+            Cache size hint (ignored, PyTorch manages memory)
+        n_jobs : int or None, optional
+            Ignored (PyTorch uses GPU parallelism)
+        profile : MutableMapping[str, float] or None, optional
+            Dict to collect timing metrics
+
+        Notes
+        -----
+        Automatically moves tensors to GPU if available (backend.device).
+        For small graphs (<1000 nodes), may use NumPy backend to avoid
+        overhead of tensor conversion and device transfer.
+        """
+        import time
+        import numpy as np
+
+        if profile is not None:
+            profile["dnfr_backend"] = "torch"
+            profile["dnfr_device"] = str(self._device)
+
+        n_nodes = graph.number_of_nodes()
+        n_edges = graph.number_of_edges()
+
+        # For very small graphs, delegate to NumPy (tensor overhead not worth it)
+        if n_nodes < 1000:
+            if profile is not None:
+                profile["dnfr_path"] = "numpy_fallback"
+            from ..dynamics.dnfr import default_compute_delta_nfr
+
+            default_compute_delta_nfr(
+                graph, cache_size=cache_size, n_jobs=n_jobs, profile=profile
+            )
+            return
+
+        if profile is not None:
+            profile["dnfr_path"] = "torch_gpu"
+            t0 = time.perf_counter()
+
+        # Extract graph data
+        node_list = list(graph.nodes())
+        node_to_idx = {node: idx for idx, node in enumerate(node_list)}
+
+        # Get node attributes as numpy arrays first
+        phase = np.array(
+            [graph.nodes[node].get("phase", 0.0) for node in node_list],
+            dtype=np.float32,
+        )
+        epi = np.array(
+            [graph.nodes[node].get("EPI", 0.5) for node in node_list], dtype=np.float32
+        )
+        vf = np.array(
+            [graph.nodes[node].get("nu_f", 1.0) for node in node_list], dtype=np.float32
+        )
+
+        # Get edge list
+        edges = list(graph.edges())
+        if not edges:
+            # No edges - all nodes get zero ΔNFR
+            for node in node_list:
+                graph.nodes[node]["ΔNFR"] = 0.0
+            return
+
+        edge_src = np.array([node_to_idx[src] for src, _ in edges], dtype=np.int64)
+        edge_dst = np.array([node_to_idx[dst] for _, dst in edges], dtype=np.int64)
+
+        # Get weights
+        weights = graph.graph.get("DNFR_WEIGHTS", {})
+        w_phase = float(weights.get("phase", 0.0))
+        w_epi = float(weights.get("epi", 0.0))
+        w_vf = float(weights.get("vf", 0.0))
+        w_topo = float(weights.get("topo", 0.0))
+
+        if profile is not None:
+            profile["dnfr_data_prep"] = time.perf_counter() - t0
+            t0 = time.perf_counter()
+
+        # Convert to PyTorch tensors and move to device
+        phase_t = self._torch.tensor(
+            phase, device=self._device, dtype=self._torch.float32
+        )
+        epi_t = self._torch.tensor(epi, device=self._device, dtype=self._torch.float32)
+        vf_t = self._torch.tensor(vf, device=self._device, dtype=self._torch.float32)
+        edge_src_t = self._torch.tensor(
+            edge_src, device=self._device, dtype=self._torch.int64
+        )
+        edge_dst_t = self._torch.tensor(
+            edge_dst, device=self._device, dtype=self._torch.int64
+        )
+
+        if profile is not None:
+            profile["dnfr_to_device"] = time.perf_counter() - t0
+            t0 = time.perf_counter()
+
+        # Compute ΔNFR using PyTorch operations
+        delta_nfr_t = self._compute_delta_nfr_torch(
+            phase_t,
+            epi_t,
+            vf_t,
+            edge_src_t,
+            edge_dst_t,
+            w_phase,
+            w_epi,
+            w_vf,
+            w_topo,
+            graph.is_directed(),
+        )
+
+        if profile is not None:
+            profile["dnfr_compute"] = time.perf_counter() - t0
+            t0 = time.perf_counter()
+
+        # Convert back to numpy and write to graph
+        delta_nfr = delta_nfr_t.cpu().numpy()
+
+        if profile is not None:
+            profile["dnfr_from_device"] = time.perf_counter() - t0
+            t0 = time.perf_counter()
+
+        for idx, node in enumerate(node_list):
+            graph.nodes[node]["ΔNFR"] = float(delta_nfr[idx])
+
+        if profile is not None:
+            profile["dnfr_write_back"] = time.perf_counter() - t0
+
+    def _compute_delta_nfr_torch(
+        self,
+        phase: Any,
+        epi: Any,
+        vf: Any,
+        edge_src: Any,
+        edge_dst: Any,
+        w_phase: float,
+        w_epi: float,
+        w_vf: float,
+        w_topo: float,
+        is_directed: bool,
+    ) -> Any:
+        """Compute ΔNFR using PyTorch tensor operations.
+
+        Implements the TNFR canonical formula:
+        ΔNFR = νf · (w_phase·g_phase + w_epi·g_epi + w_vf·g_vf + w_topo·g_topo)
+
+        Where:
+        - g_phase = angle_diff(phase_mean, phase) / π (circular mean)
+        - g_epi = epi_mean - epi
+        - g_vf = vf_mean - vf
+        - g_topo = neighbor_count · w_topo
+
+        Parameters
+        ----------
+        phase, epi, vf : torch.Tensor
+            Node attribute tensors on device
+        edge_src, edge_dst : torch.Tensor
+            Edge index tensors
+        w_phase, w_epi, w_vf, w_topo : float
+            Component weights
+        is_directed : bool
+            Whether graph is directed
+
+        Returns
+        -------
+        torch.Tensor
+            ΔNFR values for all nodes
+        """
+        n_nodes = phase.shape[0]
+        torch = self._torch
+
+        # Initialize accumulators
+        neighbor_cos_sum = torch.zeros(
+            n_nodes, device=self._device, dtype=torch.float32
+        )
+        neighbor_sin_sum = torch.zeros(
+            n_nodes, device=self._device, dtype=torch.float32
+        )
+        neighbor_epi_sum = torch.zeros(
+            n_nodes, device=self._device, dtype=torch.float32
+        )
+        neighbor_vf_sum = torch.zeros(n_nodes, device=self._device, dtype=torch.float32)
+        neighbor_count = torch.zeros(n_nodes, device=self._device, dtype=torch.float32)
+
+        # Accumulate neighbor statistics
+        # For each edge, dst receives contributions from src
+        neighbor_cos_sum.scatter_add_(0, edge_dst, torch.cos(phase[edge_src]))
+        neighbor_sin_sum.scatter_add_(0, edge_dst, torch.sin(phase[edge_src]))
+        neighbor_epi_sum.scatter_add_(0, edge_dst, epi[edge_src])
+        neighbor_vf_sum.scatter_add_(0, edge_dst, vf[edge_src])
+        neighbor_count.scatter_add_(
+            0, edge_dst, torch.ones_like(edge_dst, dtype=torch.float32)
+        )
+
+        # For undirected graphs, also accumulate in reverse
+        if not is_directed:
+            neighbor_cos_sum.scatter_add_(0, edge_src, torch.cos(phase[edge_dst]))
+            neighbor_sin_sum.scatter_add_(0, edge_src, torch.sin(phase[edge_dst]))
+            neighbor_epi_sum.scatter_add_(0, edge_src, epi[edge_dst])
+            neighbor_vf_sum.scatter_add_(0, edge_src, vf[edge_dst])
+            neighbor_count.scatter_add_(
+                0, edge_src, torch.ones_like(edge_src, dtype=torch.float32)
+            )
+
+        # Compute means
+        has_neighbors = neighbor_count > 0
+
+        # Circular mean for phase (using atan2)
+        phase_mean = torch.zeros(n_nodes, device=self._device, dtype=torch.float32)
+        phase_mean[has_neighbors] = torch.atan2(
+            neighbor_sin_sum[has_neighbors], neighbor_cos_sum[has_neighbors]
+        )
+
+        # Arithmetic means for EPI and vf
+        epi_mean = torch.zeros(n_nodes, device=self._device, dtype=torch.float32)
+        vf_mean = torch.zeros(n_nodes, device=self._device, dtype=torch.float32)
+        epi_mean[has_neighbors] = (
+            neighbor_epi_sum[has_neighbors] / neighbor_count[has_neighbors]
+        )
+        vf_mean[has_neighbors] = (
+            neighbor_vf_sum[has_neighbors] / neighbor_count[has_neighbors]
+        )
+
+        # Compute gradients using TNFR canonical formula
+        # Phase: angle_diff with wrapping to [-π, π]
+        phase_diff = (phase_mean - phase + torch.pi) % (2 * torch.pi) - torch.pi
+        g_phase = phase_diff / torch.pi
+        g_phase[~has_neighbors] = 0.0
+
+        # EPI and vf gradients
+        g_epi = epi_mean - epi
+        g_epi[~has_neighbors] = 0.0
+
+        g_vf = vf_mean - vf
+        g_vf[~has_neighbors] = 0.0
+
+        # Topology gradient
+        g_topo = neighbor_count * w_topo
+
+        # Combine gradients
+        delta_nfr = w_phase * g_phase + w_epi * g_epi + w_vf * g_vf + g_topo
+
+        # Apply structural frequency scaling (canonical TNFR)
+        delta_nfr = vf * delta_nfr
+
+        return delta_nfr
+
+    def compute_si(
+        self,
+        graph: TNFRGraph,
+        *,
+        inplace: bool = True,
+        n_jobs: int | None = None,
+        chunk_size: int | None = None,
+        profile: MutableMapping[str, Any] | None = None,
+    ) -> dict[Any, float] | Any:
+        """Compute sense index using PyTorch backend.
+
+        **Current implementation**: Delegates to NumPy backend while maintaining
+        interface compatibility.
+
+        **Planned**: GPU-accelerated vectorized Si computation using torch tensors
+        with optimized phase dispersion kernels and mixed precision support.
+
+        Parameters
+        ----------
+        graph : TNFRGraph
+            NetworkX graph with TNFR node attributes
+        inplace : bool, default=True
+            Whether to write Si values back to graph
+        n_jobs : int or None, optional
+            Ignored (PyTorch uses GPU parallelism)
+        chunk_size : int or None, optional
+            Chunk size hint (currently passed to NumPy backend)
+        profile : MutableMapping[str, Any] or None, optional
+            Dict to collect timing metrics
+
+        Returns
+        -------
+        dict[Any, float] or numpy.ndarray
+            Node-to-Si mapping or array of Si values
+
+        Notes
+        -----
+        When implemented, will support mixed precision (FP16/BF16) for
+        memory-efficient computation on large graphs, selectable via
+        graph.graph["TORCH_DTYPE"] = torch.float16
+        """
+        # TODO: Implement GPU-accelerated PyTorch version
+        # For now, delegate to NumPy backend
+        from ..metrics.sense_index import compute_Si
+
+        return compute_Si(
+            graph,
+            inplace=inplace,
+            n_jobs=n_jobs,
+            chunk_size=chunk_size,
+            profile=profile,
+        )
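For orientation only, and not part of the package diff above: a minimal sketch of how this new backend could be exercised, assuming networkx, numpy, and torch are installed and that the graph carries the attribute names the code reads ("phase", "EPI", "nu_f") plus a "DNFR_WEIGHTS" mapping in graph.graph. The weight values below are illustrative placeholders, not tnfr defaults.

# Illustrative sketch; assumes networkx, numpy, and torch are available.
import math
import random

import networkx as nx

from tnfr.backends.torch_backend import TorchBackend

# Build a graph large enough to take the torch path (the backend falls back
# to the NumPy implementation below 1000 nodes).
G = nx.gnm_random_graph(1200, 4800, seed=0)
for _, data in G.nodes(data=True):
    data["phase"] = random.uniform(-math.pi, math.pi)  # phase in radians
    data["EPI"] = random.random()  # backend default is 0.5 when missing
    data["nu_f"] = 1.0  # structural frequency, default 1.0 when missing

# Component weights read from graph.graph["DNFR_WEIGHTS"]; values are arbitrary here.
G.graph["DNFR_WEIGHTS"] = {"phase": 0.34, "epi": 0.33, "vf": 0.23, "topo": 0.10}

profile = {}
backend = TorchBackend()  # raises RuntimeError if torch is not installed
backend.compute_delta_nfr(G, profile=profile)

print(profile["dnfr_path"])  # "torch_gpu" for graphs with 1000 nodes or more
print(G.nodes[0]["ΔNFR"])  # per-node gradient written back as a plain float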