tnfr-4.5.2-py3-none-any.whl → tnfr-8.5.0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of tnfr has been flagged as possibly problematic.
Files changed (365)
  1. tnfr/__init__.py +334 -50
  2. tnfr/__init__.pyi +33 -0
  3. tnfr/_compat.py +10 -0
  4. tnfr/_generated_version.py +34 -0
  5. tnfr/_version.py +49 -0
  6. tnfr/_version.pyi +7 -0
  7. tnfr/alias.py +214 -37
  8. tnfr/alias.pyi +108 -0
  9. tnfr/backends/__init__.py +354 -0
  10. tnfr/backends/jax_backend.py +173 -0
  11. tnfr/backends/numpy_backend.py +238 -0
  12. tnfr/backends/optimized_numpy.py +420 -0
  13. tnfr/backends/torch_backend.py +408 -0
  14. tnfr/cache.py +149 -556
  15. tnfr/cache.pyi +13 -0
  16. tnfr/cli/__init__.py +51 -16
  17. tnfr/cli/__init__.pyi +26 -0
  18. tnfr/cli/arguments.py +344 -32
  19. tnfr/cli/arguments.pyi +29 -0
  20. tnfr/cli/execution.py +676 -50
  21. tnfr/cli/execution.pyi +70 -0
  22. tnfr/cli/interactive_validator.py +614 -0
  23. tnfr/cli/utils.py +18 -3
  24. tnfr/cli/utils.pyi +7 -0
  25. tnfr/cli/validate.py +236 -0
  26. tnfr/compat/__init__.py +85 -0
  27. tnfr/compat/dataclass.py +136 -0
  28. tnfr/compat/jsonschema_stub.py +61 -0
  29. tnfr/compat/matplotlib_stub.py +73 -0
  30. tnfr/compat/numpy_stub.py +155 -0
  31. tnfr/config/__init__.py +224 -0
  32. tnfr/config/__init__.pyi +10 -0
  33. tnfr/{constants_glyphs.py → config/constants.py} +26 -20
  34. tnfr/config/constants.pyi +12 -0
  35. tnfr/config/defaults.py +54 -0
  36. tnfr/{constants/core.py → config/defaults_core.py} +59 -6
  37. tnfr/config/defaults_init.py +33 -0
  38. tnfr/config/defaults_metric.py +104 -0
  39. tnfr/config/feature_flags.py +81 -0
  40. tnfr/config/feature_flags.pyi +16 -0
  41. tnfr/config/glyph_constants.py +31 -0
  42. tnfr/config/init.py +77 -0
  43. tnfr/config/init.pyi +8 -0
  44. tnfr/config/operator_names.py +254 -0
  45. tnfr/config/operator_names.pyi +36 -0
  46. tnfr/config/physics_derivation.py +354 -0
  47. tnfr/config/presets.py +83 -0
  48. tnfr/config/presets.pyi +7 -0
  49. tnfr/config/security.py +927 -0
  50. tnfr/config/thresholds.py +114 -0
  51. tnfr/config/tnfr_config.py +498 -0
  52. tnfr/constants/__init__.py +51 -133
  53. tnfr/constants/__init__.pyi +92 -0
  54. tnfr/constants/aliases.py +33 -0
  55. tnfr/constants/aliases.pyi +27 -0
  56. tnfr/constants/init.py +3 -1
  57. tnfr/constants/init.pyi +12 -0
  58. tnfr/constants/metric.py +9 -15
  59. tnfr/constants/metric.pyi +19 -0
  60. tnfr/core/__init__.py +33 -0
  61. tnfr/core/container.py +226 -0
  62. tnfr/core/default_implementations.py +329 -0
  63. tnfr/core/interfaces.py +279 -0
  64. tnfr/dynamics/__init__.py +213 -633
  65. tnfr/dynamics/__init__.pyi +83 -0
  66. tnfr/dynamics/adaptation.py +267 -0
  67. tnfr/dynamics/adaptation.pyi +7 -0
  68. tnfr/dynamics/adaptive_sequences.py +189 -0
  69. tnfr/dynamics/adaptive_sequences.pyi +14 -0
  70. tnfr/dynamics/aliases.py +23 -0
  71. tnfr/dynamics/aliases.pyi +19 -0
  72. tnfr/dynamics/bifurcation.py +232 -0
  73. tnfr/dynamics/canonical.py +229 -0
  74. tnfr/dynamics/canonical.pyi +48 -0
  75. tnfr/dynamics/coordination.py +385 -0
  76. tnfr/dynamics/coordination.pyi +25 -0
  77. tnfr/dynamics/dnfr.py +2699 -398
  78. tnfr/dynamics/dnfr.pyi +26 -0
  79. tnfr/dynamics/dynamic_limits.py +225 -0
  80. tnfr/dynamics/feedback.py +252 -0
  81. tnfr/dynamics/feedback.pyi +24 -0
  82. tnfr/dynamics/fused_dnfr.py +454 -0
  83. tnfr/dynamics/homeostasis.py +157 -0
  84. tnfr/dynamics/homeostasis.pyi +14 -0
  85. tnfr/dynamics/integrators.py +496 -102
  86. tnfr/dynamics/integrators.pyi +36 -0
  87. tnfr/dynamics/learning.py +310 -0
  88. tnfr/dynamics/learning.pyi +33 -0
  89. tnfr/dynamics/metabolism.py +254 -0
  90. tnfr/dynamics/nbody.py +796 -0
  91. tnfr/dynamics/nbody_tnfr.py +783 -0
  92. tnfr/dynamics/propagation.py +326 -0
  93. tnfr/dynamics/runtime.py +908 -0
  94. tnfr/dynamics/runtime.pyi +77 -0
  95. tnfr/dynamics/sampling.py +10 -5
  96. tnfr/dynamics/sampling.pyi +7 -0
  97. tnfr/dynamics/selectors.py +711 -0
  98. tnfr/dynamics/selectors.pyi +85 -0
  99. tnfr/dynamics/structural_clip.py +207 -0
  100. tnfr/errors/__init__.py +37 -0
  101. tnfr/errors/contextual.py +492 -0
  102. tnfr/execution.py +77 -55
  103. tnfr/execution.pyi +45 -0
  104. tnfr/extensions/__init__.py +205 -0
  105. tnfr/extensions/__init__.pyi +18 -0
  106. tnfr/extensions/base.py +173 -0
  107. tnfr/extensions/base.pyi +35 -0
  108. tnfr/extensions/business/__init__.py +71 -0
  109. tnfr/extensions/business/__init__.pyi +11 -0
  110. tnfr/extensions/business/cookbook.py +88 -0
  111. tnfr/extensions/business/cookbook.pyi +8 -0
  112. tnfr/extensions/business/health_analyzers.py +202 -0
  113. tnfr/extensions/business/health_analyzers.pyi +9 -0
  114. tnfr/extensions/business/patterns.py +183 -0
  115. tnfr/extensions/business/patterns.pyi +8 -0
  116. tnfr/extensions/medical/__init__.py +73 -0
  117. tnfr/extensions/medical/__init__.pyi +11 -0
  118. tnfr/extensions/medical/cookbook.py +88 -0
  119. tnfr/extensions/medical/cookbook.pyi +8 -0
  120. tnfr/extensions/medical/health_analyzers.py +181 -0
  121. tnfr/extensions/medical/health_analyzers.pyi +9 -0
  122. tnfr/extensions/medical/patterns.py +163 -0
  123. tnfr/extensions/medical/patterns.pyi +8 -0
  124. tnfr/flatten.py +29 -50
  125. tnfr/flatten.pyi +21 -0
  126. tnfr/gamma.py +66 -53
  127. tnfr/gamma.pyi +36 -0
  128. tnfr/glyph_history.py +144 -57
  129. tnfr/glyph_history.pyi +35 -0
  130. tnfr/glyph_runtime.py +19 -0
  131. tnfr/glyph_runtime.pyi +8 -0
  132. tnfr/immutable.py +70 -30
  133. tnfr/immutable.pyi +36 -0
  134. tnfr/initialization.py +22 -16
  135. tnfr/initialization.pyi +65 -0
  136. tnfr/io.py +5 -241
  137. tnfr/io.pyi +13 -0
  138. tnfr/locking.pyi +7 -0
  139. tnfr/mathematics/__init__.py +79 -0
  140. tnfr/mathematics/backend.py +453 -0
  141. tnfr/mathematics/backend.pyi +99 -0
  142. tnfr/mathematics/dynamics.py +408 -0
  143. tnfr/mathematics/dynamics.pyi +90 -0
  144. tnfr/mathematics/epi.py +391 -0
  145. tnfr/mathematics/epi.pyi +65 -0
  146. tnfr/mathematics/generators.py +242 -0
  147. tnfr/mathematics/generators.pyi +29 -0
  148. tnfr/mathematics/metrics.py +119 -0
  149. tnfr/mathematics/metrics.pyi +16 -0
  150. tnfr/mathematics/operators.py +239 -0
  151. tnfr/mathematics/operators.pyi +59 -0
  152. tnfr/mathematics/operators_factory.py +124 -0
  153. tnfr/mathematics/operators_factory.pyi +11 -0
  154. tnfr/mathematics/projection.py +87 -0
  155. tnfr/mathematics/projection.pyi +33 -0
  156. tnfr/mathematics/runtime.py +182 -0
  157. tnfr/mathematics/runtime.pyi +64 -0
  158. tnfr/mathematics/spaces.py +256 -0
  159. tnfr/mathematics/spaces.pyi +83 -0
  160. tnfr/mathematics/transforms.py +305 -0
  161. tnfr/mathematics/transforms.pyi +62 -0
  162. tnfr/metrics/__init__.py +47 -9
  163. tnfr/metrics/__init__.pyi +20 -0
  164. tnfr/metrics/buffer_cache.py +163 -0
  165. tnfr/metrics/buffer_cache.pyi +24 -0
  166. tnfr/metrics/cache_utils.py +214 -0
  167. tnfr/metrics/coherence.py +1510 -330
  168. tnfr/metrics/coherence.pyi +129 -0
  169. tnfr/metrics/common.py +23 -16
  170. tnfr/metrics/common.pyi +35 -0
  171. tnfr/metrics/core.py +251 -36
  172. tnfr/metrics/core.pyi +13 -0
  173. tnfr/metrics/diagnosis.py +709 -110
  174. tnfr/metrics/diagnosis.pyi +86 -0
  175. tnfr/metrics/emergence.py +245 -0
  176. tnfr/metrics/export.py +60 -18
  177. tnfr/metrics/export.pyi +7 -0
  178. tnfr/metrics/glyph_timing.py +233 -43
  179. tnfr/metrics/glyph_timing.pyi +81 -0
  180. tnfr/metrics/learning_metrics.py +280 -0
  181. tnfr/metrics/learning_metrics.pyi +21 -0
  182. tnfr/metrics/phase_coherence.py +351 -0
  183. tnfr/metrics/phase_compatibility.py +349 -0
  184. tnfr/metrics/reporting.py +63 -28
  185. tnfr/metrics/reporting.pyi +25 -0
  186. tnfr/metrics/sense_index.py +1126 -43
  187. tnfr/metrics/sense_index.pyi +9 -0
  188. tnfr/metrics/trig.py +215 -23
  189. tnfr/metrics/trig.pyi +13 -0
  190. tnfr/metrics/trig_cache.py +148 -24
  191. tnfr/metrics/trig_cache.pyi +10 -0
  192. tnfr/multiscale/__init__.py +32 -0
  193. tnfr/multiscale/hierarchical.py +517 -0
  194. tnfr/node.py +646 -140
  195. tnfr/node.pyi +139 -0
  196. tnfr/observers.py +160 -45
  197. tnfr/observers.pyi +31 -0
  198. tnfr/ontosim.py +23 -19
  199. tnfr/ontosim.pyi +28 -0
  200. tnfr/operators/__init__.py +1358 -106
  201. tnfr/operators/__init__.pyi +31 -0
  202. tnfr/operators/algebra.py +277 -0
  203. tnfr/operators/canonical_patterns.py +420 -0
  204. tnfr/operators/cascade.py +267 -0
  205. tnfr/operators/cycle_detection.py +358 -0
  206. tnfr/operators/definitions.py +4108 -0
  207. tnfr/operators/definitions.pyi +78 -0
  208. tnfr/operators/grammar.py +1164 -0
  209. tnfr/operators/grammar.pyi +140 -0
  210. tnfr/operators/hamiltonian.py +710 -0
  211. tnfr/operators/health_analyzer.py +809 -0
  212. tnfr/operators/jitter.py +107 -38
  213. tnfr/operators/jitter.pyi +11 -0
  214. tnfr/operators/lifecycle.py +314 -0
  215. tnfr/operators/metabolism.py +618 -0
  216. tnfr/operators/metrics.py +2138 -0
  217. tnfr/operators/network_analysis/__init__.py +27 -0
  218. tnfr/operators/network_analysis/source_detection.py +186 -0
  219. tnfr/operators/nodal_equation.py +395 -0
  220. tnfr/operators/pattern_detection.py +660 -0
  221. tnfr/operators/patterns.py +669 -0
  222. tnfr/operators/postconditions/__init__.py +38 -0
  223. tnfr/operators/postconditions/mutation.py +236 -0
  224. tnfr/operators/preconditions/__init__.py +1226 -0
  225. tnfr/operators/preconditions/coherence.py +305 -0
  226. tnfr/operators/preconditions/dissonance.py +236 -0
  227. tnfr/operators/preconditions/emission.py +128 -0
  228. tnfr/operators/preconditions/mutation.py +580 -0
  229. tnfr/operators/preconditions/reception.py +125 -0
  230. tnfr/operators/preconditions/resonance.py +364 -0
  231. tnfr/operators/registry.py +74 -0
  232. tnfr/operators/registry.pyi +9 -0
  233. tnfr/operators/remesh.py +1415 -91
  234. tnfr/operators/remesh.pyi +26 -0
  235. tnfr/operators/structural_units.py +268 -0
  236. tnfr/operators/unified_grammar.py +105 -0
  237. tnfr/parallel/__init__.py +54 -0
  238. tnfr/parallel/auto_scaler.py +234 -0
  239. tnfr/parallel/distributed.py +384 -0
  240. tnfr/parallel/engine.py +238 -0
  241. tnfr/parallel/gpu_engine.py +420 -0
  242. tnfr/parallel/monitoring.py +248 -0
  243. tnfr/parallel/partitioner.py +459 -0
  244. tnfr/py.typed +0 -0
  245. tnfr/recipes/__init__.py +22 -0
  246. tnfr/recipes/cookbook.py +743 -0
  247. tnfr/rng.py +75 -151
  248. tnfr/rng.pyi +26 -0
  249. tnfr/schemas/__init__.py +8 -0
  250. tnfr/schemas/grammar.json +94 -0
  251. tnfr/sdk/__init__.py +107 -0
  252. tnfr/sdk/__init__.pyi +19 -0
  253. tnfr/sdk/adaptive_system.py +173 -0
  254. tnfr/sdk/adaptive_system.pyi +21 -0
  255. tnfr/sdk/builders.py +370 -0
  256. tnfr/sdk/builders.pyi +51 -0
  257. tnfr/sdk/fluent.py +1121 -0
  258. tnfr/sdk/fluent.pyi +74 -0
  259. tnfr/sdk/templates.py +342 -0
  260. tnfr/sdk/templates.pyi +41 -0
  261. tnfr/sdk/utils.py +341 -0
  262. tnfr/secure_config.py +46 -0
  263. tnfr/security/__init__.py +70 -0
  264. tnfr/security/database.py +514 -0
  265. tnfr/security/subprocess.py +503 -0
  266. tnfr/security/validation.py +290 -0
  267. tnfr/selector.py +59 -22
  268. tnfr/selector.pyi +19 -0
  269. tnfr/sense.py +92 -67
  270. tnfr/sense.pyi +23 -0
  271. tnfr/services/__init__.py +17 -0
  272. tnfr/services/orchestrator.py +325 -0
  273. tnfr/sparse/__init__.py +39 -0
  274. tnfr/sparse/representations.py +492 -0
  275. tnfr/structural.py +639 -263
  276. tnfr/structural.pyi +83 -0
  277. tnfr/telemetry/__init__.py +35 -0
  278. tnfr/telemetry/cache_metrics.py +226 -0
  279. tnfr/telemetry/cache_metrics.pyi +64 -0
  280. tnfr/telemetry/nu_f.py +422 -0
  281. tnfr/telemetry/nu_f.pyi +108 -0
  282. tnfr/telemetry/verbosity.py +36 -0
  283. tnfr/telemetry/verbosity.pyi +15 -0
  284. tnfr/tokens.py +2 -4
  285. tnfr/tokens.pyi +36 -0
  286. tnfr/tools/__init__.py +20 -0
  287. tnfr/tools/domain_templates.py +478 -0
  288. tnfr/tools/sequence_generator.py +846 -0
  289. tnfr/topology/__init__.py +13 -0
  290. tnfr/topology/asymmetry.py +151 -0
  291. tnfr/trace.py +300 -126
  292. tnfr/trace.pyi +42 -0
  293. tnfr/tutorials/__init__.py +38 -0
  294. tnfr/tutorials/autonomous_evolution.py +285 -0
  295. tnfr/tutorials/interactive.py +1576 -0
  296. tnfr/tutorials/structural_metabolism.py +238 -0
  297. tnfr/types.py +743 -12
  298. tnfr/types.pyi +357 -0
  299. tnfr/units.py +68 -0
  300. tnfr/units.pyi +13 -0
  301. tnfr/utils/__init__.py +282 -0
  302. tnfr/utils/__init__.pyi +215 -0
  303. tnfr/utils/cache.py +4223 -0
  304. tnfr/utils/cache.pyi +470 -0
  305. tnfr/{callback_utils.py → utils/callbacks.py} +26 -39
  306. tnfr/utils/callbacks.pyi +49 -0
  307. tnfr/utils/chunks.py +108 -0
  308. tnfr/utils/chunks.pyi +22 -0
  309. tnfr/utils/data.py +428 -0
  310. tnfr/utils/data.pyi +74 -0
  311. tnfr/utils/graph.py +85 -0
  312. tnfr/utils/graph.pyi +10 -0
  313. tnfr/utils/init.py +821 -0
  314. tnfr/utils/init.pyi +80 -0
  315. tnfr/utils/io.py +559 -0
  316. tnfr/utils/io.pyi +66 -0
  317. tnfr/{helpers → utils}/numeric.py +51 -24
  318. tnfr/utils/numeric.pyi +21 -0
  319. tnfr/validation/__init__.py +257 -0
  320. tnfr/validation/__init__.pyi +85 -0
  321. tnfr/validation/compatibility.py +460 -0
  322. tnfr/validation/compatibility.pyi +6 -0
  323. tnfr/validation/config.py +73 -0
  324. tnfr/validation/graph.py +139 -0
  325. tnfr/validation/graph.pyi +18 -0
  326. tnfr/validation/input_validation.py +755 -0
  327. tnfr/validation/invariants.py +712 -0
  328. tnfr/validation/rules.py +253 -0
  329. tnfr/validation/rules.pyi +44 -0
  330. tnfr/validation/runtime.py +279 -0
  331. tnfr/validation/runtime.pyi +28 -0
  332. tnfr/validation/sequence_validator.py +162 -0
  333. tnfr/validation/soft_filters.py +170 -0
  334. tnfr/validation/soft_filters.pyi +32 -0
  335. tnfr/validation/spectral.py +164 -0
  336. tnfr/validation/spectral.pyi +42 -0
  337. tnfr/validation/validator.py +1266 -0
  338. tnfr/validation/window.py +39 -0
  339. tnfr/validation/window.pyi +1 -0
  340. tnfr/visualization/__init__.py +98 -0
  341. tnfr/visualization/cascade_viz.py +256 -0
  342. tnfr/visualization/hierarchy.py +284 -0
  343. tnfr/visualization/sequence_plotter.py +784 -0
  344. tnfr/viz/__init__.py +60 -0
  345. tnfr/viz/matplotlib.py +278 -0
  346. tnfr/viz/matplotlib.pyi +35 -0
  347. tnfr-8.5.0.dist-info/METADATA +573 -0
  348. tnfr-8.5.0.dist-info/RECORD +353 -0
  349. {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/entry_points.txt +1 -0
  350. {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/licenses/LICENSE.md +1 -1
  351. tnfr/collections_utils.py +0 -300
  352. tnfr/config.py +0 -32
  353. tnfr/grammar.py +0 -344
  354. tnfr/graph_utils.py +0 -84
  355. tnfr/helpers/__init__.py +0 -71
  356. tnfr/import_utils.py +0 -228
  357. tnfr/json_utils.py +0 -162
  358. tnfr/logging_utils.py +0 -116
  359. tnfr/presets.py +0 -60
  360. tnfr/validators.py +0 -84
  361. tnfr/value_utils.py +0 -59
  362. tnfr-4.5.2.dist-info/METADATA +0 -379
  363. tnfr-4.5.2.dist-info/RECORD +0 -67
  364. {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/WHEEL +0 -0
  365. {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/top_level.txt +0 -0
--- /dev/null
+++ b/tnfr/parallel/distributed.py
@@ -0,0 +1,384 @@
+ """Distributed computation backend for massive TNFR networks.
+
+ Optional module that provides Ray and Dask integration for cluster computing.
+ Requires installation of the optional dependencies:
+
+     pip install tnfr[ray]   # or
+     pip install tnfr[dask]
+ """
+
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+ if TYPE_CHECKING:  # pragma: no cover
+     from ..types import TNFRGraph
+
+ # Check for optional dependencies
+ try:
+     import ray
+
+     HAS_RAY = True
+ except ImportError:
+     HAS_RAY = False
+     ray = None  # type: ignore
+
+ try:
+     import dask
+     from dask.distributed import Client
+
+     HAS_DASK = True
+ except ImportError:
+     HAS_DASK = False
+     dask = None  # type: ignore
+     Client = None  # type: ignore
+
+
+ class TNFRDistributedEngine:
+     """Distributed computation engine for massive TNFR networks.
+
+     Provides Ray and Dask backend integration for cluster-scale computation
+     while preserving TNFR structural invariants.
+
+     Parameters
+     ----------
+     backend : {"auto", "ray", "dask"}, default="auto"
+         Distributed backend to use. "auto" selects Ray if available,
+         otherwise Dask, otherwise falls back to multiprocessing.
+
+     Raises
+     ------
+     ImportError
+         If the requested backend is not installed.
+
+     Examples
+     --------
+     >>> # Requires ray installation
+     >>> try:
+     ...     from tnfr.parallel import TNFRDistributedEngine
+     ...     engine = TNFRDistributedEngine(backend="auto")
+     ...     # engine.backend in ["ray", "dask", "multiprocessing"]
+     ... except ImportError:
+     ...     pass  # Optional dependency not installed
+
+     Notes
+     -----
+     This is an optional advanced feature. Basic parallelization via
+     TNFRParallelEngine is sufficient for most use cases.
+     """
+
+     def __init__(self, backend: str = "auto"):
+         self.backend = self._select_backend(backend)
+         self._client = None
+         self._ray_initialized = False
+
+     def _select_backend(self, backend: str) -> str:
+         """Select an available distributed backend."""
+         if backend == "auto":
+             if HAS_RAY:
+                 return "ray"
+             elif HAS_DASK:
+                 return "dask"
+             else:
+                 return "multiprocessing"
+
+         if backend == "ray" and not HAS_RAY:
+             raise ImportError("Ray not available. Install with: pip install ray")
+         if backend == "dask" and not HAS_DASK:
+             raise ImportError(
+                 "Dask not available. Install with: pip install dask[distributed]"
+             )
+
+         return backend
+
+     def initialize_cluster(self, **cluster_config: Any) -> None:
+         """Initialize the distributed cluster.
+
+         Parameters
+         ----------
+         **cluster_config
+             Backend-specific cluster configuration.
+
+         Examples
+         --------
+         >>> # Ray configuration
+         >>> engine = TNFRDistributedEngine(backend="ray")
+         >>> engine.initialize_cluster(num_cpus=4)
+
+         >>> # Dask configuration
+         >>> engine = TNFRDistributedEngine(backend="dask")
+         >>> engine.initialize_cluster(n_workers=4)
+         """
+         if self.backend == "ray" and HAS_RAY:
+             if not self._ray_initialized:
+                 ray.init(**cluster_config)
+                 self._ray_initialized = True
+         elif self.backend == "dask" and HAS_DASK:
+             if self._client is None:
+                 self._client = Client(**cluster_config)
+
+     def shutdown_cluster(self) -> None:
+         """Shut down the distributed cluster and release resources."""
+         if self.backend == "ray" and HAS_RAY and self._ray_initialized:
+             ray.shutdown()
+             self._ray_initialized = False
+         elif self.backend == "dask" and self._client is not None:
+             self._client.close()
+             self._client = None
+
+     def compute_si_distributed(
+         self, graph: TNFRGraph, chunk_size: int = 500, **kwargs: Any
+     ) -> Dict[str, Any]:
+         """Compute the sense index using distributed computation.
+
+         Parameters
+         ----------
+         graph : TNFRGraph
+             Network graph with TNFR attributes.
+         chunk_size : int, default=500
+             Nodes per distributed work unit.
+         **kwargs
+             Additional arguments for Si computation.
+
+         Returns
+         -------
+         Dict[str, Any]
+             Si values and metadata.
+
+         Notes
+         -----
+         Requires Ray or Dask to be installed and initialized.
+         Falls back to multiprocessing if no distributed backend is available.
+         """
+         if self.backend == "ray" and HAS_RAY:
+             return self._compute_si_ray(graph, chunk_size, **kwargs)
+         elif self.backend == "dask" and HAS_DASK:
+             return self._compute_si_dask(graph, chunk_size, **kwargs)
+         else:
+             # Fallback to multiprocessing
+             from .engine import TNFRParallelEngine
+
+             engine = TNFRParallelEngine(max_workers=4)
+             si_values = engine.compute_si_parallel(graph, **kwargs)
+             return {"si_values": si_values, "backend": "multiprocessing"}
+
+     def _compute_si_ray(
+         self, graph: TNFRGraph, chunk_size: int, **kwargs: Any
+     ) -> Dict[str, Any]:
+         """Compute Si using Ray for distributed execution.
+
+         Parameters
+         ----------
+         graph : TNFRGraph
+             Network graph.
+         chunk_size : int
+             Nodes per work unit.
+         **kwargs
+             Additional Si computation parameters.
+
+         Returns
+         -------
+         Dict[str, Any]
+             Results dictionary with si_values and metadata.
+         """
+         if not HAS_RAY:
+             raise ImportError("Ray required for distributed computation")
+
+         # Define remote function for Ray
+         @ray.remote
+         def compute_si_chunk(node_chunk, graph_data):
+             """Compute Si for a chunk of nodes (Ray remote function)."""
+             import networkx as nx
+             from tnfr.metrics.sense_index import compute_Si_node
+
+             # Reconstruct graph in worker
+             G = nx.Graph()
+             G.add_nodes_from([(nid, attrs) for nid, attrs in graph_data["nodes"]])
+             G.add_edges_from(graph_data["edges"])
+             G.graph.update(graph_data["graph_attrs"])
+
+             # Compute Si for this chunk
+             si_values = {}
+             for node_id in node_chunk:
+                 try:
+                     si_values[node_id] = compute_Si_node(G, node_id)
+                 except Exception:
+                     # Fallback value on error
+                     si_values[node_id] = 0.5
+
+             return si_values
+
+         # Serialize graph data
+         graph_data = {
+             "nodes": list(graph.nodes(data=True)),
+             "edges": list(graph.edges()),
+             "graph_attrs": dict(graph.graph),
+         }
+
+         # Chunk nodes
+         nodes = list(graph.nodes())
+         chunks = [nodes[i : i + chunk_size] for i in range(0, len(nodes), chunk_size)]
+
+         # Submit Ray tasks
+         futures = [compute_si_chunk.remote(chunk, graph_data) for chunk in chunks]
+
+         # Gather results
+         chunk_results = ray.get(futures)
+
+         # Merge results
+         si_values = {}
+         for chunk_result in chunk_results:
+             si_values.update(chunk_result)
+
+         return {
+             "si_values": si_values,
+             "backend": "ray",
+             "chunks_processed": len(chunks),
+             "nodes_per_chunk": chunk_size,
+         }
+
+     def _compute_si_dask(
+         self, graph: TNFRGraph, chunk_size: int, **kwargs: Any
+     ) -> Dict[str, Any]:
+         """Compute Si using Dask for distributed execution.
+
+         Parameters
+         ----------
+         graph : TNFRGraph
+             Network graph.
+         chunk_size : int
+             Nodes per work unit.
+         **kwargs
+             Additional Si computation parameters.
+
+         Returns
+         -------
+         Dict[str, Any]
+             Results dictionary with si_values and metadata.
+
+         Notes
+         -----
+         Basic Dask implementation. Can be extended for more sophisticated
+         distributed patterns (delayed, dataframes, etc.).
+         """
+         if not HAS_DASK:
+             raise ImportError("Dask required for distributed computation")
+
+         from dask import delayed, compute
+
+         def compute_si_chunk(node_chunk, graph_data):
+             """Compute Si for a chunk of nodes (Dask delayed function)."""
+             import networkx as nx
+             from tnfr.metrics.sense_index import compute_Si_node
+
+             # Reconstruct graph
+             G = nx.Graph()
+             G.add_nodes_from([(nid, attrs) for nid, attrs in graph_data["nodes"]])
+             G.add_edges_from(graph_data["edges"])
+             G.graph.update(graph_data["graph_attrs"])
+
+             # Compute Si for chunk
+             si_values = {}
+             for node_id in node_chunk:
+                 try:
+                     si_values[node_id] = compute_Si_node(G, node_id)
+                 except Exception:
+                     si_values[node_id] = 0.5
+
+             return si_values
+
+         # Serialize graph
+         graph_data = {
+             "nodes": list(graph.nodes(data=True)),
+             "edges": list(graph.edges()),
+             "graph_attrs": dict(graph.graph),
+         }
+
+         # Chunk and create delayed tasks
+         nodes = list(graph.nodes())
+         chunks = [nodes[i : i + chunk_size] for i in range(0, len(nodes), chunk_size)]
+
+         delayed_tasks = [
+             delayed(compute_si_chunk)(chunk, graph_data) for chunk in chunks
+         ]
+
+         # Compute in parallel
+         chunk_results = compute(*delayed_tasks)
+
+         # Merge results
+         si_values = {}
+         for chunk_result in chunk_results:
+             si_values.update(chunk_result)
+
+         return {
+             "si_values": si_values,
+             "backend": "dask",
+             "chunks_processed": len(chunks),
+             "nodes_per_chunk": chunk_size,
+         }
+
+     def simulate_large_network(
+         self,
+         node_count: int,
+         edge_probability: float,
+         operator_sequences: List[List[str]],
+         chunk_size: int = 500,
+     ) -> Dict[str, Any]:
+         """Simulate a massive network using distributed computation.
+
+         Parameters
+         ----------
+         node_count : int
+             Total number of nodes in the network.
+         edge_probability : float
+             Edge creation probability for the random network.
+         operator_sequences : List[List[str]]
+             Sequences of TNFR operators to apply.
+         chunk_size : int, default=500
+             Nodes per distributed work unit.
+
+         Returns
+         -------
+         Dict[str, Any]
+             Simulation results with coherence and sense indices.
+
+         Notes
+         -----
+         Creates a large network and processes it using the distributed
+         backend. This is a simplified implementation focused on Si
+         computation. Full operator sequence application would require more
+         sophisticated distributed state management.
+         """
+         import networkx as nx
+
+         # Create large network
+         G = nx.erdos_renyi_graph(node_count, edge_probability)
+
+         # Initialize with TNFR attributes
+         for node in G.nodes():
+             G.nodes[node]["nu_f"] = 1.0
+             G.nodes[node]["phase"] = 0.0
+             G.nodes[node]["epi"] = 0.5
+             G.nodes[node]["delta_nfr"] = 0.0
+
+         # Compute Si using distributed backend
+         results = self.compute_si_distributed(G, chunk_size=chunk_size)
+
+         # Add network statistics
+         results["network_stats"] = {
+             "nodes": node_count,
+             "edges": G.number_of_edges(),
+             "density": nx.density(G),
+             "avg_clustering": nx.average_clustering(G) if node_count < 10000 else 0.0,
+         }
+
+         return results
+
+     def __enter__(self):
+         """Context manager entry."""
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         """Context manager exit: clean up resources."""
+         self.shutdown_cluster()
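
For orientation, a minimal usage sketch of the new distributed engine (illustrative only, not part of the diff). It assumes networkx is installed and seeds the same node attributes that simulate_large_network initializes; without the optional ray/dask extras the engine falls back to its multiprocessing path, as shown in the code above:

    import networkx as nx
    from tnfr.parallel import TNFRDistributedEngine

    # Build a random graph and seed the TNFR attributes expected by Si
    G = nx.erdos_renyi_graph(1000, 0.01, seed=42)
    for n in G.nodes():
        G.nodes[n].update(nu_f=1.0, phase=0.0, epi=0.5, delta_nfr=0.0)

    # The context manager guarantees shutdown_cluster() on exit
    with TNFRDistributedEngine(backend="auto") as engine:
        engine.initialize_cluster()  # no-op on the multiprocessing fallback
        results = engine.compute_si_distributed(G, chunk_size=250)

    print(results["backend"], len(results["si_values"]))

Note that the chunk workers rebuild the graph from serialized node, edge, and graph attributes, so per-node Si values are computed independently per chunk and merged afterwards.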
--- /dev/null
+++ b/tnfr/parallel/engine.py
@@ -0,0 +1,238 @@
+ """Parallel execution engine for TNFR computations.
+
+ Provides thread- and process-based parallelization for ΔNFR and Si
+ computations while preserving all TNFR structural invariants.
+ """
+
+ from __future__ import annotations
+
+ from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
+ from multiprocessing import cpu_count
+ from typing import TYPE_CHECKING, Any, Dict, Optional
+
+ if TYPE_CHECKING:  # pragma: no cover
+     from ..types import TNFRGraph
+
+ from .partitioner import FractalPartitioner
+
+
+ class TNFRParallelEngine:
+     """Parallel computation engine for TNFR networks.
+
+     Leverages multiprocessing or threading to accelerate ΔNFR and Si
+     computations on medium to large networks. Respects TNFR invariants by
+     partitioning networks along coherence boundaries.
+
+     Parameters
+     ----------
+     max_workers : int or None, optional
+         Maximum number of parallel workers. None auto-detects the CPU count.
+     execution_mode : {"threads", "processes"}, default="threads"
+         Execution backend. "threads" for I/O-bound tasks, "processes" for
+         CPU-bound tasks. Note: "processes" has serialization overhead.
+     partition_size : int, default=100
+         Maximum nodes per partition for parallel processing.
+
+     Examples
+     --------
+     >>> import networkx as nx
+     >>> from multiprocessing import cpu_count
+     >>> from tnfr.parallel import TNFRParallelEngine
+     >>> G = nx.Graph()
+     >>> G.add_edges_from([("a", "b"), ("b", "c")])
+     >>> for node in G.nodes():
+     ...     G.nodes[node]["vf"] = 1.0
+     ...     G.nodes[node]["phase"] = 0.0
+     ...     G.nodes[node]["epi"] = 0.5
+     ...     G.nodes[node]["delta_nfr"] = 0.0
+     >>> engine = TNFRParallelEngine(max_workers=2)
+     >>> # Engine ready for parallel computation
+     >>> engine.max_workers <= cpu_count()
+     True
+
+     Notes
+     -----
+     This engine integrates with existing TNFR dynamics by:
+
+     1. Using the same n_jobs parameter conventions
+     2. Preserving all node attributes and graph structure
+     3. Maintaining canonical ΔNFR semantics
+     4. Respecting phase coherence in partitioning
+     """
+
+     def __init__(
+         self,
+         max_workers: Optional[int] = None,
+         execution_mode: str = "threads",
+         partition_size: int = 100,
+         cache_aware: bool = True,
+     ):
+         if max_workers is None:
+             max_workers = cpu_count()
+
+         self.max_workers = max_workers
+         self.execution_mode = execution_mode
+         self.cache_aware = cache_aware
+         self.partitioner = FractalPartitioner(max_partition_size=partition_size)
+
+     def _distribute_work_cache_aware(self, partitions: list, num_workers: int) -> list:
+         """Distribute work across workers in a cache-aware manner.
+
+         Groups related partitions together to improve cache locality
+         and reduce cache misses during parallel execution.
+
+         Parameters
+         ----------
+         partitions : list
+             List of work partitions to distribute.
+         num_workers : int
+             Number of worker processes/threads.
+
+         Returns
+         -------
+         list
+             List of work chunks, one per worker, organized for cache
+             efficiency.
+         """
+         if not self.cache_aware or len(partitions) <= num_workers:
+             # Simple round-robin distribution
+             chunks = [[] for _ in range(num_workers)]
+             for i, partition in enumerate(partitions):
+                 chunks[i % num_workers].append(partition)
+             return chunks
+
+         # Cache-aware distribution: group spatially nearby partitions.
+         # This reduces cache misses when processing related nodes.
+
+         # Sort partitions by their "center" (average νf of nodes)
+         def partition_center(partition_info):
+             node_set, subgraph = partition_info
+             if not node_set:
+                 return 0.0
+             try:
+                 from ..alias import get_attr
+                 from ..constants.aliases import ALIAS_VF
+
+                 vf_sum = sum(
+                     float(
+                         get_attr(subgraph.nodes[n], ALIAS_VF, None)
+                         or subgraph.nodes[n].get("vf", 1.0)
+                     )
+                     for n in node_set
+                 )
+                 return vf_sum / len(node_set)
+             except Exception:
+                 return 0.0
+
+         sorted_partitions = sorted(partitions, key=partition_center)
+
+         # Distribute sorted partitions in contiguous blocks so that each
+         # worker processes spatially nearby partitions.
+         chunks = [[] for _ in range(num_workers)]
+         chunk_size = len(sorted_partitions) // num_workers
+         remainder = len(sorted_partitions) % num_workers
+
+         start_idx = 0
+         for worker_id in range(num_workers):
+             # Give some workers an extra partition to handle the remainder
+             end_idx = start_idx + chunk_size + (1 if worker_id < remainder else 0)
+             chunks[worker_id] = sorted_partitions[start_idx:end_idx]
+             start_idx = end_idx
+
+         return chunks
+
+     def compute_delta_nfr_parallel(
+         self, graph: TNFRGraph, **kwargs: Any
+     ) -> Dict[Any, float]:
+         """Compute ΔNFR in parallel using fractal partitioning.
+
+         Delegates to the existing default_compute_delta_nfr with the n_jobs
+         parameter. This method exists primarily for API consistency with
+         the proposal.
+
+         Parameters
+         ----------
+         graph : TNFRGraph
+             Network graph with structural attributes.
+         **kwargs
+             Additional arguments passed to the compute function.
+
+         Returns
+         -------
+         Dict[Any, float]
+             Mapping from node IDs to ΔNFR values.
+
+         Notes
+         -----
+         Currently delegates to the existing implementation in
+         tnfr.dynamics.dnfr.default_compute_delta_nfr, which already supports
+         n_jobs for parallelization. Future enhancements could use explicit
+         partitioning strategies.
+         """
+         from ..dynamics.dnfr import default_compute_delta_nfr
+         from ..constants.aliases import ALIAS_DNFR
+         from ..alias import get_attr
+
+         # Use existing parallel infrastructure
+         kwargs.setdefault("n_jobs", self.max_workers)
+         default_compute_delta_nfr(graph, **kwargs)
+
+         # Extract results
+         return {
+             node_id: float(get_attr(graph.nodes[node_id], ALIAS_DNFR, 0.0))
+             for node_id in graph.nodes()
+         }
+
+     def compute_si_parallel(self, graph: TNFRGraph, **kwargs: Any) -> Dict[Any, float]:
+         """Compute the sense index in parallel.
+
+         Delegates to the existing compute_Si with the n_jobs parameter.
+         This method exists primarily for API consistency with the proposal.
+
+         Parameters
+         ----------
+         graph : TNFRGraph
+             Network graph with structural attributes.
+         **kwargs
+             Additional arguments passed to the compute function.
+
+         Returns
+         -------
+         Dict[Any, float]
+             Mapping from node IDs to Si values.
+
+         Notes
+         -----
+         Currently delegates to the existing implementation in
+         tnfr.metrics.sense_index.compute_Si, which already supports n_jobs
+         for parallelization. Future enhancements could use explicit
+         partitioning.
+         """
+         from ..metrics.sense_index import compute_Si
+
+         # Use existing parallel infrastructure
+         kwargs.setdefault("n_jobs", self.max_workers)
+         kwargs.setdefault("inplace", False)
+         return compute_Si(graph, **kwargs)
+
+     def recommend_workers(self, graph_size: int) -> int:
+         """Recommend an optimal worker count for a given graph size.
+
+         Parameters
+         ----------
+         graph_size : int
+             Number of nodes in the network.
+
+         Returns
+         -------
+         int
+             Recommended number of workers.
+
+         Notes
+         -----
+         Uses heuristics:
+
+         - Small graphs (<50 nodes): serial execution
+         - Medium graphs: min(max_workers, graph_size // 25)
+         - Large graphs: full parallelism
+         """
+         if graph_size < 50:
+             return 1  # Serial is faster for small graphs
+         elif graph_size < 500:
+             return min(self.max_workers, graph_size // 25)
+         else:
+             return self.max_workers
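
Similarly, a sketch of the in-process engine defined above (illustrative, not part of the diff; recommend_workers applies the heuristics from its docstring, and n_jobs is forwarded to the existing compute paths):

    import networkx as nx
    from tnfr.parallel import TNFRParallelEngine

    # Small random graph with the attributes used in the docstring example
    G = nx.erdos_renyi_graph(200, 0.05, seed=7)
    for n in G.nodes():
        G.nodes[n].update(vf=1.0, phase=0.0, epi=0.5, delta_nfr=0.0)

    engine = TNFRParallelEngine(execution_mode="threads", partition_size=100)
    n_jobs = engine.recommend_workers(G.number_of_nodes())  # 200 nodes -> min(max_workers, 8)
    si = engine.compute_si_parallel(G, n_jobs=n_jobs)           # dict: node -> Si
    dnfr = engine.compute_delta_nfr_parallel(G, n_jobs=n_jobs)  # dict: node -> ΔNFR

Both methods are thin wrappers over the existing n_jobs-aware implementations in tnfr.metrics.sense_index and tnfr.dynamics.dnfr, so results match the serial paths.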