tnfr 4.5.2__py3-none-any.whl → 8.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tnfr might be problematic.
- tnfr/__init__.py +334 -50
- tnfr/__init__.pyi +33 -0
- tnfr/_compat.py +10 -0
- tnfr/_generated_version.py +34 -0
- tnfr/_version.py +49 -0
- tnfr/_version.pyi +7 -0
- tnfr/alias.py +214 -37
- tnfr/alias.pyi +108 -0
- tnfr/backends/__init__.py +354 -0
- tnfr/backends/jax_backend.py +173 -0
- tnfr/backends/numpy_backend.py +238 -0
- tnfr/backends/optimized_numpy.py +420 -0
- tnfr/backends/torch_backend.py +408 -0
- tnfr/cache.py +149 -556
- tnfr/cache.pyi +13 -0
- tnfr/cli/__init__.py +51 -16
- tnfr/cli/__init__.pyi +26 -0
- tnfr/cli/arguments.py +344 -32
- tnfr/cli/arguments.pyi +29 -0
- tnfr/cli/execution.py +676 -50
- tnfr/cli/execution.pyi +70 -0
- tnfr/cli/interactive_validator.py +614 -0
- tnfr/cli/utils.py +18 -3
- tnfr/cli/utils.pyi +7 -0
- tnfr/cli/validate.py +236 -0
- tnfr/compat/__init__.py +85 -0
- tnfr/compat/dataclass.py +136 -0
- tnfr/compat/jsonschema_stub.py +61 -0
- tnfr/compat/matplotlib_stub.py +73 -0
- tnfr/compat/numpy_stub.py +155 -0
- tnfr/config/__init__.py +224 -0
- tnfr/config/__init__.pyi +10 -0
- tnfr/{constants_glyphs.py → config/constants.py} +26 -20
- tnfr/config/constants.pyi +12 -0
- tnfr/config/defaults.py +54 -0
- tnfr/{constants/core.py → config/defaults_core.py} +59 -6
- tnfr/config/defaults_init.py +33 -0
- tnfr/config/defaults_metric.py +104 -0
- tnfr/config/feature_flags.py +81 -0
- tnfr/config/feature_flags.pyi +16 -0
- tnfr/config/glyph_constants.py +31 -0
- tnfr/config/init.py +77 -0
- tnfr/config/init.pyi +8 -0
- tnfr/config/operator_names.py +254 -0
- tnfr/config/operator_names.pyi +36 -0
- tnfr/config/physics_derivation.py +354 -0
- tnfr/config/presets.py +83 -0
- tnfr/config/presets.pyi +7 -0
- tnfr/config/security.py +927 -0
- tnfr/config/thresholds.py +114 -0
- tnfr/config/tnfr_config.py +498 -0
- tnfr/constants/__init__.py +51 -133
- tnfr/constants/__init__.pyi +92 -0
- tnfr/constants/aliases.py +33 -0
- tnfr/constants/aliases.pyi +27 -0
- tnfr/constants/init.py +3 -1
- tnfr/constants/init.pyi +12 -0
- tnfr/constants/metric.py +9 -15
- tnfr/constants/metric.pyi +19 -0
- tnfr/core/__init__.py +33 -0
- tnfr/core/container.py +226 -0
- tnfr/core/default_implementations.py +329 -0
- tnfr/core/interfaces.py +279 -0
- tnfr/dynamics/__init__.py +213 -633
- tnfr/dynamics/__init__.pyi +83 -0
- tnfr/dynamics/adaptation.py +267 -0
- tnfr/dynamics/adaptation.pyi +7 -0
- tnfr/dynamics/adaptive_sequences.py +189 -0
- tnfr/dynamics/adaptive_sequences.pyi +14 -0
- tnfr/dynamics/aliases.py +23 -0
- tnfr/dynamics/aliases.pyi +19 -0
- tnfr/dynamics/bifurcation.py +232 -0
- tnfr/dynamics/canonical.py +229 -0
- tnfr/dynamics/canonical.pyi +48 -0
- tnfr/dynamics/coordination.py +385 -0
- tnfr/dynamics/coordination.pyi +25 -0
- tnfr/dynamics/dnfr.py +2699 -398
- tnfr/dynamics/dnfr.pyi +26 -0
- tnfr/dynamics/dynamic_limits.py +225 -0
- tnfr/dynamics/feedback.py +252 -0
- tnfr/dynamics/feedback.pyi +24 -0
- tnfr/dynamics/fused_dnfr.py +454 -0
- tnfr/dynamics/homeostasis.py +157 -0
- tnfr/dynamics/homeostasis.pyi +14 -0
- tnfr/dynamics/integrators.py +496 -102
- tnfr/dynamics/integrators.pyi +36 -0
- tnfr/dynamics/learning.py +310 -0
- tnfr/dynamics/learning.pyi +33 -0
- tnfr/dynamics/metabolism.py +254 -0
- tnfr/dynamics/nbody.py +796 -0
- tnfr/dynamics/nbody_tnfr.py +783 -0
- tnfr/dynamics/propagation.py +326 -0
- tnfr/dynamics/runtime.py +908 -0
- tnfr/dynamics/runtime.pyi +77 -0
- tnfr/dynamics/sampling.py +10 -5
- tnfr/dynamics/sampling.pyi +7 -0
- tnfr/dynamics/selectors.py +711 -0
- tnfr/dynamics/selectors.pyi +85 -0
- tnfr/dynamics/structural_clip.py +207 -0
- tnfr/errors/__init__.py +37 -0
- tnfr/errors/contextual.py +492 -0
- tnfr/execution.py +77 -55
- tnfr/execution.pyi +45 -0
- tnfr/extensions/__init__.py +205 -0
- tnfr/extensions/__init__.pyi +18 -0
- tnfr/extensions/base.py +173 -0
- tnfr/extensions/base.pyi +35 -0
- tnfr/extensions/business/__init__.py +71 -0
- tnfr/extensions/business/__init__.pyi +11 -0
- tnfr/extensions/business/cookbook.py +88 -0
- tnfr/extensions/business/cookbook.pyi +8 -0
- tnfr/extensions/business/health_analyzers.py +202 -0
- tnfr/extensions/business/health_analyzers.pyi +9 -0
- tnfr/extensions/business/patterns.py +183 -0
- tnfr/extensions/business/patterns.pyi +8 -0
- tnfr/extensions/medical/__init__.py +73 -0
- tnfr/extensions/medical/__init__.pyi +11 -0
- tnfr/extensions/medical/cookbook.py +88 -0
- tnfr/extensions/medical/cookbook.pyi +8 -0
- tnfr/extensions/medical/health_analyzers.py +181 -0
- tnfr/extensions/medical/health_analyzers.pyi +9 -0
- tnfr/extensions/medical/patterns.py +163 -0
- tnfr/extensions/medical/patterns.pyi +8 -0
- tnfr/flatten.py +29 -50
- tnfr/flatten.pyi +21 -0
- tnfr/gamma.py +66 -53
- tnfr/gamma.pyi +36 -0
- tnfr/glyph_history.py +144 -57
- tnfr/glyph_history.pyi +35 -0
- tnfr/glyph_runtime.py +19 -0
- tnfr/glyph_runtime.pyi +8 -0
- tnfr/immutable.py +70 -30
- tnfr/immutable.pyi +36 -0
- tnfr/initialization.py +22 -16
- tnfr/initialization.pyi +65 -0
- tnfr/io.py +5 -241
- tnfr/io.pyi +13 -0
- tnfr/locking.pyi +7 -0
- tnfr/mathematics/__init__.py +79 -0
- tnfr/mathematics/backend.py +453 -0
- tnfr/mathematics/backend.pyi +99 -0
- tnfr/mathematics/dynamics.py +408 -0
- tnfr/mathematics/dynamics.pyi +90 -0
- tnfr/mathematics/epi.py +391 -0
- tnfr/mathematics/epi.pyi +65 -0
- tnfr/mathematics/generators.py +242 -0
- tnfr/mathematics/generators.pyi +29 -0
- tnfr/mathematics/metrics.py +119 -0
- tnfr/mathematics/metrics.pyi +16 -0
- tnfr/mathematics/operators.py +239 -0
- tnfr/mathematics/operators.pyi +59 -0
- tnfr/mathematics/operators_factory.py +124 -0
- tnfr/mathematics/operators_factory.pyi +11 -0
- tnfr/mathematics/projection.py +87 -0
- tnfr/mathematics/projection.pyi +33 -0
- tnfr/mathematics/runtime.py +182 -0
- tnfr/mathematics/runtime.pyi +64 -0
- tnfr/mathematics/spaces.py +256 -0
- tnfr/mathematics/spaces.pyi +83 -0
- tnfr/mathematics/transforms.py +305 -0
- tnfr/mathematics/transforms.pyi +62 -0
- tnfr/metrics/__init__.py +47 -9
- tnfr/metrics/__init__.pyi +20 -0
- tnfr/metrics/buffer_cache.py +163 -0
- tnfr/metrics/buffer_cache.pyi +24 -0
- tnfr/metrics/cache_utils.py +214 -0
- tnfr/metrics/coherence.py +1510 -330
- tnfr/metrics/coherence.pyi +129 -0
- tnfr/metrics/common.py +23 -16
- tnfr/metrics/common.pyi +35 -0
- tnfr/metrics/core.py +251 -36
- tnfr/metrics/core.pyi +13 -0
- tnfr/metrics/diagnosis.py +709 -110
- tnfr/metrics/diagnosis.pyi +86 -0
- tnfr/metrics/emergence.py +245 -0
- tnfr/metrics/export.py +60 -18
- tnfr/metrics/export.pyi +7 -0
- tnfr/metrics/glyph_timing.py +233 -43
- tnfr/metrics/glyph_timing.pyi +81 -0
- tnfr/metrics/learning_metrics.py +280 -0
- tnfr/metrics/learning_metrics.pyi +21 -0
- tnfr/metrics/phase_coherence.py +351 -0
- tnfr/metrics/phase_compatibility.py +349 -0
- tnfr/metrics/reporting.py +63 -28
- tnfr/metrics/reporting.pyi +25 -0
- tnfr/metrics/sense_index.py +1126 -43
- tnfr/metrics/sense_index.pyi +9 -0
- tnfr/metrics/trig.py +215 -23
- tnfr/metrics/trig.pyi +13 -0
- tnfr/metrics/trig_cache.py +148 -24
- tnfr/metrics/trig_cache.pyi +10 -0
- tnfr/multiscale/__init__.py +32 -0
- tnfr/multiscale/hierarchical.py +517 -0
- tnfr/node.py +646 -140
- tnfr/node.pyi +139 -0
- tnfr/observers.py +160 -45
- tnfr/observers.pyi +31 -0
- tnfr/ontosim.py +23 -19
- tnfr/ontosim.pyi +28 -0
- tnfr/operators/__init__.py +1358 -106
- tnfr/operators/__init__.pyi +31 -0
- tnfr/operators/algebra.py +277 -0
- tnfr/operators/canonical_patterns.py +420 -0
- tnfr/operators/cascade.py +267 -0
- tnfr/operators/cycle_detection.py +358 -0
- tnfr/operators/definitions.py +4108 -0
- tnfr/operators/definitions.pyi +78 -0
- tnfr/operators/grammar.py +1164 -0
- tnfr/operators/grammar.pyi +140 -0
- tnfr/operators/hamiltonian.py +710 -0
- tnfr/operators/health_analyzer.py +809 -0
- tnfr/operators/jitter.py +107 -38
- tnfr/operators/jitter.pyi +11 -0
- tnfr/operators/lifecycle.py +314 -0
- tnfr/operators/metabolism.py +618 -0
- tnfr/operators/metrics.py +2138 -0
- tnfr/operators/network_analysis/__init__.py +27 -0
- tnfr/operators/network_analysis/source_detection.py +186 -0
- tnfr/operators/nodal_equation.py +395 -0
- tnfr/operators/pattern_detection.py +660 -0
- tnfr/operators/patterns.py +669 -0
- tnfr/operators/postconditions/__init__.py +38 -0
- tnfr/operators/postconditions/mutation.py +236 -0
- tnfr/operators/preconditions/__init__.py +1226 -0
- tnfr/operators/preconditions/coherence.py +305 -0
- tnfr/operators/preconditions/dissonance.py +236 -0
- tnfr/operators/preconditions/emission.py +128 -0
- tnfr/operators/preconditions/mutation.py +580 -0
- tnfr/operators/preconditions/reception.py +125 -0
- tnfr/operators/preconditions/resonance.py +364 -0
- tnfr/operators/registry.py +74 -0
- tnfr/operators/registry.pyi +9 -0
- tnfr/operators/remesh.py +1415 -91
- tnfr/operators/remesh.pyi +26 -0
- tnfr/operators/structural_units.py +268 -0
- tnfr/operators/unified_grammar.py +105 -0
- tnfr/parallel/__init__.py +54 -0
- tnfr/parallel/auto_scaler.py +234 -0
- tnfr/parallel/distributed.py +384 -0
- tnfr/parallel/engine.py +238 -0
- tnfr/parallel/gpu_engine.py +420 -0
- tnfr/parallel/monitoring.py +248 -0
- tnfr/parallel/partitioner.py +459 -0
- tnfr/py.typed +0 -0
- tnfr/recipes/__init__.py +22 -0
- tnfr/recipes/cookbook.py +743 -0
- tnfr/rng.py +75 -151
- tnfr/rng.pyi +26 -0
- tnfr/schemas/__init__.py +8 -0
- tnfr/schemas/grammar.json +94 -0
- tnfr/sdk/__init__.py +107 -0
- tnfr/sdk/__init__.pyi +19 -0
- tnfr/sdk/adaptive_system.py +173 -0
- tnfr/sdk/adaptive_system.pyi +21 -0
- tnfr/sdk/builders.py +370 -0
- tnfr/sdk/builders.pyi +51 -0
- tnfr/sdk/fluent.py +1121 -0
- tnfr/sdk/fluent.pyi +74 -0
- tnfr/sdk/templates.py +342 -0
- tnfr/sdk/templates.pyi +41 -0
- tnfr/sdk/utils.py +341 -0
- tnfr/secure_config.py +46 -0
- tnfr/security/__init__.py +70 -0
- tnfr/security/database.py +514 -0
- tnfr/security/subprocess.py +503 -0
- tnfr/security/validation.py +290 -0
- tnfr/selector.py +59 -22
- tnfr/selector.pyi +19 -0
- tnfr/sense.py +92 -67
- tnfr/sense.pyi +23 -0
- tnfr/services/__init__.py +17 -0
- tnfr/services/orchestrator.py +325 -0
- tnfr/sparse/__init__.py +39 -0
- tnfr/sparse/representations.py +492 -0
- tnfr/structural.py +639 -263
- tnfr/structural.pyi +83 -0
- tnfr/telemetry/__init__.py +35 -0
- tnfr/telemetry/cache_metrics.py +226 -0
- tnfr/telemetry/cache_metrics.pyi +64 -0
- tnfr/telemetry/nu_f.py +422 -0
- tnfr/telemetry/nu_f.pyi +108 -0
- tnfr/telemetry/verbosity.py +36 -0
- tnfr/telemetry/verbosity.pyi +15 -0
- tnfr/tokens.py +2 -4
- tnfr/tokens.pyi +36 -0
- tnfr/tools/__init__.py +20 -0
- tnfr/tools/domain_templates.py +478 -0
- tnfr/tools/sequence_generator.py +846 -0
- tnfr/topology/__init__.py +13 -0
- tnfr/topology/asymmetry.py +151 -0
- tnfr/trace.py +300 -126
- tnfr/trace.pyi +42 -0
- tnfr/tutorials/__init__.py +38 -0
- tnfr/tutorials/autonomous_evolution.py +285 -0
- tnfr/tutorials/interactive.py +1576 -0
- tnfr/tutorials/structural_metabolism.py +238 -0
- tnfr/types.py +743 -12
- tnfr/types.pyi +357 -0
- tnfr/units.py +68 -0
- tnfr/units.pyi +13 -0
- tnfr/utils/__init__.py +282 -0
- tnfr/utils/__init__.pyi +215 -0
- tnfr/utils/cache.py +4223 -0
- tnfr/utils/cache.pyi +470 -0
- tnfr/{callback_utils.py → utils/callbacks.py} +26 -39
- tnfr/utils/callbacks.pyi +49 -0
- tnfr/utils/chunks.py +108 -0
- tnfr/utils/chunks.pyi +22 -0
- tnfr/utils/data.py +428 -0
- tnfr/utils/data.pyi +74 -0
- tnfr/utils/graph.py +85 -0
- tnfr/utils/graph.pyi +10 -0
- tnfr/utils/init.py +821 -0
- tnfr/utils/init.pyi +80 -0
- tnfr/utils/io.py +559 -0
- tnfr/utils/io.pyi +66 -0
- tnfr/{helpers → utils}/numeric.py +51 -24
- tnfr/utils/numeric.pyi +21 -0
- tnfr/validation/__init__.py +257 -0
- tnfr/validation/__init__.pyi +85 -0
- tnfr/validation/compatibility.py +460 -0
- tnfr/validation/compatibility.pyi +6 -0
- tnfr/validation/config.py +73 -0
- tnfr/validation/graph.py +139 -0
- tnfr/validation/graph.pyi +18 -0
- tnfr/validation/input_validation.py +755 -0
- tnfr/validation/invariants.py +712 -0
- tnfr/validation/rules.py +253 -0
- tnfr/validation/rules.pyi +44 -0
- tnfr/validation/runtime.py +279 -0
- tnfr/validation/runtime.pyi +28 -0
- tnfr/validation/sequence_validator.py +162 -0
- tnfr/validation/soft_filters.py +170 -0
- tnfr/validation/soft_filters.pyi +32 -0
- tnfr/validation/spectral.py +164 -0
- tnfr/validation/spectral.pyi +42 -0
- tnfr/validation/validator.py +1266 -0
- tnfr/validation/window.py +39 -0
- tnfr/validation/window.pyi +1 -0
- tnfr/visualization/__init__.py +98 -0
- tnfr/visualization/cascade_viz.py +256 -0
- tnfr/visualization/hierarchy.py +284 -0
- tnfr/visualization/sequence_plotter.py +784 -0
- tnfr/viz/__init__.py +60 -0
- tnfr/viz/matplotlib.py +278 -0
- tnfr/viz/matplotlib.pyi +35 -0
- tnfr-8.5.0.dist-info/METADATA +573 -0
- tnfr-8.5.0.dist-info/RECORD +353 -0
- {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/entry_points.txt +1 -0
- {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/licenses/LICENSE.md +1 -1
- tnfr/collections_utils.py +0 -300
- tnfr/config.py +0 -32
- tnfr/grammar.py +0 -344
- tnfr/graph_utils.py +0 -84
- tnfr/helpers/__init__.py +0 -71
- tnfr/import_utils.py +0 -228
- tnfr/json_utils.py +0 -162
- tnfr/logging_utils.py +0 -116
- tnfr/presets.py +0 -60
- tnfr/validators.py +0 -84
- tnfr/value_utils.py +0 -59
- tnfr-4.5.2.dist-info/METADATA +0 -379
- tnfr-4.5.2.dist-info/RECORD +0 -67
- {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/WHEEL +0 -0
- {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/top_level.txt +0 -0
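Note on the module reorganization: several flat top-level modules were folded into packages (see the {old → new} rename entries above, e.g. tnfr/callback_utils.py → tnfr/utils/callbacks.py). A hypothetical before/after import, assuming the moved files keep their public names; verify against the 8.5.0 package before relying on it:

# tnfr 4.5.2 (module removed in 8.5.0)
import tnfr.callback_utils as callbacks

# tnfr 8.5.0 (per the rename entry in the listing above)
import tnfr.utils.callbacks as callbacks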
tnfr/parallel/gpu_engine.py (new file)
@@ -0,0 +1,420 @@
"""GPU acceleration for TNFR computations.

Optional module providing JAX and CuPy integration for GPU-accelerated
vectorized operations. Requires installation of optional dependencies:

    pip install tnfr[jax]   # or
    pip install tnfr[cupy]
"""

from __future__ import annotations

from typing import Any, Dict

# Check for optional GPU backends
try:
    import cupy as cp

    HAS_CUPY = True
except ImportError:
    HAS_CUPY = False
    cp = None  # type: ignore

try:
    import jax
    import jax.numpy as jnp
    from jax import jit

    HAS_JAX = True
except ImportError:
    HAS_JAX = False
    jax = None  # type: ignore
    jnp = None  # type: ignore
    jit = None  # type: ignore


class TNFRGPUEngine:
    """GPU acceleration engine for TNFR computations.

    Provides vectorized GPU implementations of ΔNFR and other TNFR operations
    using JAX or CuPy backends.

    Parameters
    ----------
    backend : {"auto", "jax", "cupy", "numpy"}, default="auto"
        GPU backend to use. "auto" prefers JAX, then CuPy, then the NumPy
        fallback.

    Raises
    ------
    ImportError
        If the requested GPU backend is not installed.

    Examples
    --------
    >>> # Requires JAX or CuPy installation
    >>> try:
    ...     from tnfr.parallel import TNFRGPUEngine
    ...     engine = TNFRGPUEngine(backend="auto")
    ...     # engine.backend in ["jax", "cupy", "numpy"]
    ... except ImportError:
    ...     pass  # Optional dependency not installed

    Notes
    -----
    GPU acceleration provides significant speedup for large dense networks
    but requires compatible hardware and drivers. For sparse networks or
    small graphs, multiprocessing may be more efficient.
    """

    def __init__(self, backend: str = "auto"):
        self.backend = self._select_gpu_backend(backend)

    def _select_gpu_backend(self, backend: str) -> str:
        """Select an available GPU backend."""
        if backend == "auto":
            if HAS_JAX:
                return "jax"
            elif HAS_CUPY:
                return "cupy"
            else:
                return "numpy"  # Fallback

        if backend == "jax" and not HAS_JAX:
            raise ImportError("JAX not available. Install with: pip install jax[cuda]")
        if backend == "cupy" and not HAS_CUPY:
            raise ImportError("CuPy not available. Install with: pip install cupy")

        return backend

    def compute_delta_nfr_gpu(
        self,
        adjacency_matrix: Any,
        epi_vector: Any,
        vf_vector: Any,
        phase_vector: Any,
    ) -> Any:
        """Compute ΔNFR using vectorized GPU operations.

        Parameters
        ----------
        adjacency_matrix : array-like
            Network adjacency matrix (N x N)
        epi_vector : array-like
            EPI values for all nodes (N,)
        vf_vector : array-like
            Structural frequencies νf for all nodes (N,)
        phase_vector : array-like
            Phase values θ for all nodes (N,)

        Returns
        -------
        array-like
            ΔNFR values for all nodes (N,)

        Notes
        -----
        Dispatches to the JAX, CuPy, or NumPy implementation according to the
        backend selected at construction time; the NumPy path is a CPU-only
        fallback.
        """
        if self.backend == "jax" and HAS_JAX:
            return self._compute_delta_nfr_jax(
                adjacency_matrix, epi_vector, vf_vector, phase_vector
            )
        elif self.backend == "cupy" and HAS_CUPY:
            return self._compute_delta_nfr_cupy(
                adjacency_matrix, epi_vector, vf_vector, phase_vector
            )
        else:
            return self._compute_delta_nfr_numpy(
                adjacency_matrix, epi_vector, vf_vector, phase_vector
            )

    def _compute_delta_nfr_jax(
        self, adj_matrix: Any, epi_vec: Any, vf_vec: Any, phase_vec: Any
    ) -> Any:
        """JAX implementation with JIT compilation for GPU acceleration.

        Implements vectorized ΔNFR computation using JAX for automatic
        GPU acceleration and JIT compilation.

        Parameters
        ----------
        adj_matrix : array-like
            Adjacency matrix (N x N)
        epi_vec : array-like
            EPI values (N,)
        vf_vec : array-like
            Structural frequencies (N,)
        phase_vec : array-like
            Phase values (N,)

        Returns
        -------
        jax.numpy.ndarray
            ΔNFR values for all nodes

        Notes
        -----
        Uses the canonical TNFR nodal equation:

            ∂EPI/∂t = νf · ΔNFR(t)

        ΔNFR is computed from:

        - Topological gradient (EPI differences with neighbors)
        - Phase gradient (phase synchronization)
        - Weighted by structural frequency
        """
        if not HAS_JAX:
            raise ImportError("JAX required for GPU acceleration")

        # Convert inputs to JAX arrays
        adj = jnp.asarray(adj_matrix)
        epi = jnp.asarray(epi_vec)
        vf = jnp.asarray(vf_vec)
        phase = jnp.asarray(phase_vec)

        # Define JIT-compiled ΔNFR computation
        @jit
        def compute_dnfr_vectorized(adj, epi, vf, phase):
            """Vectorized ΔNFR computation (JIT compiled)."""
            # Topological gradient: difference in EPI with neighbors
            # epi_diff[i, j] = epi[j] - epi[i]
            epi_diff = epi[None, :] - epi[:, None]  # (N, N) matrix
            topo_gradient = jnp.sum(adj * epi_diff, axis=1)  # (N,) vector

            # Phase gradient: phase difference with neighbors
            # phase_diff[i, j] = sin(phase[j] - phase[i])
            phase_diff = jnp.sin(phase[None, :] - phase[:, None])  # (N, N)
            phase_gradient = jnp.sum(adj * phase_diff, axis=1)  # (N,)

            # Normalize by degree (number of neighbors)
            degree = jnp.sum(adj, axis=1)
            # Avoid division by zero
            degree_safe = jnp.where(degree > 0, degree, 1.0)

            topo_gradient = topo_gradient / degree_safe
            phase_gradient = phase_gradient / degree_safe

            # Combine gradients with TNFR weights
            # Emphasize topological structure (0.7) over phase (0.3)
            combined_gradient = 0.7 * topo_gradient + 0.3 * phase_gradient

            # Apply structural frequency modulation (canonical equation)
            delta_nfr = vf * combined_gradient

            return delta_nfr

        # Execute JIT-compiled computation (GPU accelerated if available)
        result = compute_dnfr_vectorized(adj, epi, vf, phase)

        return result

    def _compute_delta_nfr_cupy(
        self, adj_matrix: Any, epi_vec: Any, vf_vec: Any, phase_vec: Any
    ) -> Any:
        """CuPy implementation for CUDA GPUs.

        Implements vectorized ΔNFR computation using CuPy for CUDA GPU
        acceleration with a NumPy-compatible interface.

        Parameters
        ----------
        adj_matrix : array-like
            Adjacency matrix (N x N)
        epi_vec : array-like
            EPI values (N,)
        vf_vec : array-like
            Structural frequencies (N,)
        phase_vec : array-like
            Phase values (N,)

        Returns
        -------
        cupy.ndarray
            ΔNFR values for all nodes (on GPU)
        """
        if not HAS_CUPY:
            raise ImportError("CuPy required for CUDA GPU acceleration")

        # Transfer to GPU
        adj = cp.asarray(adj_matrix)
        epi = cp.asarray(epi_vec)
        vf = cp.asarray(vf_vec)
        phase = cp.asarray(phase_vec)

        # Topological gradient (vectorized on GPU)
        epi_diff = epi[None, :] - epi[:, None]
        topo_gradient = cp.sum(adj * epi_diff, axis=1)

        # Phase gradient (vectorized on GPU)
        phase_diff = cp.sin(phase[None, :] - phase[:, None])
        phase_gradient = cp.sum(adj * phase_diff, axis=1)

        # Normalize by degree
        degree = cp.sum(adj, axis=1)
        degree_safe = cp.where(degree > 0, degree, 1.0)

        topo_gradient = topo_gradient / degree_safe
        phase_gradient = phase_gradient / degree_safe

        # Combine with TNFR weights
        combined_gradient = 0.7 * topo_gradient + 0.3 * phase_gradient

        # Apply structural frequency
        delta_nfr = vf * combined_gradient

        return delta_nfr

    def _compute_delta_nfr_numpy(
        self, adj_matrix: Any, epi_vec: Any, vf_vec: Any, phase_vec: Any
    ) -> Any:
        """NumPy fallback implementation (CPU-only).

        Provides CPU-based vectorized computation when no GPU is available.

        Parameters
        ----------
        adj_matrix : array-like
            Adjacency matrix (N x N)
        epi_vec : array-like
            EPI values (N,)
        vf_vec : array-like
            Structural frequencies (N,)
        phase_vec : array-like
            Phase values (N,)

        Returns
        -------
        numpy.ndarray
            ΔNFR values for all nodes
        """
        try:
            import numpy as np
        except ImportError:
            raise ImportError("NumPy required for CPU computation")

        # Convert to NumPy arrays
        adj = np.asarray(adj_matrix)
        epi = np.asarray(epi_vec)
        vf = np.asarray(vf_vec)
        phase = np.asarray(phase_vec)

        # Topological gradient
        epi_diff = epi[None, :] - epi[:, None]
        topo_gradient = np.sum(adj * epi_diff, axis=1)

        # Phase gradient
        phase_diff = np.sin(phase[None, :] - phase[:, None])
        phase_gradient = np.sum(adj * phase_diff, axis=1)

        # Normalize by degree
        degree = np.sum(adj, axis=1)
        degree_safe = np.where(degree > 0, degree, 1.0)

        topo_gradient = topo_gradient / degree_safe
        phase_gradient = phase_gradient / degree_safe

        # Combine with TNFR weights
        combined_gradient = 0.7 * topo_gradient + 0.3 * phase_gradient

        # Apply structural frequency
        delta_nfr = vf * combined_gradient

        return delta_nfr

    def compute_delta_nfr_from_graph(self, graph: Any) -> Dict[Any, float]:
        """Compute ΔNFR directly from a TNFR graph using GPU acceleration.

        Convenience method that extracts matrices from the graph and computes
        ΔNFR using the configured backend.

        Parameters
        ----------
        graph : TNFRGraph
            Network graph with TNFR attributes

        Returns
        -------
        Dict[Any, float]
            Mapping from node IDs to ΔNFR values

        Examples
        --------
        >>> import networkx as nx
        >>> from tnfr.parallel import TNFRGPUEngine
        >>> G = nx.Graph([(0, 1), (1, 2)])
        >>> for node in G.nodes():
        ...     G.nodes[node]['epi'] = 0.5
        ...     G.nodes[node]['nu_f'] = 1.0
        ...     G.nodes[node]['phase'] = 0.0
        >>> engine = TNFRGPUEngine(backend="numpy")  # Use NumPy for testing
        >>> result = engine.compute_delta_nfr_from_graph(G)
        >>> len(result) == 3
        True
        """
        try:
            import numpy as np
        except ImportError:
            raise ImportError("NumPy required for graph processing")

        # Extract node list (maintain order)
        nodes = list(graph.nodes())
        node_to_idx = {node: idx for idx, node in enumerate(nodes)}

        # Build adjacency matrix
        n = len(nodes)
        adj_matrix = np.zeros((n, n))
        for i, j in graph.edges():
            idx_i = node_to_idx[i]
            idx_j = node_to_idx[j]
            adj_matrix[idx_i, idx_j] = 1.0
            adj_matrix[idx_j, idx_i] = 1.0  # Undirected

        # Extract node attributes
        def get_attr(node, attr_names, default):
            """Get a node attribute, trying each alias in turn."""
            for name in (
                attr_names if isinstance(attr_names, (list, tuple)) else [attr_names]
            ):
                if name in graph.nodes[node]:
                    return float(graph.nodes[node][name])
            return default

        epi_vec = np.array([get_attr(node, ["epi", "EPI"], 0.5) for node in nodes])
        vf_vec = np.array([get_attr(node, ["nu_f", "vf", "νf"], 1.0) for node in nodes])
        phase_vec = np.array(
            [get_attr(node, ["phase", "theta"], 0.0) for node in nodes]
        )

        # Compute ΔNFR using the selected backend
        delta_nfr_array = self.compute_delta_nfr_gpu(
            adj_matrix, epi_vec, vf_vec, phase_vec
        )

        # Convert back to a dictionary
        if self.backend == "cupy" and HAS_CUPY:
            delta_nfr_array = cp.asnumpy(delta_nfr_array)  # Transfer from GPU
        elif self.backend == "jax" and HAS_JAX:
            delta_nfr_array = np.array(delta_nfr_array)  # Convert from JAX

        result = {node: float(delta_nfr_array[idx]) for idx, node in enumerate(nodes)}

        return result

    @property
    def is_gpu_available(self) -> bool:
        """Check whether GPU acceleration is actually available."""
        if self.backend == "jax" and HAS_JAX:
            try:
                # Check if JAX has a GPU backend
                return len(jax.devices("gpu")) > 0
            except Exception:
                return False
        elif self.backend == "cupy" and HAS_CUPY:
            try:
                # Check if CuPy can access a GPU
                return cp.cuda.runtime.getDeviceCount() > 0
            except Exception:
                return False
        return False
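For orientation, a minimal end-to-end sketch of TNFRGPUEngine (editorial example, not part of the diff): it builds a small networkx graph carrying the epi / nu_f / phase attributes the engine reads, and forces the CPU fallback so it runs without GPU hardware. The attribute values are arbitrary placeholders.

import networkx as nx

from tnfr.parallel import TNFRGPUEngine

# Small undirected graph with the TNFR node attributes the engine reads.
G = nx.erdos_renyi_graph(8, 0.4, seed=42)
for node in G.nodes():
    G.nodes[node]["epi"] = 0.5 + 0.05 * node  # EPI value (placeholder)
    G.nodes[node]["nu_f"] = 1.0               # structural frequency νf
    G.nodes[node]["phase"] = 0.1 * node       # phase θ

engine = TNFRGPUEngine(backend="numpy")  # "auto" would prefer JAX, then CuPy
print(engine.backend, engine.is_gpu_available)      # numpy False
delta_nfr = engine.compute_delta_nfr_from_graph(G)  # {node: ΔNFR value}
print(len(delta_nfr) == G.number_of_nodes())        # True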
tnfr/parallel/monitoring.py (new file)
@@ -0,0 +1,248 @@
"""Performance monitoring for parallel TNFR computations.

Tracks execution metrics to enable optimization and auto-scaling decisions.
"""

from __future__ import annotations

import time
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

try:
    import psutil

    HAS_PSUTIL = True
except ImportError:
    HAS_PSUTIL = False


@dataclass
class PerformanceMetrics:
    """Performance metrics for parallel TNFR execution.

    Attributes
    ----------
    start_time : float
        Unix timestamp when execution started
    end_time : float
        Unix timestamp when execution completed
    duration_seconds : float
        Total execution time in seconds
    peak_memory_mb : float
        Peak memory usage in megabytes
    avg_cpu_percent : float
        Average CPU utilization percentage
    workers_used : int
        Number of parallel workers employed
    nodes_processed : int
        Total number of nodes processed
    operations_per_second : float
        Throughput metric (nodes/second)
    coherence_improvement : float
        Change in global coherence C(t)
    parallelization_efficiency : float
        Actual speedup / theoretical speedup ratio
    memory_efficiency : float
        Useful work / total memory ratio
    """

    start_time: float
    end_time: float
    duration_seconds: float
    peak_memory_mb: float
    avg_cpu_percent: float
    workers_used: int
    nodes_processed: int
    operations_per_second: float
    coherence_improvement: float
    parallelization_efficiency: float
    memory_efficiency: float


class ParallelExecutionMonitor:
    """Real-time monitoring for parallel TNFR execution.

    Tracks resource usage, throughput, and efficiency metrics during parallel
    computation to enable dynamic optimization and post-execution analysis.

    Examples
    --------
    >>> from tnfr.parallel import ParallelExecutionMonitor
    >>> monitor = ParallelExecutionMonitor()
    >>> monitor.start_monitoring(expected_nodes=100, workers=2)
    >>> # ... perform computation ...
    >>> metrics = monitor.stop_monitoring(
    ...     final_coherence=0.85,
    ...     initial_coherence=0.75
    ... )
    >>> metrics.nodes_processed
    100
    >>> metrics.workers_used
    2
    """

    def __init__(self):
        self._metrics_history: List[PerformanceMetrics] = []
        self._current_metrics: Optional[Dict[str, Any]] = None
        self._process = None
        if HAS_PSUTIL:
            try:
                self._process = psutil.Process()
            except Exception:
                self._process = None

    def start_monitoring(self, expected_nodes: int, workers: int) -> None:
        """Start monitoring execution.

        Parameters
        ----------
        expected_nodes : int
            Expected number of nodes to process
        workers : int
            Number of parallel workers
        """
        self._current_metrics = {
            "start_time": time.time(),
            "expected_nodes": expected_nodes,
            "workers": workers,
            "memory_samples": [],
            "cpu_samples": [],
        }

        # Take initial resource snapshot
        if self._process:
            try:
                mem_info = self._process.memory_info()
                self._current_metrics["memory_samples"].append(
                    mem_info.rss / 1024 / 1024
                )
                self._current_metrics["cpu_samples"].append(self._process.cpu_percent())
            except Exception:
                pass

    def stop_monitoring(
        self, final_coherence: float, initial_coherence: float
    ) -> PerformanceMetrics:
        """Stop monitoring and compute final metrics.

        Parameters
        ----------
        final_coherence : float
            Final network coherence C(t)
        initial_coherence : float
            Initial network coherence C(t)

        Returns
        -------
        PerformanceMetrics
            Complete performance metrics for the execution
        """
        if self._current_metrics is None:
            raise RuntimeError("Monitoring not started")

        end_time = time.time()
        duration = end_time - self._current_metrics["start_time"]

        # Take final resource snapshot
        if self._process:
            try:
                mem_info = self._process.memory_info()
                self._current_metrics["memory_samples"].append(
                    mem_info.rss / 1024 / 1024
                )
                self._current_metrics["cpu_samples"].append(self._process.cpu_percent())
            except Exception:
                pass

        # Calculate aggregated metrics
        memory_samples = self._current_metrics.get("memory_samples", [])
        cpu_samples = self._current_metrics.get("cpu_samples", [])

        peak_memory = max(memory_samples) if memory_samples else 0.0
        avg_cpu = sum(cpu_samples) / len(cpu_samples) if cpu_samples else 0.0

        nodes = self._current_metrics["expected_nodes"]
        workers = self._current_metrics["workers"]

        # Calculate parallelization efficiency.
        # NOTE: This is a heuristic approximation. True efficiency requires:
        # - a baseline sequential measurement,
        # - accounting for Amdahl's law (sequential portions),
        # - consideration of communication overhead.
        # Current approach: estimate actual speedup from CPU utilization as a
        # proxy; with N workers the ideal case approaches N * 100% CPU.
        theoretical_speedup = workers
        expected_cpu = workers * 100.0
        actual_speedup = (avg_cpu / 100.0) if expected_cpu > 0 else 1.0
        parallelization_eff = (
            min(1.0, actual_speedup / theoretical_speedup)
            if theoretical_speedup > 0
            else 0.0
        )

        # Memory efficiency: nodes per MB
        memory_eff = nodes / peak_memory if peak_memory > 0 else 0.0

        metrics = PerformanceMetrics(
            start_time=self._current_metrics["start_time"],
            end_time=end_time,
            duration_seconds=duration,
            peak_memory_mb=peak_memory,
            avg_cpu_percent=avg_cpu,
            workers_used=workers,
            nodes_processed=nodes,
            operations_per_second=nodes / duration if duration > 0 else 0.0,
            coherence_improvement=final_coherence - initial_coherence,
            parallelization_efficiency=parallelization_eff,
            memory_efficiency=memory_eff,
        )

        self._metrics_history.append(metrics)
        self._current_metrics = None

        return metrics

    def get_optimization_suggestions(self) -> List[str]:
        """Generate optimization suggestions based on execution history.

        Returns
        -------
        List[str]
            List of actionable suggestions for improving performance
        """
        if not self._metrics_history:
            return ["No execution history available"]

        latest = self._metrics_history[-1]
        suggestions = []

        if latest.parallelization_efficiency < 0.5:
            suggestions.append(
                "⚡ Low parallelization efficiency - consider reducing "
                "worker count or increasing chunk size"
            )

        if latest.memory_efficiency < 0.1:
            suggestions.append(
                "💾 High memory usage - consider distributed execution "
                "or memory optimization"
            )

        if latest.operations_per_second < 100:
            suggestions.append(
                "📈 Low throughput - consider GPU backend or algorithm optimization"
            )

        if not suggestions:
            suggestions.append("✨ Performance looks optimal!")

        return suggestions

    @property
    def history(self) -> List[PerformanceMetrics]:
        """Get execution history."""
        return self._metrics_history.copy()
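A companion sketch wiring ParallelExecutionMonitor around a ΔNFR pass (editorial example, not part of the diff): it reuses G and engine from the previous sketch, and the coherence arguments are placeholder values since coherence C(t) is measured elsewhere in the package.

from tnfr.parallel import ParallelExecutionMonitor

monitor = ParallelExecutionMonitor()
monitor.start_monitoring(expected_nodes=G.number_of_nodes(), workers=1)
engine.compute_delta_nfr_from_graph(G)  # the monitored workload
metrics = monitor.stop_monitoring(final_coherence=0.85, initial_coherence=0.75)

print(f"{metrics.duration_seconds:.4f} s, "
      f"{metrics.operations_per_second:.0f} nodes/s, "
      f"ΔC = {metrics.coherence_improvement:+.2f}")
for tip in monitor.get_optimization_suggestions():
    print(tip)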