tnfr 3.0.3-py3-none-any.whl → 8.5.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of tnfr has been flagged as potentially problematic.
- tnfr/__init__.py +375 -56
- tnfr/__init__.pyi +33 -0
- tnfr/_compat.py +10 -0
- tnfr/_generated_version.py +34 -0
- tnfr/_version.py +49 -0
- tnfr/_version.pyi +7 -0
- tnfr/alias.py +723 -0
- tnfr/alias.pyi +108 -0
- tnfr/backends/__init__.py +354 -0
- tnfr/backends/jax_backend.py +173 -0
- tnfr/backends/numpy_backend.py +238 -0
- tnfr/backends/optimized_numpy.py +420 -0
- tnfr/backends/torch_backend.py +408 -0
- tnfr/cache.py +171 -0
- tnfr/cache.pyi +13 -0
- tnfr/cli/__init__.py +110 -0
- tnfr/cli/__init__.pyi +26 -0
- tnfr/cli/arguments.py +489 -0
- tnfr/cli/arguments.pyi +29 -0
- tnfr/cli/execution.py +914 -0
- tnfr/cli/execution.pyi +70 -0
- tnfr/cli/interactive_validator.py +614 -0
- tnfr/cli/utils.py +51 -0
- tnfr/cli/utils.pyi +7 -0
- tnfr/cli/validate.py +236 -0
- tnfr/compat/__init__.py +85 -0
- tnfr/compat/dataclass.py +136 -0
- tnfr/compat/jsonschema_stub.py +61 -0
- tnfr/compat/matplotlib_stub.py +73 -0
- tnfr/compat/numpy_stub.py +155 -0
- tnfr/config/__init__.py +224 -0
- tnfr/config/__init__.pyi +10 -0
- tnfr/config/constants.py +104 -0
- tnfr/config/constants.pyi +12 -0
- tnfr/config/defaults.py +54 -0
- tnfr/config/defaults_core.py +212 -0
- tnfr/config/defaults_init.py +33 -0
- tnfr/config/defaults_metric.py +104 -0
- tnfr/config/feature_flags.py +81 -0
- tnfr/config/feature_flags.pyi +16 -0
- tnfr/config/glyph_constants.py +31 -0
- tnfr/config/init.py +77 -0
- tnfr/config/init.pyi +8 -0
- tnfr/config/operator_names.py +254 -0
- tnfr/config/operator_names.pyi +36 -0
- tnfr/config/physics_derivation.py +354 -0
- tnfr/config/presets.py +83 -0
- tnfr/config/presets.pyi +7 -0
- tnfr/config/security.py +927 -0
- tnfr/config/thresholds.py +114 -0
- tnfr/config/tnfr_config.py +498 -0
- tnfr/constants/__init__.py +92 -0
- tnfr/constants/__init__.pyi +92 -0
- tnfr/constants/aliases.py +33 -0
- tnfr/constants/aliases.pyi +27 -0
- tnfr/constants/init.py +33 -0
- tnfr/constants/init.pyi +12 -0
- tnfr/constants/metric.py +104 -0
- tnfr/constants/metric.pyi +19 -0
- tnfr/core/__init__.py +33 -0
- tnfr/core/container.py +226 -0
- tnfr/core/default_implementations.py +329 -0
- tnfr/core/interfaces.py +279 -0
- tnfr/dynamics/__init__.py +238 -0
- tnfr/dynamics/__init__.pyi +83 -0
- tnfr/dynamics/adaptation.py +267 -0
- tnfr/dynamics/adaptation.pyi +7 -0
- tnfr/dynamics/adaptive_sequences.py +189 -0
- tnfr/dynamics/adaptive_sequences.pyi +14 -0
- tnfr/dynamics/aliases.py +23 -0
- tnfr/dynamics/aliases.pyi +19 -0
- tnfr/dynamics/bifurcation.py +232 -0
- tnfr/dynamics/canonical.py +229 -0
- tnfr/dynamics/canonical.pyi +48 -0
- tnfr/dynamics/coordination.py +385 -0
- tnfr/dynamics/coordination.pyi +25 -0
- tnfr/dynamics/dnfr.py +3034 -0
- tnfr/dynamics/dnfr.pyi +26 -0
- tnfr/dynamics/dynamic_limits.py +225 -0
- tnfr/dynamics/feedback.py +252 -0
- tnfr/dynamics/feedback.pyi +24 -0
- tnfr/dynamics/fused_dnfr.py +454 -0
- tnfr/dynamics/homeostasis.py +157 -0
- tnfr/dynamics/homeostasis.pyi +14 -0
- tnfr/dynamics/integrators.py +661 -0
- tnfr/dynamics/integrators.pyi +36 -0
- tnfr/dynamics/learning.py +310 -0
- tnfr/dynamics/learning.pyi +33 -0
- tnfr/dynamics/metabolism.py +254 -0
- tnfr/dynamics/nbody.py +796 -0
- tnfr/dynamics/nbody_tnfr.py +783 -0
- tnfr/dynamics/propagation.py +326 -0
- tnfr/dynamics/runtime.py +908 -0
- tnfr/dynamics/runtime.pyi +77 -0
- tnfr/dynamics/sampling.py +36 -0
- tnfr/dynamics/sampling.pyi +7 -0
- tnfr/dynamics/selectors.py +711 -0
- tnfr/dynamics/selectors.pyi +85 -0
- tnfr/dynamics/structural_clip.py +207 -0
- tnfr/errors/__init__.py +37 -0
- tnfr/errors/contextual.py +492 -0
- tnfr/execution.py +223 -0
- tnfr/execution.pyi +45 -0
- tnfr/extensions/__init__.py +205 -0
- tnfr/extensions/__init__.pyi +18 -0
- tnfr/extensions/base.py +173 -0
- tnfr/extensions/base.pyi +35 -0
- tnfr/extensions/business/__init__.py +71 -0
- tnfr/extensions/business/__init__.pyi +11 -0
- tnfr/extensions/business/cookbook.py +88 -0
- tnfr/extensions/business/cookbook.pyi +8 -0
- tnfr/extensions/business/health_analyzers.py +202 -0
- tnfr/extensions/business/health_analyzers.pyi +9 -0
- tnfr/extensions/business/patterns.py +183 -0
- tnfr/extensions/business/patterns.pyi +8 -0
- tnfr/extensions/medical/__init__.py +73 -0
- tnfr/extensions/medical/__init__.pyi +11 -0
- tnfr/extensions/medical/cookbook.py +88 -0
- tnfr/extensions/medical/cookbook.pyi +8 -0
- tnfr/extensions/medical/health_analyzers.py +181 -0
- tnfr/extensions/medical/health_analyzers.pyi +9 -0
- tnfr/extensions/medical/patterns.py +163 -0
- tnfr/extensions/medical/patterns.pyi +8 -0
- tnfr/flatten.py +262 -0
- tnfr/flatten.pyi +21 -0
- tnfr/gamma.py +354 -0
- tnfr/gamma.pyi +36 -0
- tnfr/glyph_history.py +377 -0
- tnfr/glyph_history.pyi +35 -0
- tnfr/glyph_runtime.py +19 -0
- tnfr/glyph_runtime.pyi +8 -0
- tnfr/immutable.py +218 -0
- tnfr/immutable.pyi +36 -0
- tnfr/initialization.py +203 -0
- tnfr/initialization.pyi +65 -0
- tnfr/io.py +10 -0
- tnfr/io.pyi +13 -0
- tnfr/locking.py +37 -0
- tnfr/locking.pyi +7 -0
- tnfr/mathematics/__init__.py +79 -0
- tnfr/mathematics/backend.py +453 -0
- tnfr/mathematics/backend.pyi +99 -0
- tnfr/mathematics/dynamics.py +408 -0
- tnfr/mathematics/dynamics.pyi +90 -0
- tnfr/mathematics/epi.py +391 -0
- tnfr/mathematics/epi.pyi +65 -0
- tnfr/mathematics/generators.py +242 -0
- tnfr/mathematics/generators.pyi +29 -0
- tnfr/mathematics/metrics.py +119 -0
- tnfr/mathematics/metrics.pyi +16 -0
- tnfr/mathematics/operators.py +239 -0
- tnfr/mathematics/operators.pyi +59 -0
- tnfr/mathematics/operators_factory.py +124 -0
- tnfr/mathematics/operators_factory.pyi +11 -0
- tnfr/mathematics/projection.py +87 -0
- tnfr/mathematics/projection.pyi +33 -0
- tnfr/mathematics/runtime.py +182 -0
- tnfr/mathematics/runtime.pyi +64 -0
- tnfr/mathematics/spaces.py +256 -0
- tnfr/mathematics/spaces.pyi +83 -0
- tnfr/mathematics/transforms.py +305 -0
- tnfr/mathematics/transforms.pyi +62 -0
- tnfr/metrics/__init__.py +79 -0
- tnfr/metrics/__init__.pyi +20 -0
- tnfr/metrics/buffer_cache.py +163 -0
- tnfr/metrics/buffer_cache.pyi +24 -0
- tnfr/metrics/cache_utils.py +214 -0
- tnfr/metrics/coherence.py +2009 -0
- tnfr/metrics/coherence.pyi +129 -0
- tnfr/metrics/common.py +158 -0
- tnfr/metrics/common.pyi +35 -0
- tnfr/metrics/core.py +316 -0
- tnfr/metrics/core.pyi +13 -0
- tnfr/metrics/diagnosis.py +833 -0
- tnfr/metrics/diagnosis.pyi +86 -0
- tnfr/metrics/emergence.py +245 -0
- tnfr/metrics/export.py +179 -0
- tnfr/metrics/export.pyi +7 -0
- tnfr/metrics/glyph_timing.py +379 -0
- tnfr/metrics/glyph_timing.pyi +81 -0
- tnfr/metrics/learning_metrics.py +280 -0
- tnfr/metrics/learning_metrics.pyi +21 -0
- tnfr/metrics/phase_coherence.py +351 -0
- tnfr/metrics/phase_compatibility.py +349 -0
- tnfr/metrics/reporting.py +183 -0
- tnfr/metrics/reporting.pyi +25 -0
- tnfr/metrics/sense_index.py +1203 -0
- tnfr/metrics/sense_index.pyi +9 -0
- tnfr/metrics/trig.py +373 -0
- tnfr/metrics/trig.pyi +13 -0
- tnfr/metrics/trig_cache.py +233 -0
- tnfr/metrics/trig_cache.pyi +10 -0
- tnfr/multiscale/__init__.py +32 -0
- tnfr/multiscale/hierarchical.py +517 -0
- tnfr/node.py +763 -0
- tnfr/node.pyi +139 -0
- tnfr/observers.py +255 -130
- tnfr/observers.pyi +31 -0
- tnfr/ontosim.py +144 -137
- tnfr/ontosim.pyi +28 -0
- tnfr/operators/__init__.py +1672 -0
- tnfr/operators/__init__.pyi +31 -0
- tnfr/operators/algebra.py +277 -0
- tnfr/operators/canonical_patterns.py +420 -0
- tnfr/operators/cascade.py +267 -0
- tnfr/operators/cycle_detection.py +358 -0
- tnfr/operators/definitions.py +4108 -0
- tnfr/operators/definitions.pyi +78 -0
- tnfr/operators/grammar.py +1164 -0
- tnfr/operators/grammar.pyi +140 -0
- tnfr/operators/hamiltonian.py +710 -0
- tnfr/operators/health_analyzer.py +809 -0
- tnfr/operators/jitter.py +272 -0
- tnfr/operators/jitter.pyi +11 -0
- tnfr/operators/lifecycle.py +314 -0
- tnfr/operators/metabolism.py +618 -0
- tnfr/operators/metrics.py +2138 -0
- tnfr/operators/network_analysis/__init__.py +27 -0
- tnfr/operators/network_analysis/source_detection.py +186 -0
- tnfr/operators/nodal_equation.py +395 -0
- tnfr/operators/pattern_detection.py +660 -0
- tnfr/operators/patterns.py +669 -0
- tnfr/operators/postconditions/__init__.py +38 -0
- tnfr/operators/postconditions/mutation.py +236 -0
- tnfr/operators/preconditions/__init__.py +1226 -0
- tnfr/operators/preconditions/coherence.py +305 -0
- tnfr/operators/preconditions/dissonance.py +236 -0
- tnfr/operators/preconditions/emission.py +128 -0
- tnfr/operators/preconditions/mutation.py +580 -0
- tnfr/operators/preconditions/reception.py +125 -0
- tnfr/operators/preconditions/resonance.py +364 -0
- tnfr/operators/registry.py +74 -0
- tnfr/operators/registry.pyi +9 -0
- tnfr/operators/remesh.py +1809 -0
- tnfr/operators/remesh.pyi +26 -0
- tnfr/operators/structural_units.py +268 -0
- tnfr/operators/unified_grammar.py +105 -0
- tnfr/parallel/__init__.py +54 -0
- tnfr/parallel/auto_scaler.py +234 -0
- tnfr/parallel/distributed.py +384 -0
- tnfr/parallel/engine.py +238 -0
- tnfr/parallel/gpu_engine.py +420 -0
- tnfr/parallel/monitoring.py +248 -0
- tnfr/parallel/partitioner.py +459 -0
- tnfr/py.typed +0 -0
- tnfr/recipes/__init__.py +22 -0
- tnfr/recipes/cookbook.py +743 -0
- tnfr/rng.py +178 -0
- tnfr/rng.pyi +26 -0
- tnfr/schemas/__init__.py +8 -0
- tnfr/schemas/grammar.json +94 -0
- tnfr/sdk/__init__.py +107 -0
- tnfr/sdk/__init__.pyi +19 -0
- tnfr/sdk/adaptive_system.py +173 -0
- tnfr/sdk/adaptive_system.pyi +21 -0
- tnfr/sdk/builders.py +370 -0
- tnfr/sdk/builders.pyi +51 -0
- tnfr/sdk/fluent.py +1121 -0
- tnfr/sdk/fluent.pyi +74 -0
- tnfr/sdk/templates.py +342 -0
- tnfr/sdk/templates.pyi +41 -0
- tnfr/sdk/utils.py +341 -0
- tnfr/secure_config.py +46 -0
- tnfr/security/__init__.py +70 -0
- tnfr/security/database.py +514 -0
- tnfr/security/subprocess.py +503 -0
- tnfr/security/validation.py +290 -0
- tnfr/selector.py +247 -0
- tnfr/selector.pyi +19 -0
- tnfr/sense.py +378 -0
- tnfr/sense.pyi +23 -0
- tnfr/services/__init__.py +17 -0
- tnfr/services/orchestrator.py +325 -0
- tnfr/sparse/__init__.py +39 -0
- tnfr/sparse/representations.py +492 -0
- tnfr/structural.py +705 -0
- tnfr/structural.pyi +83 -0
- tnfr/telemetry/__init__.py +35 -0
- tnfr/telemetry/cache_metrics.py +226 -0
- tnfr/telemetry/cache_metrics.pyi +64 -0
- tnfr/telemetry/nu_f.py +422 -0
- tnfr/telemetry/nu_f.pyi +108 -0
- tnfr/telemetry/verbosity.py +36 -0
- tnfr/telemetry/verbosity.pyi +15 -0
- tnfr/tokens.py +58 -0
- tnfr/tokens.pyi +36 -0
- tnfr/tools/__init__.py +20 -0
- tnfr/tools/domain_templates.py +478 -0
- tnfr/tools/sequence_generator.py +846 -0
- tnfr/topology/__init__.py +13 -0
- tnfr/topology/asymmetry.py +151 -0
- tnfr/trace.py +543 -0
- tnfr/trace.pyi +42 -0
- tnfr/tutorials/__init__.py +38 -0
- tnfr/tutorials/autonomous_evolution.py +285 -0
- tnfr/tutorials/interactive.py +1576 -0
- tnfr/tutorials/structural_metabolism.py +238 -0
- tnfr/types.py +775 -0
- tnfr/types.pyi +357 -0
- tnfr/units.py +68 -0
- tnfr/units.pyi +13 -0
- tnfr/utils/__init__.py +282 -0
- tnfr/utils/__init__.pyi +215 -0
- tnfr/utils/cache.py +4223 -0
- tnfr/utils/cache.pyi +470 -0
- tnfr/utils/callbacks.py +375 -0
- tnfr/utils/callbacks.pyi +49 -0
- tnfr/utils/chunks.py +108 -0
- tnfr/utils/chunks.pyi +22 -0
- tnfr/utils/data.py +428 -0
- tnfr/utils/data.pyi +74 -0
- tnfr/utils/graph.py +85 -0
- tnfr/utils/graph.pyi +10 -0
- tnfr/utils/init.py +821 -0
- tnfr/utils/init.pyi +80 -0
- tnfr/utils/io.py +559 -0
- tnfr/utils/io.pyi +66 -0
- tnfr/utils/numeric.py +114 -0
- tnfr/utils/numeric.pyi +21 -0
- tnfr/validation/__init__.py +257 -0
- tnfr/validation/__init__.pyi +85 -0
- tnfr/validation/compatibility.py +460 -0
- tnfr/validation/compatibility.pyi +6 -0
- tnfr/validation/config.py +73 -0
- tnfr/validation/graph.py +139 -0
- tnfr/validation/graph.pyi +18 -0
- tnfr/validation/input_validation.py +755 -0
- tnfr/validation/invariants.py +712 -0
- tnfr/validation/rules.py +253 -0
- tnfr/validation/rules.pyi +44 -0
- tnfr/validation/runtime.py +279 -0
- tnfr/validation/runtime.pyi +28 -0
- tnfr/validation/sequence_validator.py +162 -0
- tnfr/validation/soft_filters.py +170 -0
- tnfr/validation/soft_filters.pyi +32 -0
- tnfr/validation/spectral.py +164 -0
- tnfr/validation/spectral.pyi +42 -0
- tnfr/validation/validator.py +1266 -0
- tnfr/validation/window.py +39 -0
- tnfr/validation/window.pyi +1 -0
- tnfr/visualization/__init__.py +98 -0
- tnfr/visualization/cascade_viz.py +256 -0
- tnfr/visualization/hierarchy.py +284 -0
- tnfr/visualization/sequence_plotter.py +784 -0
- tnfr/viz/__init__.py +60 -0
- tnfr/viz/matplotlib.py +278 -0
- tnfr/viz/matplotlib.pyi +35 -0
- tnfr-8.5.0.dist-info/METADATA +573 -0
- tnfr-8.5.0.dist-info/RECORD +353 -0
- tnfr-8.5.0.dist-info/entry_points.txt +3 -0
- tnfr-3.0.3.dist-info/licenses/LICENSE.txt → tnfr-8.5.0.dist-info/licenses/LICENSE.md +1 -1
- tnfr/constants.py +0 -183
- tnfr/dynamics.py +0 -543
- tnfr/helpers.py +0 -198
- tnfr/main.py +0 -37
- tnfr/operators.py +0 -296
- tnfr-3.0.3.dist-info/METADATA +0 -35
- tnfr-3.0.3.dist-info/RECORD +0 -13
- {tnfr-3.0.3.dist-info → tnfr-8.5.0.dist-info}/WHEEL +0 -0
- {tnfr-3.0.3.dist-info → tnfr-8.5.0.dist-info}/top_level.txt +0 -0
tnfr/alias.pyi
ADDED
@@ -0,0 +1,108 @@
+from __future__ import annotations
+
+from collections.abc import Callable, Iterable, Mapping, MutableMapping
+from types import ModuleType
+from typing import TYPE_CHECKING, Any, Hashable, TypeVar
+
+from .types import FloatArray, NodeId
+
+if TYPE_CHECKING:
+    import networkx as nx
+
+T = TypeVar("T")
+
+__all__: list[str]
+
+def __getattr__(name: str) -> Any: ...
+
+class AbsMaxResult:
+    max_value: float
+    node: Hashable | None
+
+SCALAR_SETTERS: dict[str, dict[str, Any]]
+
+def get_attr(
+    d: dict[str, Any],
+    aliases: Iterable[str],
+    default: T | None = ...,
+    *,
+    strict: bool = ...,
+    log_level: int | None = ...,
+    conv: Callable[[Any], T] = ...,
+) -> T | None: ...
+def get_theta_attr(
+    d: Mapping[str, Any],
+    default: T | None = ...,
+    *,
+    strict: bool = ...,
+    log_level: int | None = ...,
+    conv: Callable[[Any], T] = ...,
+) -> T | None: ...
+def collect_attr(
+    G: "nx.Graph",
+    nodes: Iterable[NodeId],
+    aliases: Iterable[str],
+    default: float = ...,
+    *,
+    np: ModuleType | None = ...,
+) -> FloatArray | list[float]: ...
+def collect_theta_attr(
+    G: "nx.Graph",
+    nodes: Iterable[NodeId],
+    default: float = ...,
+    *,
+    np: ModuleType | None = ...,
+) -> FloatArray | list[float]: ...
+def set_attr_generic(
+    d: dict[str, Any],
+    aliases: Iterable[str],
+    value: Any,
+    *,
+    conv: Callable[[Any], T],
+) -> T: ...
+def set_attr(
+    d: dict[str, Any],
+    aliases: Iterable[str],
+    value: Any,
+    conv: Callable[[Any], T] = ...,
+) -> T: ...
+def get_attr_str(
+    d: dict[str, Any],
+    aliases: Iterable[str],
+    default: str | None = ...,
+    *,
+    strict: bool = ...,
+    log_level: int | None = ...,
+    conv: Callable[[Any], str] = ...,
+) -> str | None: ...
+def set_attr_str(d: dict[str, Any], aliases: Iterable[str], value: Any) -> str: ...
+def set_theta_attr(d: MutableMapping[str, Any], value: Any) -> float: ...
+def multi_recompute_abs_max(
+    G: "nx.Graph", alias_map: Mapping[str, tuple[str, ...]]
+) -> dict[str, float]: ...
+def set_attr_and_cache(
+    G: "nx.Graph",
+    n: Hashable,
+    aliases: tuple[str, ...],
+    value: float,
+    *,
+    cache: str | None = ...,
+    extra: Callable[["nx.Graph", Hashable, float], None] | None = ...,
+) -> AbsMaxResult | None: ...
+def set_attr_with_max(
+    G: "nx.Graph", n: Hashable, aliases: tuple[str, ...], value: float, *, cache: str
+) -> AbsMaxResult: ...
+def set_scalar(
+    G: "nx.Graph",
+    n: Hashable,
+    alias: tuple[str, ...],
+    value: float,
+    *,
+    cache: str | None = ...,
+    extra: Callable[["nx.Graph", Hashable, float], None] | None = ...,
+) -> AbsMaxResult | None: ...
+def set_vf(
+    G: "nx.Graph", n: Hashable, value: float, *, update_max: bool = ...
+) -> AbsMaxResult | None: ...
+def set_dnfr(G: "nx.Graph", n: Hashable, value: float) -> AbsMaxResult | None: ...
+def set_theta(G: "nx.Graph", n: Hashable, value: float) -> AbsMaxResult | None: ...
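
The stub above only pins down signatures. As a rough illustration of the alias-fallback pattern those signatures suggest (look a value up under several candidate keys, optionally converting it), here is a self-contained sketch; the helper names, the default conversion, and the "first alias wins" behaviour are assumptions made for illustration, not tnfr's actual implementation.

```python
# Illustrative sketch only: a plain-Python rendition of the alias-lookup
# pattern implied by the get_attr/set_attr signatures above. It is NOT
# tnfr's implementation; fallback order and default conversion are assumed.
from __future__ import annotations

from typing import Any, Callable, Iterable, TypeVar

T = TypeVar("T")


def get_attr_sketch(
    d: dict[str, Any],
    aliases: Iterable[str],
    default: T | None = None,
    *,
    conv: Callable[[Any], T] = float,  # assumed default conversion
) -> T | None:
    """Return the first value found under any alias, converted via ``conv``."""
    for key in aliases:
        if key in d:
            return conv(d[key])
    return default


def set_attr_sketch(
    d: dict[str, Any],
    aliases: Iterable[str],
    value: Any,
    *,
    conv: Callable[[Any], T] = float,
) -> T:
    """Write ``value`` under the primary (first) alias and return it converted."""
    primary = next(iter(aliases))
    converted = conv(value)
    d[primary] = converted
    return converted


node_data = {"nu_f": "0.8"}                           # hypothetical node attribute dict
vf = get_attr_sketch(node_data, ("vf", "nu_f"), 0.0)  # -> 0.8, found under the "nu_f" alias
set_attr_sketch(node_data, ("vf", "nu_f"), 0.9)       # writes the converted value under "vf"
```
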
tnfr/backends/__init__.py
ADDED
@@ -0,0 +1,354 @@
+"""TNFR computation backends for high-performance ΔNFR and Si evaluation.
+
+This module provides pluggable backend implementations that optimize the core
+TNFR computational kernels (ΔNFR, Si) using different numerical libraries.
+Each backend maintains TNFR semantic fidelity while leveraging library-specific
+optimizations like JIT compilation or GPU acceleration.
+
+The backend system ensures that the nodal equation ∂EPI/∂t = νf · ΔNFR(t) and
+all structural invariants remain intact regardless of which backend executes
+the computation.
+
+Examples
+--------
+Use the NumPy backend explicitly:
+
+>>> from tnfr.backends import get_backend
+>>> backend = get_backend("numpy")
+>>> backend.name
+'numpy'
+
+Select backend via environment variable:
+
+```bash
+export TNFR_BACKEND=jax
+python your_simulation.py
+```
+
+Available backends
+------------------
+- **numpy**: Vectorized NumPy implementation (default, stable)
+- **jax**: JIT-compiled JAX with autodiff support (experimental)
+- **torch**: PyTorch GPU-accelerated implementation (experimental)
+"""
+
+from __future__ import annotations
+
+import os
+from abc import ABC, abstractmethod
+from typing import Any, Mapping, MutableMapping
+
+from ..types import TNFRGraph
+from ..utils import get_logger
+
+__all__ = [
+    "TNFRBackend",
+    "get_backend",
+    "set_backend",
+    "available_backends",
+]
+
+logger = get_logger(__name__)
+
+
+class TNFRBackend(ABC):
+    """Base class for TNFR computation backends.
+
+    All backends must implement the core computational methods while
+    preserving TNFR structural semantics and the canonical nodal equation.
+
+    Structural Invariants
+    ---------------------
+    1. ΔNFR semantics: sign and magnitude must modulate reorganization rate
+    2. Phase verification: coupling requires explicit phase synchrony check
+    3. Operator closure: all transformations map to valid TNFR states
+    4. Determinism: computations must be reproducible with fixed seeds
+    5. Si stability: sense index must correlate with network coherence
+
+    Attributes
+    ----------
+    name : str
+        Backend identifier (e.g., "numpy", "jax", "torch")
+    supports_gpu : bool
+        Whether this backend can utilize GPU acceleration
+    supports_jit : bool
+        Whether this backend supports JIT compilation
+    """
+
+    @property
+    @abstractmethod
+    def name(self) -> str:
+        """Return the backend identifier."""
+        ...
+
+    @property
+    def supports_gpu(self) -> bool:
+        """Return True if this backend can use GPU acceleration."""
+        return False
+
+    @property
+    def supports_jit(self) -> bool:
+        """Return True if this backend supports JIT compilation."""
+        return False
+
+    @abstractmethod
+    def compute_delta_nfr(
+        self,
+        graph: TNFRGraph,
+        *,
+        cache_size: int | None = 1,
+        n_jobs: int | None = None,
+        profile: MutableMapping[str, float] | None = None,
+    ) -> None:
+        """Compute ΔNFR for all nodes in the graph.
+
+        This method must preserve the canonical ΔNFR computation semantics:
+        - Weighted combination of phase, EPI, νf, and topology gradients
+        - Proper phase dispersion calculation via neighbor phase means
+        - Coherent handling of isolated nodes (ΔNFR = 0)
+
+        Parameters
+        ----------
+        graph : TNFRGraph
+            Graph with node attributes (phase, EPI, νf) to compute ΔNFR
+        cache_size : int or None, optional
+            Maximum cached state entries. None means unlimited.
+        n_jobs : int or None, optional
+            Parallelism hint for backends that support it
+        profile : MutableMapping[str, float] or None, optional
+            Dict to accumulate timing metrics for profiling
+
+        Notes
+        -----
+        The computation must write ΔNFR values back to nodes using the
+        appropriate alias and maintain consistency with graph metadata.
+        """
+        ...
+
+    @abstractmethod
+    def compute_si(
+        self,
+        graph: TNFRGraph,
+        *,
+        inplace: bool = True,
+        n_jobs: int | None = None,
+        chunk_size: int | None = None,
+        profile: MutableMapping[str, Any] | None = None,
+    ) -> dict[Any, float] | Any:
+        """Compute the sense index (Si) for all nodes.
+
+        Si blends structural frequency (νf), phase alignment, and ΔNFR
+        attenuation according to the weights configured in graph metadata.
+
+        Parameters
+        ----------
+        graph : TNFRGraph
+            Graph with node attributes (νf, ΔNFR, phase)
+        inplace : bool, default=True
+            Whether to write Si values back to nodes
+        n_jobs : int or None, optional
+            Parallelism hint for backends that support it
+        chunk_size : int or None, optional
+            Batch size for chunked processing
+        profile : MutableMapping[str, Any] or None, optional
+            Dict to accumulate timing and execution path metrics
+
+        Returns
+        -------
+        dict[Any, float] or numpy.ndarray
+            Node-to-Si mapping or array of Si values
+
+        Notes
+        -----
+        The Si computation must respect the structural sensitivity weights
+        (alpha, beta, gamma) configured in the graph's SI_WEIGHTS metadata.
+        """
+        ...
+
+
+# Backend registry
+_BACKEND_REGISTRY: MutableMapping[str, type[TNFRBackend]] = {}
+_BACKEND_CACHE: MutableMapping[str, TNFRBackend] = {}
+_DEFAULT_BACKEND: str = "numpy"
+_CURRENT_BACKEND: str | None = None
+
+
+def register_backend(name: str, backend_class: type[TNFRBackend]) -> None:
+    """Register a TNFR backend implementation.
+
+    Parameters
+    ----------
+    name : str
+        Backend identifier (will be normalized to lowercase)
+    backend_class : type[TNFRBackend]
+        Backend class implementing the TNFRBackend interface
+
+    Raises
+    ------
+    ValueError
+        If name is already registered
+    TypeError
+        If backend_class doesn't implement TNFRBackend
+    """
+    name_lower = name.lower().strip()
+    if not name_lower:
+        raise ValueError("Backend name cannot be empty")
+
+    if name_lower in _BACKEND_REGISTRY:
+        raise ValueError(f"Backend '{name}' is already registered")
+
+    if not issubclass(backend_class, TNFRBackend):
+        raise TypeError(
+            f"Backend class must inherit from TNFRBackend, got {backend_class}"
+        )
+
+    _BACKEND_REGISTRY[name_lower] = backend_class
+    logger.debug("Registered TNFR backend: %s", name)
+
+
+def get_backend(name: str | None = None) -> TNFRBackend:
+    """Get a TNFR backend instance by name.
+
+    Resolution order:
+    1. Explicit `name` parameter
+    2. Previously set backend via set_backend()
+    3. TNFR_BACKEND environment variable
+    4. Default backend ("numpy")
+
+    Parameters
+    ----------
+    name : str or None, optional
+        Backend name to retrieve. If None, uses resolution order above.
+
+    Returns
+    -------
+    TNFRBackend
+        Backend instance ready for computation
+
+    Raises
+    ------
+    ValueError
+        If the requested backend is not registered
+    RuntimeError
+        If backend initialization fails
+
+    Examples
+    --------
+    >>> backend = get_backend("numpy")
+    >>> backend.name
+    'numpy'
+
+    >>> import os
+    >>> os.environ["TNFR_BACKEND"] = "numpy"
+    >>> backend = get_backend()
+    >>> backend.name
+    'numpy'
+    """
+    global _CURRENT_BACKEND
+
+    # Resolve backend name
+    if name is None:
+        if _CURRENT_BACKEND is not None:
+            name = _CURRENT_BACKEND
+        else:
+            name = os.environ.get("TNFR_BACKEND", _DEFAULT_BACKEND)
+
+    name_lower = name.lower().strip()
+
+    # Return cached instance if available
+    if name_lower in _BACKEND_CACHE:
+        return _BACKEND_CACHE[name_lower]
+
+    # Get backend class from registry
+    if name_lower not in _BACKEND_REGISTRY:
+        available = ", ".join(sorted(_BACKEND_REGISTRY.keys()))
+        raise ValueError(f"Unknown backend '{name}'. Available backends: {available}")
+
+    # Instantiate backend
+    backend_class = _BACKEND_REGISTRY[name_lower]
+    try:
+        backend = backend_class()
+        _BACKEND_CACHE[name_lower] = backend
+        logger.info("Initialized TNFR backend: %s", name_lower)
+        return backend
+    except Exception as exc:
+        raise RuntimeError(f"Failed to initialize backend '{name}': {exc}") from exc
+
+
+def set_backend(name: str) -> None:
+    """Set the default TNFR backend for subsequent operations.
+
+    Parameters
+    ----------
+    name : str
+        Backend name to set as default
+
+    Raises
+    ------
+    ValueError
+        If the backend name is not registered
+
+    Examples
+    --------
+    >>> set_backend("numpy")
+    >>> get_backend().name
+    'numpy'
+    """
+    global _CURRENT_BACKEND
+
+    name_lower = name.lower().strip()
+    if name_lower not in _BACKEND_REGISTRY:
+        available = ", ".join(sorted(_BACKEND_REGISTRY.keys()))
+        raise ValueError(f"Unknown backend '{name}'. Available backends: {available}")
+
+    _CURRENT_BACKEND = name_lower
+    logger.info("Set default TNFR backend to: %s", name_lower)
+
+
+def available_backends() -> Mapping[str, type[TNFRBackend]]:
+    """Return mapping of registered backend names to their classes.
+
+    Returns
+    -------
+    Mapping[str, type[TNFRBackend]]
+        Read-only view of registered backends
+
+    Examples
+    --------
+    >>> backends = available_backends()
+    >>> "numpy" in backends
+    True
+    """
+    return dict(_BACKEND_REGISTRY)
+
+
+# Import and register backends
+# This is done at module level to ensure backends are available immediately
+try:
+    from . import numpy_backend
+
+    register_backend("numpy", numpy_backend.NumPyBackend)
+except ImportError as exc:
+    logger.warning("NumPy backend unavailable: %s", exc)
+
+try:
+    from . import optimized_numpy
+
+    register_backend("optimized_numpy", optimized_numpy.OptimizedNumPyBackend)
+    register_backend("optimized", optimized_numpy.OptimizedNumPyBackend)
+except ImportError as exc:
+    logger.debug("Optimized NumPy backend not available: %s", exc)
+
+try:
+    from . import jax_backend
+
+    register_backend("jax", jax_backend.JAXBackend)
+except ImportError as exc:
+    logger.debug("JAX backend not available (optional dependency): %s", exc)
+
+try:
+    from . import torch_backend
+
+    register_backend("torch", torch_backend.TorchBackend)
+except ImportError as exc:
+    logger.debug("PyTorch backend not available (optional dependency): %s", exc)
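
Taken together, the registry above exposes a small plugin surface: subclass TNFRBackend, register the class, and select it by name. The sketch below shows one way that could look, assuming tnfr 8.5.0 is installed; the LoggingBackend class is hypothetical, and its methods simply forward to the same reference kernels the bundled JAX backend delegates to (see the next file).

```python
# Hypothetical example of registering a custom backend against the API above.
# Assumes tnfr 8.5.0 is installed; LoggingBackend is not part of the package.
from tnfr.backends import TNFRBackend, get_backend, register_backend, set_backend
from tnfr.dynamics.dnfr import default_compute_delta_nfr
from tnfr.metrics.sense_index import compute_Si


class LoggingBackend(TNFRBackend):
    """Toy backend that logs each pass and forwards to the reference kernels."""

    @property
    def name(self) -> str:
        return "logging"

    def compute_delta_nfr(self, graph, *, cache_size=1, n_jobs=None, profile=None) -> None:
        print(f"ΔNFR pass over {graph.number_of_nodes()} nodes")
        default_compute_delta_nfr(graph, cache_size=cache_size, n_jobs=n_jobs, profile=profile)

    def compute_si(self, graph, *, inplace=True, n_jobs=None, chunk_size=None, profile=None):
        print(f"Si pass over {graph.number_of_nodes()} nodes")
        return compute_Si(graph, inplace=inplace, n_jobs=n_jobs,
                          chunk_size=chunk_size, profile=profile)


register_backend("logging", LoggingBackend)  # raises ValueError if the name is taken
set_backend("logging")                       # becomes the process-wide default
backend = get_backend()                      # returns the cached LoggingBackend instance
```

Because get_backend() caches instances and falls back to the TNFR_BACKEND environment variable, the explicit set_backend() call is optional.
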
tnfr/backends/jax_backend.py
ADDED
@@ -0,0 +1,173 @@
+"""JAX-based JIT-compiled backend for TNFR computations (Experimental).
+
+This module provides a JIT-compiled JAX implementation of TNFR computational
+kernels. JAX enables:
+
+- Just-in-time (JIT) compilation for optimized machine code
+- Automatic differentiation for gradient-based analysis
+- GPU acceleration for large-scale networks
+- XLA compiler optimizations
+
+**Status**: Experimental - API may change in future releases.
+
+The JAX backend currently delegates to the NumPy implementation but provides
+infrastructure for future JIT-optimized kernels.
+
+Examples
+--------
+>>> from tnfr.backends import get_backend
+>>> backend = get_backend("jax")  # doctest: +SKIP
+>>> backend.supports_jit  # doctest: +SKIP
+True
+"""
+
+from __future__ import annotations
+
+from typing import Any, MutableMapping
+
+from . import TNFRBackend
+from ..types import TNFRGraph
+
+
+class JAXBackend(TNFRBackend):
+    """JIT-compiled JAX implementation of TNFR kernels (Experimental).
+
+    This backend provides a foundation for JIT-optimized TNFR computations
+    using JAX. Current implementation delegates to NumPy backend while
+    maintaining interface compatibility for future JIT implementations.
+
+    Future optimizations planned:
+    - JIT-compiled ΔNFR computation with @jax.jit
+    - Vectorized operations using jax.numpy
+    - GPU acceleration via JAX device placement
+    - Automatic differentiation for sensitivity analysis
+
+    Attributes
+    ----------
+    name : str
+        Returns "jax"
+    supports_gpu : bool
+        True (JAX supports GPU acceleration)
+    supports_jit : bool
+        True (JAX provides JIT compilation)
+
+    Notes
+    -----
+    Requires JAX to be installed: `pip install jax jaxlib`
+
+    For GPU support, install appropriate JAX GPU build for your platform.
+    """
+
+    def __init__(self) -> None:
+        """Initialize JAX backend."""
+        try:
+            import jax
+            import jax.numpy as jnp
+
+            self._jax = jax
+            self._jnp = jnp
+        except ImportError as exc:
+            raise RuntimeError(
+                "JAX backend requires jax to be installed. "
+                "Install with: pip install jax jaxlib"
+            ) from exc
+
+    @property
+    def name(self) -> str:
+        """Return the backend identifier."""
+        return "jax"
+
+    @property
+    def supports_gpu(self) -> bool:
+        """JAX supports GPU acceleration."""
+        return True
+
+    @property
+    def supports_jit(self) -> bool:
+        """JAX supports JIT compilation."""
+        return True
+
+    def compute_delta_nfr(
+        self,
+        graph: TNFRGraph,
+        *,
+        cache_size: int | None = 1,
+        n_jobs: int | None = None,
+        profile: MutableMapping[str, float] | None = None,
+    ) -> None:
+        """Compute ΔNFR using JAX backend.
+
+        **Current implementation**: Delegates to NumPy backend while maintaining
+        interface compatibility.
+
+        **Planned**: JIT-compiled vectorized computation using jax.numpy with
+        automatic XLA optimization and optional GPU acceleration.
+
+        Parameters
+        ----------
+        graph : TNFRGraph
+            NetworkX graph with TNFR node attributes
+        cache_size : int or None, optional
+            Cache size hint (currently passed to NumPy backend)
+        n_jobs : int or None, optional
+            Ignored (JAX uses vectorization instead of multiprocessing)
+        profile : MutableMapping[str, float] or None, optional
+            Dict to collect timing metrics
+        """
+        # TODO: Implement JIT-compiled JAX version
+        # For now, delegate to NumPy backend
+        from ..dynamics.dnfr import default_compute_delta_nfr
+
+        default_compute_delta_nfr(
+            graph,
+            cache_size=cache_size,
+            n_jobs=n_jobs,
+            profile=profile,
+        )
+
+    def compute_si(
+        self,
+        graph: TNFRGraph,
+        *,
+        inplace: bool = True,
+        n_jobs: int | None = None,
+        chunk_size: int | None = None,
+        profile: MutableMapping[str, Any] | None = None,
+    ) -> dict[Any, float] | Any:
+        """Compute sense index using JAX backend.
+
+        **Current implementation**: Delegates to NumPy backend while maintaining
+        interface compatibility.
+
+        **Planned**: JIT-compiled vectorized Si computation using jax.numpy with
+        optimized phase dispersion and normalization kernels.
+
+        Parameters
+        ----------
+        graph : TNFRGraph
+            NetworkX graph with TNFR node attributes
+        inplace : bool, default=True
+            Whether to write Si values back to graph
+        n_jobs : int or None, optional
+            Ignored (JAX uses vectorization)
+        chunk_size : int or None, optional
+            Chunk size hint (currently passed to NumPy backend)
+        profile : MutableMapping[str, Any] or None, optional
+            Dict to collect timing metrics
+
+        Returns
+        -------
+        dict[Any, float] or numpy.ndarray
+            Node-to-Si mapping or array of Si values
+        """
+        # TODO: Implement JIT-compiled JAX version
+        # For now, delegate to NumPy backend
+        from ..metrics.sense_index import compute_Si
+
+        return compute_Si(
+            graph,
+            inplace=inplace,
+            n_jobs=n_jobs,
+            chunk_size=chunk_size,
+            profile=profile,
+        )
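
The module keeps the JAX kernels as a TODO and delegates to the NumPy path for now. For readers wondering what the planned JIT route could look like, the following is a minimal, hypothetical sketch of a jax.jit-compiled mixing kernel; the weighting scheme is a simplified stand-in for the real ΔNFR combination of phase, EPI, νf, and topology gradients and is not part of the package.

```python
# Rough sketch of the kind of jitted kernel the docstring above anticipates.
# NOT part of tnfr: the gradient arrays and weights are illustrative inputs.
import jax
import jax.numpy as jnp


@jax.jit
def delta_nfr_kernel(phase_grad, epi_grad, vf_grad, topo_grad, weights):
    """Weighted combination of per-node gradient terms, compiled by XLA."""
    stacked = jnp.stack([phase_grad, epi_grad, vf_grad, topo_grad])  # shape (4, n_nodes)
    return jnp.tensordot(weights, stacked, axes=1)                   # shape (n_nodes,)


n = 5
zeros = jnp.zeros(n)
weights = jnp.array([0.4, 0.3, 0.2, 0.1])
dnfr = delta_nfr_kernel(zeros + 0.1, zeros, zeros, zeros, weights)   # traced once, then cached
```
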