tnfr-3.0.3-py3-none-any.whl → tnfr-8.5.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of tnfr has been flagged and may be problematic.
- tnfr/__init__.py +375 -56
- tnfr/__init__.pyi +33 -0
- tnfr/_compat.py +10 -0
- tnfr/_generated_version.py +34 -0
- tnfr/_version.py +49 -0
- tnfr/_version.pyi +7 -0
- tnfr/alias.py +723 -0
- tnfr/alias.pyi +108 -0
- tnfr/backends/__init__.py +354 -0
- tnfr/backends/jax_backend.py +173 -0
- tnfr/backends/numpy_backend.py +238 -0
- tnfr/backends/optimized_numpy.py +420 -0
- tnfr/backends/torch_backend.py +408 -0
- tnfr/cache.py +171 -0
- tnfr/cache.pyi +13 -0
- tnfr/cli/__init__.py +110 -0
- tnfr/cli/__init__.pyi +26 -0
- tnfr/cli/arguments.py +489 -0
- tnfr/cli/arguments.pyi +29 -0
- tnfr/cli/execution.py +914 -0
- tnfr/cli/execution.pyi +70 -0
- tnfr/cli/interactive_validator.py +614 -0
- tnfr/cli/utils.py +51 -0
- tnfr/cli/utils.pyi +7 -0
- tnfr/cli/validate.py +236 -0
- tnfr/compat/__init__.py +85 -0
- tnfr/compat/dataclass.py +136 -0
- tnfr/compat/jsonschema_stub.py +61 -0
- tnfr/compat/matplotlib_stub.py +73 -0
- tnfr/compat/numpy_stub.py +155 -0
- tnfr/config/__init__.py +224 -0
- tnfr/config/__init__.pyi +10 -0
- tnfr/config/constants.py +104 -0
- tnfr/config/constants.pyi +12 -0
- tnfr/config/defaults.py +54 -0
- tnfr/config/defaults_core.py +212 -0
- tnfr/config/defaults_init.py +33 -0
- tnfr/config/defaults_metric.py +104 -0
- tnfr/config/feature_flags.py +81 -0
- tnfr/config/feature_flags.pyi +16 -0
- tnfr/config/glyph_constants.py +31 -0
- tnfr/config/init.py +77 -0
- tnfr/config/init.pyi +8 -0
- tnfr/config/operator_names.py +254 -0
- tnfr/config/operator_names.pyi +36 -0
- tnfr/config/physics_derivation.py +354 -0
- tnfr/config/presets.py +83 -0
- tnfr/config/presets.pyi +7 -0
- tnfr/config/security.py +927 -0
- tnfr/config/thresholds.py +114 -0
- tnfr/config/tnfr_config.py +498 -0
- tnfr/constants/__init__.py +92 -0
- tnfr/constants/__init__.pyi +92 -0
- tnfr/constants/aliases.py +33 -0
- tnfr/constants/aliases.pyi +27 -0
- tnfr/constants/init.py +33 -0
- tnfr/constants/init.pyi +12 -0
- tnfr/constants/metric.py +104 -0
- tnfr/constants/metric.pyi +19 -0
- tnfr/core/__init__.py +33 -0
- tnfr/core/container.py +226 -0
- tnfr/core/default_implementations.py +329 -0
- tnfr/core/interfaces.py +279 -0
- tnfr/dynamics/__init__.py +238 -0
- tnfr/dynamics/__init__.pyi +83 -0
- tnfr/dynamics/adaptation.py +267 -0
- tnfr/dynamics/adaptation.pyi +7 -0
- tnfr/dynamics/adaptive_sequences.py +189 -0
- tnfr/dynamics/adaptive_sequences.pyi +14 -0
- tnfr/dynamics/aliases.py +23 -0
- tnfr/dynamics/aliases.pyi +19 -0
- tnfr/dynamics/bifurcation.py +232 -0
- tnfr/dynamics/canonical.py +229 -0
- tnfr/dynamics/canonical.pyi +48 -0
- tnfr/dynamics/coordination.py +385 -0
- tnfr/dynamics/coordination.pyi +25 -0
- tnfr/dynamics/dnfr.py +3034 -0
- tnfr/dynamics/dnfr.pyi +26 -0
- tnfr/dynamics/dynamic_limits.py +225 -0
- tnfr/dynamics/feedback.py +252 -0
- tnfr/dynamics/feedback.pyi +24 -0
- tnfr/dynamics/fused_dnfr.py +454 -0
- tnfr/dynamics/homeostasis.py +157 -0
- tnfr/dynamics/homeostasis.pyi +14 -0
- tnfr/dynamics/integrators.py +661 -0
- tnfr/dynamics/integrators.pyi +36 -0
- tnfr/dynamics/learning.py +310 -0
- tnfr/dynamics/learning.pyi +33 -0
- tnfr/dynamics/metabolism.py +254 -0
- tnfr/dynamics/nbody.py +796 -0
- tnfr/dynamics/nbody_tnfr.py +783 -0
- tnfr/dynamics/propagation.py +326 -0
- tnfr/dynamics/runtime.py +908 -0
- tnfr/dynamics/runtime.pyi +77 -0
- tnfr/dynamics/sampling.py +36 -0
- tnfr/dynamics/sampling.pyi +7 -0
- tnfr/dynamics/selectors.py +711 -0
- tnfr/dynamics/selectors.pyi +85 -0
- tnfr/dynamics/structural_clip.py +207 -0
- tnfr/errors/__init__.py +37 -0
- tnfr/errors/contextual.py +492 -0
- tnfr/execution.py +223 -0
- tnfr/execution.pyi +45 -0
- tnfr/extensions/__init__.py +205 -0
- tnfr/extensions/__init__.pyi +18 -0
- tnfr/extensions/base.py +173 -0
- tnfr/extensions/base.pyi +35 -0
- tnfr/extensions/business/__init__.py +71 -0
- tnfr/extensions/business/__init__.pyi +11 -0
- tnfr/extensions/business/cookbook.py +88 -0
- tnfr/extensions/business/cookbook.pyi +8 -0
- tnfr/extensions/business/health_analyzers.py +202 -0
- tnfr/extensions/business/health_analyzers.pyi +9 -0
- tnfr/extensions/business/patterns.py +183 -0
- tnfr/extensions/business/patterns.pyi +8 -0
- tnfr/extensions/medical/__init__.py +73 -0
- tnfr/extensions/medical/__init__.pyi +11 -0
- tnfr/extensions/medical/cookbook.py +88 -0
- tnfr/extensions/medical/cookbook.pyi +8 -0
- tnfr/extensions/medical/health_analyzers.py +181 -0
- tnfr/extensions/medical/health_analyzers.pyi +9 -0
- tnfr/extensions/medical/patterns.py +163 -0
- tnfr/extensions/medical/patterns.pyi +8 -0
- tnfr/flatten.py +262 -0
- tnfr/flatten.pyi +21 -0
- tnfr/gamma.py +354 -0
- tnfr/gamma.pyi +36 -0
- tnfr/glyph_history.py +377 -0
- tnfr/glyph_history.pyi +35 -0
- tnfr/glyph_runtime.py +19 -0
- tnfr/glyph_runtime.pyi +8 -0
- tnfr/immutable.py +218 -0
- tnfr/immutable.pyi +36 -0
- tnfr/initialization.py +203 -0
- tnfr/initialization.pyi +65 -0
- tnfr/io.py +10 -0
- tnfr/io.pyi +13 -0
- tnfr/locking.py +37 -0
- tnfr/locking.pyi +7 -0
- tnfr/mathematics/__init__.py +79 -0
- tnfr/mathematics/backend.py +453 -0
- tnfr/mathematics/backend.pyi +99 -0
- tnfr/mathematics/dynamics.py +408 -0
- tnfr/mathematics/dynamics.pyi +90 -0
- tnfr/mathematics/epi.py +391 -0
- tnfr/mathematics/epi.pyi +65 -0
- tnfr/mathematics/generators.py +242 -0
- tnfr/mathematics/generators.pyi +29 -0
- tnfr/mathematics/metrics.py +119 -0
- tnfr/mathematics/metrics.pyi +16 -0
- tnfr/mathematics/operators.py +239 -0
- tnfr/mathematics/operators.pyi +59 -0
- tnfr/mathematics/operators_factory.py +124 -0
- tnfr/mathematics/operators_factory.pyi +11 -0
- tnfr/mathematics/projection.py +87 -0
- tnfr/mathematics/projection.pyi +33 -0
- tnfr/mathematics/runtime.py +182 -0
- tnfr/mathematics/runtime.pyi +64 -0
- tnfr/mathematics/spaces.py +256 -0
- tnfr/mathematics/spaces.pyi +83 -0
- tnfr/mathematics/transforms.py +305 -0
- tnfr/mathematics/transforms.pyi +62 -0
- tnfr/metrics/__init__.py +79 -0
- tnfr/metrics/__init__.pyi +20 -0
- tnfr/metrics/buffer_cache.py +163 -0
- tnfr/metrics/buffer_cache.pyi +24 -0
- tnfr/metrics/cache_utils.py +214 -0
- tnfr/metrics/coherence.py +2009 -0
- tnfr/metrics/coherence.pyi +129 -0
- tnfr/metrics/common.py +158 -0
- tnfr/metrics/common.pyi +35 -0
- tnfr/metrics/core.py +316 -0
- tnfr/metrics/core.pyi +13 -0
- tnfr/metrics/diagnosis.py +833 -0
- tnfr/metrics/diagnosis.pyi +86 -0
- tnfr/metrics/emergence.py +245 -0
- tnfr/metrics/export.py +179 -0
- tnfr/metrics/export.pyi +7 -0
- tnfr/metrics/glyph_timing.py +379 -0
- tnfr/metrics/glyph_timing.pyi +81 -0
- tnfr/metrics/learning_metrics.py +280 -0
- tnfr/metrics/learning_metrics.pyi +21 -0
- tnfr/metrics/phase_coherence.py +351 -0
- tnfr/metrics/phase_compatibility.py +349 -0
- tnfr/metrics/reporting.py +183 -0
- tnfr/metrics/reporting.pyi +25 -0
- tnfr/metrics/sense_index.py +1203 -0
- tnfr/metrics/sense_index.pyi +9 -0
- tnfr/metrics/trig.py +373 -0
- tnfr/metrics/trig.pyi +13 -0
- tnfr/metrics/trig_cache.py +233 -0
- tnfr/metrics/trig_cache.pyi +10 -0
- tnfr/multiscale/__init__.py +32 -0
- tnfr/multiscale/hierarchical.py +517 -0
- tnfr/node.py +763 -0
- tnfr/node.pyi +139 -0
- tnfr/observers.py +255 -130
- tnfr/observers.pyi +31 -0
- tnfr/ontosim.py +144 -137
- tnfr/ontosim.pyi +28 -0
- tnfr/operators/__init__.py +1672 -0
- tnfr/operators/__init__.pyi +31 -0
- tnfr/operators/algebra.py +277 -0
- tnfr/operators/canonical_patterns.py +420 -0
- tnfr/operators/cascade.py +267 -0
- tnfr/operators/cycle_detection.py +358 -0
- tnfr/operators/definitions.py +4108 -0
- tnfr/operators/definitions.pyi +78 -0
- tnfr/operators/grammar.py +1164 -0
- tnfr/operators/grammar.pyi +140 -0
- tnfr/operators/hamiltonian.py +710 -0
- tnfr/operators/health_analyzer.py +809 -0
- tnfr/operators/jitter.py +272 -0
- tnfr/operators/jitter.pyi +11 -0
- tnfr/operators/lifecycle.py +314 -0
- tnfr/operators/metabolism.py +618 -0
- tnfr/operators/metrics.py +2138 -0
- tnfr/operators/network_analysis/__init__.py +27 -0
- tnfr/operators/network_analysis/source_detection.py +186 -0
- tnfr/operators/nodal_equation.py +395 -0
- tnfr/operators/pattern_detection.py +660 -0
- tnfr/operators/patterns.py +669 -0
- tnfr/operators/postconditions/__init__.py +38 -0
- tnfr/operators/postconditions/mutation.py +236 -0
- tnfr/operators/preconditions/__init__.py +1226 -0
- tnfr/operators/preconditions/coherence.py +305 -0
- tnfr/operators/preconditions/dissonance.py +236 -0
- tnfr/operators/preconditions/emission.py +128 -0
- tnfr/operators/preconditions/mutation.py +580 -0
- tnfr/operators/preconditions/reception.py +125 -0
- tnfr/operators/preconditions/resonance.py +364 -0
- tnfr/operators/registry.py +74 -0
- tnfr/operators/registry.pyi +9 -0
- tnfr/operators/remesh.py +1809 -0
- tnfr/operators/remesh.pyi +26 -0
- tnfr/operators/structural_units.py +268 -0
- tnfr/operators/unified_grammar.py +105 -0
- tnfr/parallel/__init__.py +54 -0
- tnfr/parallel/auto_scaler.py +234 -0
- tnfr/parallel/distributed.py +384 -0
- tnfr/parallel/engine.py +238 -0
- tnfr/parallel/gpu_engine.py +420 -0
- tnfr/parallel/monitoring.py +248 -0
- tnfr/parallel/partitioner.py +459 -0
- tnfr/py.typed +0 -0
- tnfr/recipes/__init__.py +22 -0
- tnfr/recipes/cookbook.py +743 -0
- tnfr/rng.py +178 -0
- tnfr/rng.pyi +26 -0
- tnfr/schemas/__init__.py +8 -0
- tnfr/schemas/grammar.json +94 -0
- tnfr/sdk/__init__.py +107 -0
- tnfr/sdk/__init__.pyi +19 -0
- tnfr/sdk/adaptive_system.py +173 -0
- tnfr/sdk/adaptive_system.pyi +21 -0
- tnfr/sdk/builders.py +370 -0
- tnfr/sdk/builders.pyi +51 -0
- tnfr/sdk/fluent.py +1121 -0
- tnfr/sdk/fluent.pyi +74 -0
- tnfr/sdk/templates.py +342 -0
- tnfr/sdk/templates.pyi +41 -0
- tnfr/sdk/utils.py +341 -0
- tnfr/secure_config.py +46 -0
- tnfr/security/__init__.py +70 -0
- tnfr/security/database.py +514 -0
- tnfr/security/subprocess.py +503 -0
- tnfr/security/validation.py +290 -0
- tnfr/selector.py +247 -0
- tnfr/selector.pyi +19 -0
- tnfr/sense.py +378 -0
- tnfr/sense.pyi +23 -0
- tnfr/services/__init__.py +17 -0
- tnfr/services/orchestrator.py +325 -0
- tnfr/sparse/__init__.py +39 -0
- tnfr/sparse/representations.py +492 -0
- tnfr/structural.py +705 -0
- tnfr/structural.pyi +83 -0
- tnfr/telemetry/__init__.py +35 -0
- tnfr/telemetry/cache_metrics.py +226 -0
- tnfr/telemetry/cache_metrics.pyi +64 -0
- tnfr/telemetry/nu_f.py +422 -0
- tnfr/telemetry/nu_f.pyi +108 -0
- tnfr/telemetry/verbosity.py +36 -0
- tnfr/telemetry/verbosity.pyi +15 -0
- tnfr/tokens.py +58 -0
- tnfr/tokens.pyi +36 -0
- tnfr/tools/__init__.py +20 -0
- tnfr/tools/domain_templates.py +478 -0
- tnfr/tools/sequence_generator.py +846 -0
- tnfr/topology/__init__.py +13 -0
- tnfr/topology/asymmetry.py +151 -0
- tnfr/trace.py +543 -0
- tnfr/trace.pyi +42 -0
- tnfr/tutorials/__init__.py +38 -0
- tnfr/tutorials/autonomous_evolution.py +285 -0
- tnfr/tutorials/interactive.py +1576 -0
- tnfr/tutorials/structural_metabolism.py +238 -0
- tnfr/types.py +775 -0
- tnfr/types.pyi +357 -0
- tnfr/units.py +68 -0
- tnfr/units.pyi +13 -0
- tnfr/utils/__init__.py +282 -0
- tnfr/utils/__init__.pyi +215 -0
- tnfr/utils/cache.py +4223 -0
- tnfr/utils/cache.pyi +470 -0
- tnfr/utils/callbacks.py +375 -0
- tnfr/utils/callbacks.pyi +49 -0
- tnfr/utils/chunks.py +108 -0
- tnfr/utils/chunks.pyi +22 -0
- tnfr/utils/data.py +428 -0
- tnfr/utils/data.pyi +74 -0
- tnfr/utils/graph.py +85 -0
- tnfr/utils/graph.pyi +10 -0
- tnfr/utils/init.py +821 -0
- tnfr/utils/init.pyi +80 -0
- tnfr/utils/io.py +559 -0
- tnfr/utils/io.pyi +66 -0
- tnfr/utils/numeric.py +114 -0
- tnfr/utils/numeric.pyi +21 -0
- tnfr/validation/__init__.py +257 -0
- tnfr/validation/__init__.pyi +85 -0
- tnfr/validation/compatibility.py +460 -0
- tnfr/validation/compatibility.pyi +6 -0
- tnfr/validation/config.py +73 -0
- tnfr/validation/graph.py +139 -0
- tnfr/validation/graph.pyi +18 -0
- tnfr/validation/input_validation.py +755 -0
- tnfr/validation/invariants.py +712 -0
- tnfr/validation/rules.py +253 -0
- tnfr/validation/rules.pyi +44 -0
- tnfr/validation/runtime.py +279 -0
- tnfr/validation/runtime.pyi +28 -0
- tnfr/validation/sequence_validator.py +162 -0
- tnfr/validation/soft_filters.py +170 -0
- tnfr/validation/soft_filters.pyi +32 -0
- tnfr/validation/spectral.py +164 -0
- tnfr/validation/spectral.pyi +42 -0
- tnfr/validation/validator.py +1266 -0
- tnfr/validation/window.py +39 -0
- tnfr/validation/window.pyi +1 -0
- tnfr/visualization/__init__.py +98 -0
- tnfr/visualization/cascade_viz.py +256 -0
- tnfr/visualization/hierarchy.py +284 -0
- tnfr/visualization/sequence_plotter.py +784 -0
- tnfr/viz/__init__.py +60 -0
- tnfr/viz/matplotlib.py +278 -0
- tnfr/viz/matplotlib.pyi +35 -0
- tnfr-8.5.0.dist-info/METADATA +573 -0
- tnfr-8.5.0.dist-info/RECORD +353 -0
- tnfr-8.5.0.dist-info/entry_points.txt +3 -0
- tnfr-3.0.3.dist-info/licenses/LICENSE.txt → tnfr-8.5.0.dist-info/licenses/LICENSE.md +1 -1
- tnfr/constants.py +0 -183
- tnfr/dynamics.py +0 -543
- tnfr/helpers.py +0 -198
- tnfr/main.py +0 -37
- tnfr/operators.py +0 -296
- tnfr-3.0.3.dist-info/METADATA +0 -35
- tnfr-3.0.3.dist-info/RECORD +0 -13
- {tnfr-3.0.3.dist-info → tnfr-8.5.0.dist-info}/WHEEL +0 -0
- {tnfr-3.0.3.dist-info → tnfr-8.5.0.dist-info}/top_level.txt +0 -0
tnfr/mathematics/transforms.py
ADDED
@@ -0,0 +1,305 @@
"""Canonical transform contracts for TNFR coherence tooling.

This module intentionally provides *contracts* rather than concrete
implementations. Phase 2 of the mathematics roadmap will plug the actual
algorithms into these helpers. Until then, the functions below raise
``NotImplementedError`` with descriptive guidance so downstream modules know
which structural guarantees each helper must provide.

The three exposed contracts cover:

``build_isometry_factory``
    Expected to output callables that embed or project states while preserving
    the TNFR structural metric. Implementations must return operators whose
    adjoint composes to identity inside the target Hilbert or Banach space so
    no coherence is lost during modal changes.

``validate_norm_preservation``
    Should perform diagnostic checks that a provided transform keeps the
    νf-aligned norm invariant (within tolerance) across representative states.
    Validation must surface informative errors so simulation pipelines can
    gate potentially destructive transforms before they act on an EPI.

``ensure_coherence_monotonicity``
    Designed to assert that a transform (or sequence thereof) does not break
    the monotonic coherence requirements captured in the repo-wide invariants.
    Implementations should report any drop in ``C(t)`` outside authorised
    dissonance windows and annotate the offending timestep to ease triage.
"""

from __future__ import annotations

import logging
from dataclasses import dataclass
from typing import (
    TYPE_CHECKING,
    Callable,
    Iterable,
    Mapping,
    Protocol,
    Sequence,
    Union,
    runtime_checkable,
)

import numpy as np

from .epi import BEPIElement

if TYPE_CHECKING:
    from .spaces import BanachSpaceEPI

logger = logging.getLogger(__name__)

__all__ = [
    "CoherenceMonotonicityReport",
    "CoherenceViolation",
    "IsometryFactory",
    "build_isometry_factory",
    "validate_norm_preservation",
    "ensure_coherence_monotonicity",
]


@runtime_checkable
class IsometryFactory(Protocol):
    """Callable creating isometric transforms aligned with TNFR semantics.

    Implementations produced by :func:`build_isometry_factory` must accept a
    structural basis (modal decomposition, eigenvectors, or similar spectral
    anchors) and return a transform that preserves both the vector norm and the
    encoded coherence structure. The returned callable should accept the raw
    state data and emit the mapped state in the target representation while
    guaranteeing ``T* · T == I`` on the relevant space.
    """

    def __call__(
        self,
        *,
        basis: Sequence[Sequence[complex]] | None = None,
        enforce_phase: bool = True,
    ) -> Callable[[Sequence[complex]], Sequence[complex]]:
        """Return an isometric transform for the provided basis."""


def build_isometry_factory(
    *,
    source_dimension: int,
    target_dimension: int,
    allow_expansion: bool = False,
) -> IsometryFactory:
    """Create a factory for constructing TNFR-aligned isometries.

    Parameters
    ----------
    source_dimension:
        Dimensionality of the input structural space.
    target_dimension:
        Dimensionality of the destination structural space. When the target
        dimension is larger than the source, implementations must specify how
        coherence is embedded without dilution.
    allow_expansion:
        Flag indicating whether the isometry may expand into a higher
        dimensional space (still norm-preserving via padding and phase guards).

    Returns
    -------
    IsometryFactory
        A callable that can produce concrete isometries on demand once a basis
        or spectral frame is available.
    """

    raise NotImplementedError(
        "Phase 2 will provide the canonical TNFR isometry factory; "
        "current stage only documents the expected contract."
    )


def validate_norm_preservation(
    transform: Callable[[Sequence[complex]], Sequence[complex]],
    *,
    probes: Iterable[Sequence[complex]],
    metric: Callable[[Sequence[complex]], float],
    atol: float = 1e-9,
) -> None:
    """Assert that a transform preserves the TNFR structural norm.

    The validator should iterate through ``probes`` (representative EPI states)
    and confirm that applying ``transform`` leaves the provided ``metric``
    unchanged within ``atol``. Any detected drift must be reported via
    exceptions that include the offending probe and the measured deviation so
    callers can attribute potential coherence loss to specific conditions.
    """

    raise NotImplementedError(
        "Norm preservation checks will be introduced in Phase 2; implementers "
        "should ensure transform(metric(state)) == metric(state) within atol."
    )


@dataclass(frozen=True)
class CoherenceViolation:
    """Details about a monotonicity violation detected in a coherence trace."""

    index: int
    previous_value: float
    current_value: float
    tolerated_drop: float
    drop: float
    kind: str


@dataclass(frozen=True)
class CoherenceMonotonicityReport:
    """Structured report generated by :func:`ensure_coherence_monotonicity`."""

    coherence_values: tuple[float, ...]
    violations: tuple[CoherenceViolation, ...]
    allow_plateaus: bool
    tolerated_drop: float
    atol: float

    @property
    def is_monotonic(self) -> bool:
        """Return ``True`` when no violations were recorded."""

        return not self.violations


def _as_coherence_values(
    coherence_series: Sequence[Union[float, BEPIElement]],
    *,
    space: "BanachSpaceEPI" | None,
    norm_kwargs: Mapping[str, float],
) -> tuple[float, ...]:
    if not coherence_series:
        raise ValueError("coherence_series must contain at least one entry.")

    first = coherence_series[0]
    if isinstance(first, BEPIElement):
        from .spaces import BanachSpaceEPI  # Local import to avoid circular dependency

        working_space = space if space is not None else BanachSpaceEPI()
        values = []
        for element in coherence_series:
            if not isinstance(element, BEPIElement):
                raise TypeError(
                    "All entries must be BEPIElement instances when the series contains BEPI data.",
                )
            value = working_space.coherence_norm(
                element.f_continuous,
                element.a_discrete,
                x_grid=element.x_grid,
                **norm_kwargs,
            )
            values.append(float(value))
        return tuple(values)

    values = []
    for value in coherence_series:
        if isinstance(value, BEPIElement):
            raise TypeError(
                "All entries must be numeric when the series is treated as coherence values.",
            )
        numeric = float(value)
        if not np.isfinite(numeric):
            raise ValueError("Coherence values must be finite numbers.")
        values.append(numeric)
    return tuple(values)


def ensure_coherence_monotonicity(
    coherence_series: Sequence[Union[float, BEPIElement]],
    *,
    allow_plateaus: bool = True,
    tolerated_drop: float = 0.0,
    atol: float = 1e-9,
    space: "BanachSpaceEPI" | None = None,
    norm_kwargs: Mapping[str, float] | None = None,
) -> CoherenceMonotonicityReport:
    """Validate monotonic behaviour of coherence measurements ``C(t)``.

    Parameters
    ----------
    coherence_series:
        Ordered sequence of coherence measurements (as floats) or
        :class:`BEPIElement` instances recorded after each transform
        application.
    allow_plateaus:
        When ``True`` the contract tolerates flat segments, otherwise every
        subsequent value must strictly increase.
    tolerated_drop:
        Maximum allowed temporary decrease in coherence, representing approved
        dissonance windows. Values greater than zero should only appear when a
        higher-level scenario explicitly references controlled dissonance tests.

    Returns
    -------
    CoherenceMonotonicityReport
        Structured report describing the evaluated coherence trajectory and any
        detected violations. Callers can inspect ``report.is_monotonic`` to
        determine whether the constraint holds.
    """

    if tolerated_drop < 0:
        raise ValueError("tolerated_drop must be non-negative.")
    if atol < 0:
        raise ValueError("atol must be non-negative.")

    if norm_kwargs is None:
        norm_kwargs = {}

    values = _as_coherence_values(
        coherence_series, space=space, norm_kwargs=norm_kwargs
    )

    violations: list[CoherenceViolation] = []

    for index in range(1, len(values)):
        previous_value = values[index - 1]
        current_value = values[index]
        drop = previous_value - current_value

        if current_value + tolerated_drop + atol < previous_value:
            violation = CoherenceViolation(
                index=index,
                previous_value=previous_value,
                current_value=current_value,
                tolerated_drop=tolerated_drop,
                drop=drop,
                kind="drop",
            )
            violations.append(violation)
            logger.warning(
                "Coherence drop detected at step %s: previous=%s current=%s tolerated_drop=%s",
                index,
                previous_value,
                current_value,
                tolerated_drop,
            )
            continue

        if not allow_plateaus and current_value <= previous_value + atol:
            violation = CoherenceViolation(
                index=index,
                previous_value=previous_value,
                current_value=current_value,
                tolerated_drop=tolerated_drop,
                drop=max(0.0, drop),
                kind="plateau",
            )
            violations.append(violation)
            logger.warning(
                "Coherence plateau detected at step %s: previous=%s current=%s",
                index,
                previous_value,
                current_value,
            )

    return CoherenceMonotonicityReport(
        coherence_values=values,
        violations=tuple(violations),
        allow_plateaus=allow_plateaus,
        tolerated_drop=tolerated_drop,
        atol=atol,
    )
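For orientation, a minimal usage sketch (not part of the diff) of the ensure_coherence_monotonicity contract added above; the import path and call signature come from the hunk, the trace values are illustrative:

from tnfr.mathematics.transforms import ensure_coherence_monotonicity

# Coherence trace C(t) with one small dip at step 2 (0.55 -> 0.52).
trace = [0.40, 0.55, 0.52, 0.61]

# Treating the dip as an approved dissonance window keeps the report clean.
report = ensure_coherence_monotonicity(trace, tolerated_drop=0.05)
assert report.is_monotonic

# With the default tolerated_drop=0.0 the same dip is flagged as a "drop" violation.
strict = ensure_coherence_monotonicity(trace)
assert not strict.is_monotonic and strict.violations[0].kind == "drop"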
tnfr/mathematics/transforms.pyi
ADDED
@@ -0,0 +1,62 @@
from __future__ import annotations

from .epi import BEPIElement
from .spaces import BanachSpaceEPI
from dataclasses import dataclass
from typing import Callable, Iterable, Mapping, Protocol, Sequence

__all__ = [
    "CoherenceMonotonicityReport",
    "CoherenceViolation",
    "IsometryFactory",
    "build_isometry_factory",
    "validate_norm_preservation",
    "ensure_coherence_monotonicity",
]

class IsometryFactory(Protocol):
    def __call__(
        self,
        *,
        basis: Sequence[Sequence[complex]] | None = None,
        enforce_phase: bool = True,
    ) -> Callable[[Sequence[complex]], Sequence[complex]]: ...

def build_isometry_factory(
    *, source_dimension: int, target_dimension: int, allow_expansion: bool = False
) -> IsometryFactory: ...
def validate_norm_preservation(
    transform: Callable[[Sequence[complex]], Sequence[complex]],
    *,
    probes: Iterable[Sequence[complex]],
    metric: Callable[[Sequence[complex]], float],
    atol: float = 1e-09,
) -> None: ...
@dataclass(frozen=True)
class CoherenceViolation:
    index: int
    previous_value: float
    current_value: float
    tolerated_drop: float
    drop: float
    kind: str

@dataclass(frozen=True)
class CoherenceMonotonicityReport:
    coherence_values: tuple[float, ...]
    violations: tuple[CoherenceViolation, ...]
    allow_plateaus: bool
    tolerated_drop: float
    atol: float
    @property
    def is_monotonic(self) -> bool: ...

def ensure_coherence_monotonicity(
    coherence_series: Sequence[float | BEPIElement],
    *,
    allow_plateaus: bool = True,
    tolerated_drop: float = 0.0,
    atol: float = 1e-09,
    space: BanachSpaceEPI | None = None,
    norm_kwargs: Mapping[str, float] | None = None,
) -> CoherenceMonotonicityReport: ...
tnfr/metrics/__init__.py
ADDED
@@ -0,0 +1,79 @@
"""Registerable metrics."""

from __future__ import annotations

from .cache_utils import (
    CacheStats,
    configure_hot_path_caches,
    get_cache_config,
    log_cache_metrics,
)
from .coherence import (
    coherence_matrix,
    local_phase_sync,
    local_phase_sync_weighted,
    register_coherence_callbacks,
)
from .core import register_metrics_callbacks
from .diagnosis import (
    dissonance_events,
    register_diagnosis_callbacks,
)
from .emergence import (
    compute_bifurcation_rate,
    compute_emergence_index,
    compute_metabolic_efficiency,
    compute_structural_complexity,
)
from .export import export_metrics
from .learning_metrics import (
    compute_consolidation_index,
    compute_learning_efficiency,
    compute_learning_plasticity,
    glyph_history_to_operator_names,
)
from .phase_compatibility import (
    compute_network_phase_alignment,
    compute_phase_coupling_strength,
    is_phase_compatible,
)
from .reporting import (
    Tg_by_node,
    Tg_global,
    build_metrics_summary,
    glyph_top,
    glyphogram_series,
    latency_series,
)

__all__ = (
    "register_metrics_callbacks",
    "Tg_global",
    "Tg_by_node",
    "latency_series",
    "glyphogram_series",
    "glyph_top",
    "build_metrics_summary",
    "coherence_matrix",
    "local_phase_sync",
    "local_phase_sync_weighted",
    "register_coherence_callbacks",
    "register_diagnosis_callbacks",
    "dissonance_events",
    "export_metrics",
    "CacheStats",
    "configure_hot_path_caches",
    "get_cache_config",
    "log_cache_metrics",
    "compute_learning_plasticity",
    "compute_consolidation_index",
    "compute_learning_efficiency",
    "glyph_history_to_operator_names",
    "compute_structural_complexity",
    "compute_bifurcation_rate",
    "compute_metabolic_efficiency",
    "compute_emergence_index",
    "compute_phase_coupling_strength",
    "is_phase_compatible",
    "compute_network_phase_alignment",
)
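A short orientation note (not part of the diff): the flat __all__ above turns tnfr.metrics into a single import surface over its submodules, so the deep imports listed in the hunk become optional for callers; a minimal, illustrative sketch:

# Both names are re-exported by the __init__ above.
from tnfr.metrics import coherence_matrix, register_metrics_callbacks

# Equivalent deep imports that the re-export layer makes unnecessary:
# from tnfr.metrics.coherence import coherence_matrix
# from tnfr.metrics.core import register_metrics_callbacks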
tnfr/metrics/__init__.pyi
ADDED
@@ -0,0 +1,20 @@
from typing import Any

__all__: Any

def __getattr__(name: str) -> Any: ...

Tg_by_node: Any
Tg_global: Any
build_metrics_summary: Any
coherence_matrix: Any
dissonance_events: Any
export_metrics: Any
glyph_top: Any
glyphogram_series: Any
latency_series: Any
local_phase_sync: Any
local_phase_sync_weighted: Any
register_coherence_callbacks: Any
register_diagnosis_callbacks: Any
register_metrics_callbacks: Any
tnfr/metrics/buffer_cache.py
ADDED
@@ -0,0 +1,163 @@
"""Unified buffer cache for TNFR metrics hot paths.

This module consolidates buffer management across hot path computations
(Sense index, coherence, ΔNFR) to eliminate duplication and ensure consistent
cache key patterns and invalidation strategies.

Cache Key Structure
-------------------
All buffer caches use a tuple key: ``(key_prefix, count, buffer_count)``

This ensures:
- Collision avoidance between different computations (via unique key_prefix)
- Automatic invalidation on graph topology changes (via edge_version_cache)
- Efficient cache lookups without hash collisions

Common Key Prefixes
-------------------
- ``_si_buffers``: Sense index main computation buffers
- ``_si_chunk_workspace``: Si chunked processing scratch space
- ``_si_neighbor_buffers``: Si neighbor phase aggregation buffers
- ``_coherence_temp``: Coherence matrix temporary buffers
- ``_dnfr_prep_buffers``: ΔNFR preparation workspace

See docs/CACHING_STRATEGY.md for complete cache documentation.
"""

from __future__ import annotations

from typing import Any

from ..types import GraphLike
from ..utils import edge_version_cache, get_graph

__all__ = ("ensure_numpy_buffers",)


def ensure_numpy_buffers(
    G: GraphLike,
    *,
    key_prefix: str,
    count: int,
    buffer_count: int,
    np: Any,
    dtype: Any = None,
    max_cache_entries: int | None = 128,
) -> tuple[Any, ...]:
    """Return reusable NumPy buffers with unified caching strategy.

    This function centralizes buffer allocation for vectorized computations,
    ensuring consistent cache key structure and automatic invalidation on
    topology changes. Buffers are tied to the graph's edge version and
    automatically cleared when edges are added or removed.

    Cache Behavior
    --------------
    - **Key**: ``(key_prefix, count, buffer_count)`` ensures uniqueness
    - **Invalidation**: Automatic on edge version changes
    - **Capacity**: Controlled by ``max_cache_entries`` parameter
    - **Override**: Graph-level config via ``_cache_config['buffer_max_entries']``

    Parameters
    ----------
    G : GraphLike
        Graph whose edge version controls cache invalidation.
    key_prefix : str
        Prefix for the cache key, e.g. ``"_si_buffers"`` or ``"_coherence_temp"``.
        Must be unique per computation to avoid key collisions. See module
        docstring for standard prefixes.
    count : int
        Number of elements per buffer. Typically set to node count for
        node-level computations or edge count for edge-level operations.
    buffer_count : int
        Number of buffers to allocate. Each buffer is independent and can be
        used for different intermediate values in the computation.
    np : Any
        NumPy module or compatible array backend. Must support ``np.empty``.
    dtype : Any, optional
        Data type for the buffers. Default: ``float``.
    max_cache_entries : int or None, optional
        Maximum number of cached buffer sets for this key prefix. Default: ``128``.
        Set to ``None`` for unlimited cache size (use with caution on large graphs).
        Can be overridden globally via graph-level configuration.

    Returns
    -------
    tuple[Any, ...]
        Tuple of ``buffer_count`` NumPy arrays each sized to ``count`` elements.
        Arrays are reused from cache when available, avoiding repeated allocation.

    Notes
    -----
    This function consolidates buffer allocation patterns across Si computation,
    coherence matrix computation, and ΔNFR preparation. By centralizing buffer
    management, we ensure consistent cache key naming, avoid duplication, and
    maintain coherent cache invalidation when the graph edge structure changes.

    The buffer allocation pattern follows TNFR caching principles:
    1. **Determinism**: Same graph topology → same cached buffers
    2. **Coherence**: Edge changes → automatic cache invalidation
    3. **Efficiency**: Reuse eliminates allocation overhead in hot loops

    Performance Considerations
    --------------------------
    - Cache hits avoid O(n) allocation overhead
    - Memory cost: O(buffer_count * count * sizeof(dtype)) per cache entry
    - Recommended for buffers reused across multiple computation steps
    - Consider chunked processing for very large graphs (n > 100k nodes)

    Examples
    --------
    >>> import numpy as np
    >>> import networkx as nx
    >>> G = nx.Graph([(0, 1)])
    >>> buffers = ensure_numpy_buffers(
    ...     G, key_prefix="_test", count=10, buffer_count=3, np=np
    ... )
    >>> len(buffers)
    3
    >>> buffers[0].shape
    (10,)
    >>> all(isinstance(buf, np.ndarray) for buf in buffers)
    True

    Allocate workspace for a computation with 100 nodes:

    >>> G_large = nx.complete_graph(100)
    >>> workspace = ensure_numpy_buffers(
    ...     G_large,
    ...     key_prefix="_my_computation",
    ...     count=100,
    ...     buffer_count=2,
    ...     np=np
    ... )
    >>> workspace[0].size == 100
    True

    See Also
    --------
    edge_version_cache : Underlying cache mechanism
    configure_hot_path_caches : Global cache configuration
    """
    # Allow graph-level override of max_cache_entries
    graph = get_graph(G)
    cache_config = graph.get("_cache_config")
    if isinstance(cache_config, dict) and max_cache_entries is not None:
        override = cache_config.get("buffer_max_entries")
        if override is not None:
            max_cache_entries = int(override)

    if dtype is None:
        dtype = float
    if count <= 0:
        count = 1

    def builder() -> tuple[Any, ...]:
        return tuple(np.empty(count, dtype=dtype) for _ in range(buffer_count))

    return edge_version_cache(
        G,
        (key_prefix, count, buffer_count),
        builder,
        max_entries=max_cache_entries,
    )
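A brief sketch (illustrative, not from the diff) of the graph-level override documented in the docstring above. The assumption that get_graph(G) resolves to the networkx G.graph mapping is mine and should be verified against tnfr.utils:

import networkx as nx
import numpy as np

from tnfr.metrics.buffer_cache import ensure_numpy_buffers

G = nx.complete_graph(50)
# Assumed location of the config dict read via get_graph(G).
G.graph["_cache_config"] = {"buffer_max_entries": 32}

# Caps cached buffer sets for this key prefix at 32 instead of the default 128.
a, b = ensure_numpy_buffers(
    G, key_prefix="_si_buffers", count=G.number_of_nodes(), buffer_count=2, np=np
)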
tnfr/metrics/buffer_cache.pyi
ADDED
@@ -0,0 +1,24 @@
"""Unified buffer cache for TNFR metrics hot paths.

This module consolidates buffer management across hot path computations
(Sense index, coherence, ΔNFR) to eliminate duplication and ensure consistent
cache key patterns and invalidation strategies.
"""

from __future__ import annotations

from typing import Any

from ..types import GraphLike

__all__ = ("ensure_numpy_buffers",)

def ensure_numpy_buffers(
    G: GraphLike,
    *,
    key_prefix: str,
    count: int,
    buffer_count: int,
    np: Any,
    dtype: Any = None,
) -> tuple[Any, ...]: ...