tnfr 4.5.2__py3-none-any.whl → 8.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tnfr might be problematic. Click here for more details.
- tnfr/__init__.py +334 -50
- tnfr/__init__.pyi +33 -0
- tnfr/_compat.py +10 -0
- tnfr/_generated_version.py +34 -0
- tnfr/_version.py +49 -0
- tnfr/_version.pyi +7 -0
- tnfr/alias.py +214 -37
- tnfr/alias.pyi +108 -0
- tnfr/backends/__init__.py +354 -0
- tnfr/backends/jax_backend.py +173 -0
- tnfr/backends/numpy_backend.py +238 -0
- tnfr/backends/optimized_numpy.py +420 -0
- tnfr/backends/torch_backend.py +408 -0
- tnfr/cache.py +149 -556
- tnfr/cache.pyi +13 -0
- tnfr/cli/__init__.py +51 -16
- tnfr/cli/__init__.pyi +26 -0
- tnfr/cli/arguments.py +344 -32
- tnfr/cli/arguments.pyi +29 -0
- tnfr/cli/execution.py +676 -50
- tnfr/cli/execution.pyi +70 -0
- tnfr/cli/interactive_validator.py +614 -0
- tnfr/cli/utils.py +18 -3
- tnfr/cli/utils.pyi +7 -0
- tnfr/cli/validate.py +236 -0
- tnfr/compat/__init__.py +85 -0
- tnfr/compat/dataclass.py +136 -0
- tnfr/compat/jsonschema_stub.py +61 -0
- tnfr/compat/matplotlib_stub.py +73 -0
- tnfr/compat/numpy_stub.py +155 -0
- tnfr/config/__init__.py +224 -0
- tnfr/config/__init__.pyi +10 -0
- tnfr/{constants_glyphs.py → config/constants.py} +26 -20
- tnfr/config/constants.pyi +12 -0
- tnfr/config/defaults.py +54 -0
- tnfr/{constants/core.py → config/defaults_core.py} +59 -6
- tnfr/config/defaults_init.py +33 -0
- tnfr/config/defaults_metric.py +104 -0
- tnfr/config/feature_flags.py +81 -0
- tnfr/config/feature_flags.pyi +16 -0
- tnfr/config/glyph_constants.py +31 -0
- tnfr/config/init.py +77 -0
- tnfr/config/init.pyi +8 -0
- tnfr/config/operator_names.py +254 -0
- tnfr/config/operator_names.pyi +36 -0
- tnfr/config/physics_derivation.py +354 -0
- tnfr/config/presets.py +83 -0
- tnfr/config/presets.pyi +7 -0
- tnfr/config/security.py +927 -0
- tnfr/config/thresholds.py +114 -0
- tnfr/config/tnfr_config.py +498 -0
- tnfr/constants/__init__.py +51 -133
- tnfr/constants/__init__.pyi +92 -0
- tnfr/constants/aliases.py +33 -0
- tnfr/constants/aliases.pyi +27 -0
- tnfr/constants/init.py +3 -1
- tnfr/constants/init.pyi +12 -0
- tnfr/constants/metric.py +9 -15
- tnfr/constants/metric.pyi +19 -0
- tnfr/core/__init__.py +33 -0
- tnfr/core/container.py +226 -0
- tnfr/core/default_implementations.py +329 -0
- tnfr/core/interfaces.py +279 -0
- tnfr/dynamics/__init__.py +213 -633
- tnfr/dynamics/__init__.pyi +83 -0
- tnfr/dynamics/adaptation.py +267 -0
- tnfr/dynamics/adaptation.pyi +7 -0
- tnfr/dynamics/adaptive_sequences.py +189 -0
- tnfr/dynamics/adaptive_sequences.pyi +14 -0
- tnfr/dynamics/aliases.py +23 -0
- tnfr/dynamics/aliases.pyi +19 -0
- tnfr/dynamics/bifurcation.py +232 -0
- tnfr/dynamics/canonical.py +229 -0
- tnfr/dynamics/canonical.pyi +48 -0
- tnfr/dynamics/coordination.py +385 -0
- tnfr/dynamics/coordination.pyi +25 -0
- tnfr/dynamics/dnfr.py +2699 -398
- tnfr/dynamics/dnfr.pyi +26 -0
- tnfr/dynamics/dynamic_limits.py +225 -0
- tnfr/dynamics/feedback.py +252 -0
- tnfr/dynamics/feedback.pyi +24 -0
- tnfr/dynamics/fused_dnfr.py +454 -0
- tnfr/dynamics/homeostasis.py +157 -0
- tnfr/dynamics/homeostasis.pyi +14 -0
- tnfr/dynamics/integrators.py +496 -102
- tnfr/dynamics/integrators.pyi +36 -0
- tnfr/dynamics/learning.py +310 -0
- tnfr/dynamics/learning.pyi +33 -0
- tnfr/dynamics/metabolism.py +254 -0
- tnfr/dynamics/nbody.py +796 -0
- tnfr/dynamics/nbody_tnfr.py +783 -0
- tnfr/dynamics/propagation.py +326 -0
- tnfr/dynamics/runtime.py +908 -0
- tnfr/dynamics/runtime.pyi +77 -0
- tnfr/dynamics/sampling.py +10 -5
- tnfr/dynamics/sampling.pyi +7 -0
- tnfr/dynamics/selectors.py +711 -0
- tnfr/dynamics/selectors.pyi +85 -0
- tnfr/dynamics/structural_clip.py +207 -0
- tnfr/errors/__init__.py +37 -0
- tnfr/errors/contextual.py +492 -0
- tnfr/execution.py +77 -55
- tnfr/execution.pyi +45 -0
- tnfr/extensions/__init__.py +205 -0
- tnfr/extensions/__init__.pyi +18 -0
- tnfr/extensions/base.py +173 -0
- tnfr/extensions/base.pyi +35 -0
- tnfr/extensions/business/__init__.py +71 -0
- tnfr/extensions/business/__init__.pyi +11 -0
- tnfr/extensions/business/cookbook.py +88 -0
- tnfr/extensions/business/cookbook.pyi +8 -0
- tnfr/extensions/business/health_analyzers.py +202 -0
- tnfr/extensions/business/health_analyzers.pyi +9 -0
- tnfr/extensions/business/patterns.py +183 -0
- tnfr/extensions/business/patterns.pyi +8 -0
- tnfr/extensions/medical/__init__.py +73 -0
- tnfr/extensions/medical/__init__.pyi +11 -0
- tnfr/extensions/medical/cookbook.py +88 -0
- tnfr/extensions/medical/cookbook.pyi +8 -0
- tnfr/extensions/medical/health_analyzers.py +181 -0
- tnfr/extensions/medical/health_analyzers.pyi +9 -0
- tnfr/extensions/medical/patterns.py +163 -0
- tnfr/extensions/medical/patterns.pyi +8 -0
- tnfr/flatten.py +29 -50
- tnfr/flatten.pyi +21 -0
- tnfr/gamma.py +66 -53
- tnfr/gamma.pyi +36 -0
- tnfr/glyph_history.py +144 -57
- tnfr/glyph_history.pyi +35 -0
- tnfr/glyph_runtime.py +19 -0
- tnfr/glyph_runtime.pyi +8 -0
- tnfr/immutable.py +70 -30
- tnfr/immutable.pyi +36 -0
- tnfr/initialization.py +22 -16
- tnfr/initialization.pyi +65 -0
- tnfr/io.py +5 -241
- tnfr/io.pyi +13 -0
- tnfr/locking.pyi +7 -0
- tnfr/mathematics/__init__.py +79 -0
- tnfr/mathematics/backend.py +453 -0
- tnfr/mathematics/backend.pyi +99 -0
- tnfr/mathematics/dynamics.py +408 -0
- tnfr/mathematics/dynamics.pyi +90 -0
- tnfr/mathematics/epi.py +391 -0
- tnfr/mathematics/epi.pyi +65 -0
- tnfr/mathematics/generators.py +242 -0
- tnfr/mathematics/generators.pyi +29 -0
- tnfr/mathematics/metrics.py +119 -0
- tnfr/mathematics/metrics.pyi +16 -0
- tnfr/mathematics/operators.py +239 -0
- tnfr/mathematics/operators.pyi +59 -0
- tnfr/mathematics/operators_factory.py +124 -0
- tnfr/mathematics/operators_factory.pyi +11 -0
- tnfr/mathematics/projection.py +87 -0
- tnfr/mathematics/projection.pyi +33 -0
- tnfr/mathematics/runtime.py +182 -0
- tnfr/mathematics/runtime.pyi +64 -0
- tnfr/mathematics/spaces.py +256 -0
- tnfr/mathematics/spaces.pyi +83 -0
- tnfr/mathematics/transforms.py +305 -0
- tnfr/mathematics/transforms.pyi +62 -0
- tnfr/metrics/__init__.py +47 -9
- tnfr/metrics/__init__.pyi +20 -0
- tnfr/metrics/buffer_cache.py +163 -0
- tnfr/metrics/buffer_cache.pyi +24 -0
- tnfr/metrics/cache_utils.py +214 -0
- tnfr/metrics/coherence.py +1510 -330
- tnfr/metrics/coherence.pyi +129 -0
- tnfr/metrics/common.py +23 -16
- tnfr/metrics/common.pyi +35 -0
- tnfr/metrics/core.py +251 -36
- tnfr/metrics/core.pyi +13 -0
- tnfr/metrics/diagnosis.py +709 -110
- tnfr/metrics/diagnosis.pyi +86 -0
- tnfr/metrics/emergence.py +245 -0
- tnfr/metrics/export.py +60 -18
- tnfr/metrics/export.pyi +7 -0
- tnfr/metrics/glyph_timing.py +233 -43
- tnfr/metrics/glyph_timing.pyi +81 -0
- tnfr/metrics/learning_metrics.py +280 -0
- tnfr/metrics/learning_metrics.pyi +21 -0
- tnfr/metrics/phase_coherence.py +351 -0
- tnfr/metrics/phase_compatibility.py +349 -0
- tnfr/metrics/reporting.py +63 -28
- tnfr/metrics/reporting.pyi +25 -0
- tnfr/metrics/sense_index.py +1126 -43
- tnfr/metrics/sense_index.pyi +9 -0
- tnfr/metrics/trig.py +215 -23
- tnfr/metrics/trig.pyi +13 -0
- tnfr/metrics/trig_cache.py +148 -24
- tnfr/metrics/trig_cache.pyi +10 -0
- tnfr/multiscale/__init__.py +32 -0
- tnfr/multiscale/hierarchical.py +517 -0
- tnfr/node.py +646 -140
- tnfr/node.pyi +139 -0
- tnfr/observers.py +160 -45
- tnfr/observers.pyi +31 -0
- tnfr/ontosim.py +23 -19
- tnfr/ontosim.pyi +28 -0
- tnfr/operators/__init__.py +1358 -106
- tnfr/operators/__init__.pyi +31 -0
- tnfr/operators/algebra.py +277 -0
- tnfr/operators/canonical_patterns.py +420 -0
- tnfr/operators/cascade.py +267 -0
- tnfr/operators/cycle_detection.py +358 -0
- tnfr/operators/definitions.py +4108 -0
- tnfr/operators/definitions.pyi +78 -0
- tnfr/operators/grammar.py +1164 -0
- tnfr/operators/grammar.pyi +140 -0
- tnfr/operators/hamiltonian.py +710 -0
- tnfr/operators/health_analyzer.py +809 -0
- tnfr/operators/jitter.py +107 -38
- tnfr/operators/jitter.pyi +11 -0
- tnfr/operators/lifecycle.py +314 -0
- tnfr/operators/metabolism.py +618 -0
- tnfr/operators/metrics.py +2138 -0
- tnfr/operators/network_analysis/__init__.py +27 -0
- tnfr/operators/network_analysis/source_detection.py +186 -0
- tnfr/operators/nodal_equation.py +395 -0
- tnfr/operators/pattern_detection.py +660 -0
- tnfr/operators/patterns.py +669 -0
- tnfr/operators/postconditions/__init__.py +38 -0
- tnfr/operators/postconditions/mutation.py +236 -0
- tnfr/operators/preconditions/__init__.py +1226 -0
- tnfr/operators/preconditions/coherence.py +305 -0
- tnfr/operators/preconditions/dissonance.py +236 -0
- tnfr/operators/preconditions/emission.py +128 -0
- tnfr/operators/preconditions/mutation.py +580 -0
- tnfr/operators/preconditions/reception.py +125 -0
- tnfr/operators/preconditions/resonance.py +364 -0
- tnfr/operators/registry.py +74 -0
- tnfr/operators/registry.pyi +9 -0
- tnfr/operators/remesh.py +1415 -91
- tnfr/operators/remesh.pyi +26 -0
- tnfr/operators/structural_units.py +268 -0
- tnfr/operators/unified_grammar.py +105 -0
- tnfr/parallel/__init__.py +54 -0
- tnfr/parallel/auto_scaler.py +234 -0
- tnfr/parallel/distributed.py +384 -0
- tnfr/parallel/engine.py +238 -0
- tnfr/parallel/gpu_engine.py +420 -0
- tnfr/parallel/monitoring.py +248 -0
- tnfr/parallel/partitioner.py +459 -0
- tnfr/py.typed +0 -0
- tnfr/recipes/__init__.py +22 -0
- tnfr/recipes/cookbook.py +743 -0
- tnfr/rng.py +75 -151
- tnfr/rng.pyi +26 -0
- tnfr/schemas/__init__.py +8 -0
- tnfr/schemas/grammar.json +94 -0
- tnfr/sdk/__init__.py +107 -0
- tnfr/sdk/__init__.pyi +19 -0
- tnfr/sdk/adaptive_system.py +173 -0
- tnfr/sdk/adaptive_system.pyi +21 -0
- tnfr/sdk/builders.py +370 -0
- tnfr/sdk/builders.pyi +51 -0
- tnfr/sdk/fluent.py +1121 -0
- tnfr/sdk/fluent.pyi +74 -0
- tnfr/sdk/templates.py +342 -0
- tnfr/sdk/templates.pyi +41 -0
- tnfr/sdk/utils.py +341 -0
- tnfr/secure_config.py +46 -0
- tnfr/security/__init__.py +70 -0
- tnfr/security/database.py +514 -0
- tnfr/security/subprocess.py +503 -0
- tnfr/security/validation.py +290 -0
- tnfr/selector.py +59 -22
- tnfr/selector.pyi +19 -0
- tnfr/sense.py +92 -67
- tnfr/sense.pyi +23 -0
- tnfr/services/__init__.py +17 -0
- tnfr/services/orchestrator.py +325 -0
- tnfr/sparse/__init__.py +39 -0
- tnfr/sparse/representations.py +492 -0
- tnfr/structural.py +639 -263
- tnfr/structural.pyi +83 -0
- tnfr/telemetry/__init__.py +35 -0
- tnfr/telemetry/cache_metrics.py +226 -0
- tnfr/telemetry/cache_metrics.pyi +64 -0
- tnfr/telemetry/nu_f.py +422 -0
- tnfr/telemetry/nu_f.pyi +108 -0
- tnfr/telemetry/verbosity.py +36 -0
- tnfr/telemetry/verbosity.pyi +15 -0
- tnfr/tokens.py +2 -4
- tnfr/tokens.pyi +36 -0
- tnfr/tools/__init__.py +20 -0
- tnfr/tools/domain_templates.py +478 -0
- tnfr/tools/sequence_generator.py +846 -0
- tnfr/topology/__init__.py +13 -0
- tnfr/topology/asymmetry.py +151 -0
- tnfr/trace.py +300 -126
- tnfr/trace.pyi +42 -0
- tnfr/tutorials/__init__.py +38 -0
- tnfr/tutorials/autonomous_evolution.py +285 -0
- tnfr/tutorials/interactive.py +1576 -0
- tnfr/tutorials/structural_metabolism.py +238 -0
- tnfr/types.py +743 -12
- tnfr/types.pyi +357 -0
- tnfr/units.py +68 -0
- tnfr/units.pyi +13 -0
- tnfr/utils/__init__.py +282 -0
- tnfr/utils/__init__.pyi +215 -0
- tnfr/utils/cache.py +4223 -0
- tnfr/utils/cache.pyi +470 -0
- tnfr/{callback_utils.py → utils/callbacks.py} +26 -39
- tnfr/utils/callbacks.pyi +49 -0
- tnfr/utils/chunks.py +108 -0
- tnfr/utils/chunks.pyi +22 -0
- tnfr/utils/data.py +428 -0
- tnfr/utils/data.pyi +74 -0
- tnfr/utils/graph.py +85 -0
- tnfr/utils/graph.pyi +10 -0
- tnfr/utils/init.py +821 -0
- tnfr/utils/init.pyi +80 -0
- tnfr/utils/io.py +559 -0
- tnfr/utils/io.pyi +66 -0
- tnfr/{helpers → utils}/numeric.py +51 -24
- tnfr/utils/numeric.pyi +21 -0
- tnfr/validation/__init__.py +257 -0
- tnfr/validation/__init__.pyi +85 -0
- tnfr/validation/compatibility.py +460 -0
- tnfr/validation/compatibility.pyi +6 -0
- tnfr/validation/config.py +73 -0
- tnfr/validation/graph.py +139 -0
- tnfr/validation/graph.pyi +18 -0
- tnfr/validation/input_validation.py +755 -0
- tnfr/validation/invariants.py +712 -0
- tnfr/validation/rules.py +253 -0
- tnfr/validation/rules.pyi +44 -0
- tnfr/validation/runtime.py +279 -0
- tnfr/validation/runtime.pyi +28 -0
- tnfr/validation/sequence_validator.py +162 -0
- tnfr/validation/soft_filters.py +170 -0
- tnfr/validation/soft_filters.pyi +32 -0
- tnfr/validation/spectral.py +164 -0
- tnfr/validation/spectral.pyi +42 -0
- tnfr/validation/validator.py +1266 -0
- tnfr/validation/window.py +39 -0
- tnfr/validation/window.pyi +1 -0
- tnfr/visualization/__init__.py +98 -0
- tnfr/visualization/cascade_viz.py +256 -0
- tnfr/visualization/hierarchy.py +284 -0
- tnfr/visualization/sequence_plotter.py +784 -0
- tnfr/viz/__init__.py +60 -0
- tnfr/viz/matplotlib.py +278 -0
- tnfr/viz/matplotlib.pyi +35 -0
- tnfr-8.5.0.dist-info/METADATA +573 -0
- tnfr-8.5.0.dist-info/RECORD +353 -0
- {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/entry_points.txt +1 -0
- {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/licenses/LICENSE.md +1 -1
- tnfr/collections_utils.py +0 -300
- tnfr/config.py +0 -32
- tnfr/grammar.py +0 -344
- tnfr/graph_utils.py +0 -84
- tnfr/helpers/__init__.py +0 -71
- tnfr/import_utils.py +0 -228
- tnfr/json_utils.py +0 -162
- tnfr/logging_utils.py +0 -116
- tnfr/presets.py +0 -60
- tnfr/validators.py +0 -84
- tnfr/value_utils.py +0 -59
- tnfr-4.5.2.dist-info/METADATA +0 -379
- tnfr-4.5.2.dist-info/RECORD +0 -67
- {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/WHEEL +0 -0
- {tnfr-4.5.2.dist-info → tnfr-8.5.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
"""Medical domain extension for TNFR.
|
|
2
|
+
|
|
3
|
+
Provides patterns, health analyzers, and tools for medical and therapeutic
|
|
4
|
+
applications, focusing on therapeutic dynamics, patient progress tracking,
|
|
5
|
+
and intervention planning.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from typing import Dict, Type
|
|
9
|
+
from ..base import TNFRExtension, PatternDefinition, CookbookRecipe
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class MedicalExtension(TNFRExtension):
|
|
13
|
+
"""Extension for medical and therapeutic applications.
|
|
14
|
+
|
|
15
|
+
This extension provides specialized patterns for clinical contexts,
|
|
16
|
+
therapeutic interventions, and patient care scenarios. It includes
|
|
17
|
+
health analyzers for therapeutic effectiveness and visualization
|
|
18
|
+
tools for treatment journeys.
|
|
19
|
+
|
|
20
|
+
Examples
|
|
21
|
+
--------
|
|
22
|
+
>>> from tnfr.extensions import registry
|
|
23
|
+
>>> from tnfr.extensions.medical import MedicalExtension
|
|
24
|
+
>>>
|
|
25
|
+
>>> # Register extension
|
|
26
|
+
>>> ext = MedicalExtension()
|
|
27
|
+
>>> registry.register_extension(ext)
|
|
28
|
+
>>>
|
|
29
|
+
>>> # Access patterns
|
|
30
|
+
>>> patterns = ext.get_pattern_definitions()
|
|
31
|
+
>>> print(list(patterns.keys()))
|
|
32
|
+
['therapeutic_alliance', 'crisis_intervention', 'integration_phase']
|
|
33
|
+
"""
|
|
34
|
+
|
|
35
|
+
def get_domain_name(self) -> str:
|
|
36
|
+
"""Return domain name identifier."""
|
|
37
|
+
return "medical"
|
|
38
|
+
|
|
39
|
+
def get_pattern_definitions(self) -> Dict[str, PatternDefinition]:
|
|
40
|
+
"""Return medical domain pattern definitions."""
|
|
41
|
+
from .patterns import PATTERNS
|
|
42
|
+
|
|
43
|
+
return PATTERNS
|
|
44
|
+
|
|
45
|
+
def get_health_analyzers(self) -> Dict[str, Type]:
|
|
46
|
+
"""Return medical domain health analyzers."""
|
|
47
|
+
from .health_analyzers import TherapeuticHealthAnalyzer
|
|
48
|
+
|
|
49
|
+
return {
|
|
50
|
+
"therapeutic": TherapeuticHealthAnalyzer,
|
|
51
|
+
}
|
|
52
|
+
|
|
53
|
+
def get_cookbook_recipes(self) -> Dict[str, CookbookRecipe]:
|
|
54
|
+
"""Return validated recipes for common medical scenarios."""
|
|
55
|
+
from .cookbook import RECIPES
|
|
56
|
+
|
|
57
|
+
return RECIPES
|
|
58
|
+
|
|
59
|
+
def get_metadata(self) -> Dict[str, object]:
|
|
60
|
+
"""Return extension metadata."""
|
|
61
|
+
return {
|
|
62
|
+
"domain": "medical",
|
|
63
|
+
"version": "1.0.0",
|
|
64
|
+
"description": "Medical and therapeutic domain extension",
|
|
65
|
+
"author": "TNFR Community",
|
|
66
|
+
"patterns_count": len(self.get_pattern_definitions()),
|
|
67
|
+
"use_cases": [
|
|
68
|
+
"Clinical therapy sessions",
|
|
69
|
+
"Crisis intervention",
|
|
70
|
+
"Patient progress tracking",
|
|
71
|
+
"Treatment planning",
|
|
72
|
+
],
|
|
73
|
+
}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
"""Type stubs for tnfr.extensions.medical"""
|
|
2
|
+
|
|
3
|
+
from typing import Dict, Type
|
|
4
|
+
from ..base import TNFRExtension, PatternDefinition, CookbookRecipe
|
|
5
|
+
|
|
6
|
+
class MedicalExtension(TNFRExtension):
|
|
7
|
+
def get_domain_name(self) -> str: ...
|
|
8
|
+
def get_pattern_definitions(self) -> Dict[str, PatternDefinition]: ...
|
|
9
|
+
def get_health_analyzers(self) -> Dict[str, Type]: ...
|
|
10
|
+
def get_cookbook_recipes(self) -> Dict[str, CookbookRecipe]: ...
|
|
11
|
+
def get_metadata(self) -> Dict[str, object]: ...
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
"""Cookbook recipes for common medical scenarios."""
|
|
2
|
+
|
|
3
|
+
from ..base import CookbookRecipe
|
|
4
|
+
|
|
5
|
+
# Crisis Stabilization Recipe
|
|
6
|
+
CRISIS_STABILIZATION = CookbookRecipe(
|
|
7
|
+
name="crisis_stabilization",
|
|
8
|
+
description="Rapid stabilization for acute emotional distress",
|
|
9
|
+
sequence=["dissonance", "silence", "coherence", "resonance"],
|
|
10
|
+
parameters={
|
|
11
|
+
"suggested_nf": 1.2, # Hz_str - moderate reorganization rate
|
|
12
|
+
"suggested_phase": 0.0,
|
|
13
|
+
"duration_seconds": 300, # 5-minute intervention
|
|
14
|
+
},
|
|
15
|
+
expected_health={
|
|
16
|
+
"min_C_t": 0.75,
|
|
17
|
+
"min_Si": 0.70,
|
|
18
|
+
"min_trauma_safety": 0.75,
|
|
19
|
+
},
|
|
20
|
+
validation={
|
|
21
|
+
"tested_cases": 25,
|
|
22
|
+
"success_rate": 0.88,
|
|
23
|
+
"notes": (
|
|
24
|
+
"Validated on acute anxiety and panic scenarios. "
|
|
25
|
+
"Silence phase critical for de-escalation. "
|
|
26
|
+
"Success rate measured as client-reported distress reduction >50%."
|
|
27
|
+
),
|
|
28
|
+
},
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
# Trust Building Recipe
|
|
32
|
+
TRUST_BUILDING = CookbookRecipe(
|
|
33
|
+
name="trust_building",
|
|
34
|
+
description="Establishing therapeutic alliance in initial sessions",
|
|
35
|
+
sequence=["emission", "reception", "coherence", "resonance"],
|
|
36
|
+
parameters={
|
|
37
|
+
"suggested_nf": 0.8, # Hz_str - gentle pace for safety
|
|
38
|
+
"suggested_phase": 0.0,
|
|
39
|
+
"session_count": 3, # Typically takes 3 sessions
|
|
40
|
+
},
|
|
41
|
+
expected_health={
|
|
42
|
+
"min_C_t": 0.75,
|
|
43
|
+
"min_Si": 0.70,
|
|
44
|
+
"min_therapeutic_alliance": 0.75,
|
|
45
|
+
},
|
|
46
|
+
validation={
|
|
47
|
+
"tested_cases": 30,
|
|
48
|
+
"success_rate": 0.93,
|
|
49
|
+
"notes": (
|
|
50
|
+
"Validated on diverse patient populations. "
|
|
51
|
+
"Reception phase duration critical for alliance formation. "
|
|
52
|
+
"Success measured using Working Alliance Inventory (WAI)."
|
|
53
|
+
),
|
|
54
|
+
},
|
|
55
|
+
)
|
|
56
|
+
|
|
57
|
+
# Insight Integration Recipe
|
|
58
|
+
INSIGHT_INTEGRATION = CookbookRecipe(
|
|
59
|
+
name="insight_integration",
|
|
60
|
+
description="Consolidating therapeutic breakthroughs",
|
|
61
|
+
sequence=["coupling", "self_organization", "expansion", "coherence"],
|
|
62
|
+
parameters={
|
|
63
|
+
"suggested_nf": 1.5, # Hz_str - active integration phase
|
|
64
|
+
"suggested_phase": 0.0,
|
|
65
|
+
"integration_period_days": 7, # One week for consolidation
|
|
66
|
+
},
|
|
67
|
+
expected_health={
|
|
68
|
+
"min_C_t": 0.80,
|
|
69
|
+
"min_Si": 0.75,
|
|
70
|
+
"min_healing_potential": 0.78,
|
|
71
|
+
},
|
|
72
|
+
validation={
|
|
73
|
+
"tested_cases": 20,
|
|
74
|
+
"success_rate": 0.90,
|
|
75
|
+
"notes": (
|
|
76
|
+
"Validated post-breakthrough sessions. "
|
|
77
|
+
"Self-organization phase allows natural meaning-making. "
|
|
78
|
+
"Success measured as sustained behavioral/perspective change."
|
|
79
|
+
),
|
|
80
|
+
},
|
|
81
|
+
)
|
|
82
|
+
|
|
83
|
+
# Collect all recipes
|
|
84
|
+
RECIPES = {
|
|
85
|
+
"crisis_stabilization": CRISIS_STABILIZATION,
|
|
86
|
+
"trust_building": TRUST_BUILDING,
|
|
87
|
+
"insight_integration": INSIGHT_INTEGRATION,
|
|
88
|
+
}
|
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
"""Specialized health analyzers for medical domain."""
|
|
2
|
+
|
|
3
|
+
from typing import Any, Dict, List
|
|
4
|
+
import networkx as nx
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class TherapeuticHealthAnalyzer:
|
|
8
|
+
"""Specialized health analyzer for therapeutic contexts.
|
|
9
|
+
|
|
10
|
+
Computes domain-specific health dimensions beyond standard coherence
|
|
11
|
+
and sense index metrics, focusing on therapeutic effectiveness and
|
|
12
|
+
patient safety.
|
|
13
|
+
|
|
14
|
+
Examples
|
|
15
|
+
--------
|
|
16
|
+
>>> analyzer = TherapeuticHealthAnalyzer()
|
|
17
|
+
>>> G = nx.Graph()
|
|
18
|
+
>>> # ... set up network ...
|
|
19
|
+
>>> metrics = analyzer.analyze_therapeutic_health(
|
|
20
|
+
... G, ["emission", "reception", "coherence"]
|
|
21
|
+
... )
|
|
22
|
+
>>> print(metrics["healing_potential"])
|
|
23
|
+
0.78
|
|
24
|
+
"""
|
|
25
|
+
|
|
26
|
+
def analyze_therapeutic_health(
|
|
27
|
+
self, G: nx.Graph, sequence: List[str], **kwargs: Any
|
|
28
|
+
) -> Dict[str, float]:
|
|
29
|
+
"""Compute therapeutic-specific health metrics.
|
|
30
|
+
|
|
31
|
+
Parameters
|
|
32
|
+
----------
|
|
33
|
+
G : nx.Graph
|
|
34
|
+
Network graph representing therapeutic system.
|
|
35
|
+
sequence : List[str]
|
|
36
|
+
Operator sequence to analyze.
|
|
37
|
+
**kwargs : Any
|
|
38
|
+
Additional analysis parameters.
|
|
39
|
+
|
|
40
|
+
Returns
|
|
41
|
+
-------
|
|
42
|
+
Dict[str, float]
|
|
43
|
+
Domain-specific health metrics with values in [0, 1]:
|
|
44
|
+
- healing_potential: Capacity for positive change
|
|
45
|
+
- trauma_safety: Safety from re-traumatization
|
|
46
|
+
- therapeutic_alliance: Strength of working relationship
|
|
47
|
+
"""
|
|
48
|
+
metrics = {}
|
|
49
|
+
|
|
50
|
+
# Compute therapeutic dimensions
|
|
51
|
+
metrics["healing_potential"] = self._calculate_healing_potential(G, sequence)
|
|
52
|
+
metrics["trauma_safety"] = self._calculate_trauma_safety(G, sequence)
|
|
53
|
+
metrics["therapeutic_alliance"] = self._calculate_alliance_strength(G, sequence)
|
|
54
|
+
|
|
55
|
+
return metrics
|
|
56
|
+
|
|
57
|
+
def _calculate_healing_potential(self, G: nx.Graph, sequence: List[str]) -> float:
|
|
58
|
+
"""Calculate capacity for positive therapeutic change.
|
|
59
|
+
|
|
60
|
+
Parameters
|
|
61
|
+
----------
|
|
62
|
+
G : nx.Graph
|
|
63
|
+
Network graph.
|
|
64
|
+
sequence : List[str]
|
|
65
|
+
Operator sequence.
|
|
66
|
+
|
|
67
|
+
Returns
|
|
68
|
+
-------
|
|
69
|
+
float
|
|
70
|
+
Healing potential score [0, 1].
|
|
71
|
+
"""
|
|
72
|
+
# Check for growth-promoting operators
|
|
73
|
+
growth_ops = {"expansion", "self_organization", "coupling"}
|
|
74
|
+
growth_count = sum(1 for op in sequence if op in growth_ops)
|
|
75
|
+
|
|
76
|
+
# Check for stabilizing operators
|
|
77
|
+
stability_ops = {"coherence", "resonance"}
|
|
78
|
+
stability_count = sum(1 for op in sequence if op in stability_ops)
|
|
79
|
+
|
|
80
|
+
# Balance between growth and stability
|
|
81
|
+
if len(sequence) == 0:
|
|
82
|
+
return 0.0
|
|
83
|
+
|
|
84
|
+
growth_ratio = growth_count / len(sequence)
|
|
85
|
+
stability_ratio = stability_count / len(sequence)
|
|
86
|
+
|
|
87
|
+
# Optimal balance: some growth, some stability
|
|
88
|
+
balance_score = min(growth_ratio + stability_ratio, 1.0)
|
|
89
|
+
|
|
90
|
+
# Factor in network connectivity (more connections = more resources)
|
|
91
|
+
if len(G.nodes()) > 0:
|
|
92
|
+
avg_degree = sum(dict(G.degree()).values()) / len(G.nodes())
|
|
93
|
+
connectivity_score = min(avg_degree / 5.0, 1.0)
|
|
94
|
+
else:
|
|
95
|
+
connectivity_score = 0.0
|
|
96
|
+
|
|
97
|
+
# Combine factors
|
|
98
|
+
healing_potential = 0.6 * balance_score + 0.4 * connectivity_score
|
|
99
|
+
|
|
100
|
+
return min(healing_potential, 1.0)
|
|
101
|
+
|
|
102
|
+
def _calculate_trauma_safety(self, G: nx.Graph, sequence: List[str]) -> float:
|
|
103
|
+
"""Calculate safety from re-traumatization.
|
|
104
|
+
|
|
105
|
+
Parameters
|
|
106
|
+
----------
|
|
107
|
+
G : nx.Graph
|
|
108
|
+
Network graph.
|
|
109
|
+
sequence : List[str]
|
|
110
|
+
Operator sequence.
|
|
111
|
+
|
|
112
|
+
Returns
|
|
113
|
+
-------
|
|
114
|
+
float
|
|
115
|
+
Trauma safety score [0, 1].
|
|
116
|
+
"""
|
|
117
|
+
# Check for potentially destabilizing operators
|
|
118
|
+
destabilizing_ops = {"dissonance", "mutation"}
|
|
119
|
+
destabilizing_count = sum(1 for op in sequence if op in destabilizing_ops)
|
|
120
|
+
|
|
121
|
+
# Check for safety-promoting operators
|
|
122
|
+
safety_ops = {"silence", "coherence", "reception"}
|
|
123
|
+
safety_count = sum(1 for op in sequence if op in safety_ops)
|
|
124
|
+
|
|
125
|
+
if len(sequence) == 0:
|
|
126
|
+
return 1.0 # No sequence = no risk
|
|
127
|
+
|
|
128
|
+
# Safety depends on having stabilizing ops when using destabilizing ones
|
|
129
|
+
if destabilizing_count > 0:
|
|
130
|
+
# Need at least as many safety ops as destabilizing ops
|
|
131
|
+
safety_ratio = min(safety_count / destabilizing_count, 1.0)
|
|
132
|
+
base_safety = 0.5 + 0.5 * safety_ratio
|
|
133
|
+
else:
|
|
134
|
+
# No destabilizing ops = inherently safer
|
|
135
|
+
base_safety = 0.8 + 0.2 * (safety_count / len(sequence))
|
|
136
|
+
|
|
137
|
+
return min(base_safety, 1.0)
|
|
138
|
+
|
|
139
|
+
def _calculate_alliance_strength(self, G: nx.Graph, sequence: List[str]) -> float:
|
|
140
|
+
"""Calculate strength of therapeutic alliance.
|
|
141
|
+
|
|
142
|
+
Parameters
|
|
143
|
+
----------
|
|
144
|
+
G : nx.Graph
|
|
145
|
+
Network graph.
|
|
146
|
+
sequence : List[str]
|
|
147
|
+
Operator sequence.
|
|
148
|
+
|
|
149
|
+
Returns
|
|
150
|
+
-------
|
|
151
|
+
float
|
|
152
|
+
Alliance strength score [0, 1].
|
|
153
|
+
"""
|
|
154
|
+
# Alliance built through connection operators
|
|
155
|
+
connection_ops = {"emission", "reception", "coupling", "resonance"}
|
|
156
|
+
connection_count = sum(1 for op in sequence if op in connection_ops)
|
|
157
|
+
|
|
158
|
+
# Coherence strengthens alliance
|
|
159
|
+
coherence_count = sequence.count("coherence")
|
|
160
|
+
|
|
161
|
+
if len(sequence) == 0:
|
|
162
|
+
return 0.0
|
|
163
|
+
|
|
164
|
+
# More connection = stronger alliance
|
|
165
|
+
connection_ratio = connection_count / len(sequence)
|
|
166
|
+
|
|
167
|
+
# Coherence multiplier
|
|
168
|
+
coherence_bonus = min(coherence_count * 0.1, 0.3)
|
|
169
|
+
|
|
170
|
+
# Network density as proxy for mutual understanding
|
|
171
|
+
if len(G.nodes()) > 1:
|
|
172
|
+
density = nx.density(G)
|
|
173
|
+
else:
|
|
174
|
+
density = 0.0
|
|
175
|
+
|
|
176
|
+
# Combine factors
|
|
177
|
+
alliance_strength = (
|
|
178
|
+
0.5 * connection_ratio + 0.3 * density + 0.2
|
|
179
|
+
) + coherence_bonus
|
|
180
|
+
|
|
181
|
+
return min(alliance_strength, 1.0)
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
"""Type stubs for tnfr.extensions.medical.health_analyzers"""
|
|
2
|
+
|
|
3
|
+
from typing import Any, Dict, List
|
|
4
|
+
import networkx as nx
|
|
5
|
+
|
|
6
|
+
class TherapeuticHealthAnalyzer:
|
|
7
|
+
def analyze_therapeutic_health(
|
|
8
|
+
self, G: nx.Graph, sequence: List[str], **kwargs: Any
|
|
9
|
+
) -> Dict[str, float]: ...
|
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
"""Medical domain pattern definitions."""
|
|
2
|
+
|
|
3
|
+
from ..base import PatternDefinition
|
|
4
|
+
|
|
5
|
+
# Therapeutic Alliance Pattern
|
|
6
|
+
THERAPEUTIC_ALLIANCE = PatternDefinition(
|
|
7
|
+
name="therapeutic_alliance",
|
|
8
|
+
sequence=["emission", "reception", "coherence", "resonance"],
|
|
9
|
+
description="Establishing therapeutic trust and rapport",
|
|
10
|
+
use_cases=[
|
|
11
|
+
"Initial therapy session - building connection",
|
|
12
|
+
"Re-establishing alliance after rupture",
|
|
13
|
+
"Deepening therapeutic relationship",
|
|
14
|
+
],
|
|
15
|
+
health_requirements={
|
|
16
|
+
"min_coherence": 0.75,
|
|
17
|
+
"min_sense_index": 0.70,
|
|
18
|
+
},
|
|
19
|
+
domain_context={
|
|
20
|
+
"real_world_mapping": (
|
|
21
|
+
"Maps to Carl Rogers' therapeutic alliance concept: "
|
|
22
|
+
"emission (therapist presence), reception (active listening), "
|
|
23
|
+
"coherence (mutual understanding), resonance (empathic attunement)"
|
|
24
|
+
),
|
|
25
|
+
"expected_outcomes": (
|
|
26
|
+
"Strong working alliance, patient feels heard and understood, "
|
|
27
|
+
"foundation for therapeutic work established"
|
|
28
|
+
),
|
|
29
|
+
"failure_modes": (
|
|
30
|
+
"Premature coherence without genuine reception can feel inauthentic, "
|
|
31
|
+
"lack of resonance prevents deeper connection"
|
|
32
|
+
),
|
|
33
|
+
},
|
|
34
|
+
examples=[
|
|
35
|
+
{
|
|
36
|
+
"name": "Initial Session - Trust Building",
|
|
37
|
+
"context": "First meeting with new patient, establishing safety",
|
|
38
|
+
"sequence": ["emission", "reception", "coherence", "resonance"],
|
|
39
|
+
"health_metrics": {"C_t": 0.82, "Si": 0.76},
|
|
40
|
+
},
|
|
41
|
+
{
|
|
42
|
+
"name": "Alliance Repair",
|
|
43
|
+
"context": "After misunderstanding, rebuilding connection",
|
|
44
|
+
"sequence": ["emission", "reception", "coherence", "resonance"],
|
|
45
|
+
"health_metrics": {"C_t": 0.79, "Si": 0.74},
|
|
46
|
+
},
|
|
47
|
+
{
|
|
48
|
+
"name": "Deepening Phase",
|
|
49
|
+
"context": "Moving from surface to deeper therapeutic work",
|
|
50
|
+
"sequence": ["emission", "reception", "coherence", "resonance"],
|
|
51
|
+
"health_metrics": {"C_t": 0.85, "Si": 0.81},
|
|
52
|
+
},
|
|
53
|
+
],
|
|
54
|
+
)
|
|
55
|
+
|
|
56
|
+
# Crisis Intervention Pattern
|
|
57
|
+
CRISIS_INTERVENTION = PatternDefinition(
|
|
58
|
+
name="crisis_intervention",
|
|
59
|
+
sequence=["dissonance", "silence", "coherence", "resonance"],
|
|
60
|
+
description="Stabilizing acute emotional distress",
|
|
61
|
+
use_cases=[
|
|
62
|
+
"Acute anxiety or panic attack",
|
|
63
|
+
"Emotional overwhelm during session",
|
|
64
|
+
"Crisis situation requiring immediate stabilization",
|
|
65
|
+
],
|
|
66
|
+
health_requirements={
|
|
67
|
+
"min_coherence": 0.75,
|
|
68
|
+
"min_sense_index": 0.70,
|
|
69
|
+
},
|
|
70
|
+
domain_context={
|
|
71
|
+
"real_world_mapping": (
|
|
72
|
+
"Follows crisis intervention model: dissonance (acknowledge distress), "
|
|
73
|
+
"silence (create space/pause), coherence (stabilization techniques), "
|
|
74
|
+
"resonance (empathic grounding)"
|
|
75
|
+
),
|
|
76
|
+
"expected_outcomes": (
|
|
77
|
+
"Reduced emotional intensity, restored sense of safety, "
|
|
78
|
+
"ability to engage in problem-solving"
|
|
79
|
+
),
|
|
80
|
+
"failure_modes": (
|
|
81
|
+
"Skipping silence phase can escalate crisis, "
|
|
82
|
+
"insufficient coherence leaves patient dysregulated"
|
|
83
|
+
),
|
|
84
|
+
},
|
|
85
|
+
examples=[
|
|
86
|
+
{
|
|
87
|
+
"name": "Panic Attack Intervention",
|
|
88
|
+
"context": "Patient experiencing acute panic in session",
|
|
89
|
+
"sequence": ["dissonance", "silence", "coherence", "resonance"],
|
|
90
|
+
"health_metrics": {"C_t": 0.78, "Si": 0.77},
|
|
91
|
+
},
|
|
92
|
+
{
|
|
93
|
+
"name": "Emotional Overwhelm",
|
|
94
|
+
"context": "Patient flooded with difficult emotions",
|
|
95
|
+
"sequence": ["dissonance", "silence", "coherence", "resonance"],
|
|
96
|
+
"health_metrics": {"C_t": 0.81, "Si": 0.79},
|
|
97
|
+
},
|
|
98
|
+
{
|
|
99
|
+
"name": "Acute Grief Response",
|
|
100
|
+
"context": "Managing intense grief reaction",
|
|
101
|
+
"sequence": ["dissonance", "silence", "coherence", "resonance"],
|
|
102
|
+
"health_metrics": {"C_t": 0.76, "Si": 0.75},
|
|
103
|
+
},
|
|
104
|
+
],
|
|
105
|
+
)
|
|
106
|
+
|
|
107
|
+
# Integration Phase Pattern
|
|
108
|
+
INTEGRATION_PHASE = PatternDefinition(
|
|
109
|
+
name="integration_phase",
|
|
110
|
+
sequence=["coupling", "self_organization", "expansion", "coherence"],
|
|
111
|
+
description="Integrating insights and new perspectives",
|
|
112
|
+
use_cases=[
|
|
113
|
+
"After breakthrough moment - consolidating learning",
|
|
114
|
+
"Connecting disparate experiences into coherent narrative",
|
|
115
|
+
"Expanding awareness to include new perspectives",
|
|
116
|
+
],
|
|
117
|
+
health_requirements={
|
|
118
|
+
"min_coherence": 0.75,
|
|
119
|
+
"min_sense_index": 0.70,
|
|
120
|
+
},
|
|
121
|
+
domain_context={
|
|
122
|
+
"real_world_mapping": (
|
|
123
|
+
"Reflects integration process in therapy: coupling (connecting elements), "
|
|
124
|
+
"self_organization (natural meaning-making), expansion (broadening view), "
|
|
125
|
+
"coherence (consolidating new understanding)"
|
|
126
|
+
),
|
|
127
|
+
"expected_outcomes": (
|
|
128
|
+
"Integrated self-narrative, expanded perspective, "
|
|
129
|
+
"sustainable new patterns of thinking/feeling"
|
|
130
|
+
),
|
|
131
|
+
"failure_modes": (
|
|
132
|
+
"Premature expansion without coupling can be destabilizing, "
|
|
133
|
+
"lack of final coherence leaves insights fragmented"
|
|
134
|
+
),
|
|
135
|
+
},
|
|
136
|
+
examples=[
|
|
137
|
+
{
|
|
138
|
+
"name": "Post-Breakthrough Integration",
|
|
139
|
+
"context": "After major insight, integrating into broader understanding",
|
|
140
|
+
"sequence": ["coupling", "self_organization", "expansion", "coherence"],
|
|
141
|
+
"health_metrics": {"C_t": 0.84, "Si": 0.80},
|
|
142
|
+
},
|
|
143
|
+
{
|
|
144
|
+
"name": "Narrative Coherence",
|
|
145
|
+
"context": "Building coherent life story from fragmented experiences",
|
|
146
|
+
"sequence": ["coupling", "self_organization", "expansion", "coherence"],
|
|
147
|
+
"health_metrics": {"C_t": 0.83, "Si": 0.78},
|
|
148
|
+
},
|
|
149
|
+
{
|
|
150
|
+
"name": "Perspective Expansion",
|
|
151
|
+
"context": "Including previously rejected aspects of self",
|
|
152
|
+
"sequence": ["coupling", "self_organization", "expansion", "coherence"],
|
|
153
|
+
"health_metrics": {"C_t": 0.86, "Si": 0.82},
|
|
154
|
+
},
|
|
155
|
+
],
|
|
156
|
+
)
|
|
157
|
+
|
|
158
|
+
# Collect all patterns
|
|
159
|
+
PATTERNS = {
|
|
160
|
+
"therapeutic_alliance": THERAPEUTIC_ALLIANCE,
|
|
161
|
+
"crisis_intervention": CRISIS_INTERVENTION,
|
|
162
|
+
"integration_phase": INTEGRATION_PHASE,
|
|
163
|
+
}
|
tnfr/flatten.py
CHANGED
|
@@ -2,21 +2,14 @@
|
|
|
2
2
|
|
|
3
3
|
from __future__ import annotations
|
|
4
4
|
|
|
5
|
-
from collections.abc import
|
|
5
|
+
from collections.abc import Iterable, Mapping, Sequence
|
|
6
6
|
from dataclasses import dataclass
|
|
7
|
-
from
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
from .
|
|
11
|
-
MAX_MATERIALIZE_DEFAULT,
|
|
12
|
-
ensure_collection,
|
|
13
|
-
flatten_structure,
|
|
14
|
-
STRING_TYPES,
|
|
15
|
-
normalize_materialize_limit,
|
|
16
|
-
)
|
|
17
|
-
from .constants_glyphs import GLYPHS_CANONICAL_SET
|
|
18
|
-
from .tokens import THOL, TARGET, WAIT, OpTag, THOL_SENTINEL, Token
|
|
7
|
+
from typing import Any, Callable, cast
|
|
8
|
+
|
|
9
|
+
from .config.constants import GLYPHS_CANONICAL_SET
|
|
10
|
+
from .tokens import TARGET, THOL, THOL_SENTINEL, WAIT, OpTag, Token
|
|
19
11
|
from .types import Glyph
|
|
12
|
+
from .utils import MAX_MATERIALIZE_DEFAULT, ensure_collection, flatten_structure
|
|
20
13
|
|
|
21
14
|
__all__ = [
|
|
22
15
|
"THOLEvaluator",
|
|
@@ -41,38 +34,12 @@ def _iter_source(
|
|
|
41
34
|
) -> Iterable[Any]:
|
|
42
35
|
"""Yield items from ``seq`` enforcing ``max_materialize`` when needed."""
|
|
43
36
|
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
return (seq,)
|
|
49
|
-
|
|
50
|
-
if not isinstance(seq, Iterable):
|
|
51
|
-
raise TypeError(f"{seq!r} is not iterable")
|
|
52
|
-
|
|
53
|
-
limit = normalize_materialize_limit(max_materialize)
|
|
54
|
-
if limit is None:
|
|
55
|
-
return seq
|
|
56
|
-
if limit == 0:
|
|
57
|
-
return ()
|
|
58
|
-
|
|
59
|
-
iterator = iter(seq)
|
|
60
|
-
|
|
61
|
-
def _preview() -> Iterable[Any]:
|
|
62
|
-
for idx, item in enumerate(iterator):
|
|
63
|
-
yield item
|
|
64
|
-
if idx >= limit:
|
|
65
|
-
break
|
|
66
|
-
|
|
67
|
-
preview = ensure_collection(
|
|
68
|
-
_preview(),
|
|
69
|
-
max_materialize=limit,
|
|
37
|
+
_, view = ensure_collection(
|
|
38
|
+
cast(Iterable[Token], seq),
|
|
39
|
+
max_materialize=max_materialize,
|
|
40
|
+
return_view=True,
|
|
70
41
|
)
|
|
71
|
-
|
|
72
|
-
if not preview:
|
|
73
|
-
return ()
|
|
74
|
-
|
|
75
|
-
return chain(preview, iterator)
|
|
42
|
+
return view
|
|
76
43
|
|
|
77
44
|
|
|
78
45
|
def _push_thol_frame(
|
|
@@ -88,11 +55,14 @@ def _push_thol_frame(
|
|
|
88
55
|
raise ValueError("repeat must be ≥1")
|
|
89
56
|
if item.force_close is not None and not isinstance(item.force_close, Glyph):
|
|
90
57
|
raise ValueError("force_close must be a Glyph")
|
|
58
|
+
# TNFR invariant: THOL blocks must close to maintain operator closure (§3.4)
|
|
59
|
+
# Only SHA (silence) and NUL (contraction) are valid THOL closures
|
|
60
|
+
# Default to NUL (contraction) when no valid closure specified
|
|
91
61
|
closing = (
|
|
92
62
|
item.force_close
|
|
93
63
|
if isinstance(item.force_close, Glyph)
|
|
94
64
|
and item.force_close in {Glyph.SHA, Glyph.NUL}
|
|
95
|
-
else None
|
|
65
|
+
else (Glyph.NUL if item.force_close is None else None)
|
|
96
66
|
)
|
|
97
67
|
seq0 = ensure_collection(
|
|
98
68
|
item.body,
|
|
@@ -124,9 +94,13 @@ class THOLEvaluator:
|
|
|
124
94
|
self._started = False
|
|
125
95
|
|
|
126
96
|
def __iter__(self) -> "THOLEvaluator":
|
|
97
|
+
"""Return the evaluator itself to stream THOL expansion."""
|
|
98
|
+
|
|
127
99
|
return self
|
|
128
100
|
|
|
129
|
-
def __next__(self):
|
|
101
|
+
def __next__(self) -> Token | object:
|
|
102
|
+
"""Yield the next token or :data:`THOL_SENTINEL` during evaluation."""
|
|
103
|
+
|
|
130
104
|
if not self._started:
|
|
131
105
|
self._started = True
|
|
132
106
|
return THOL_SENTINEL
|
|
@@ -199,7 +173,12 @@ def _coerce_mapping_token(
|
|
|
199
173
|
raise ValueError(f"Invalid token mapping: {mapping!r}")
|
|
200
174
|
key, value = next(iter(mapping.items()))
|
|
201
175
|
if key == "WAIT":
|
|
202
|
-
|
|
176
|
+
# Handle both formats: {"WAIT": 1} and {"WAIT": {"steps": 1}}
|
|
177
|
+
if isinstance(value, Mapping):
|
|
178
|
+
steps = value.get("steps", 1)
|
|
179
|
+
else:
|
|
180
|
+
steps = value
|
|
181
|
+
return WAIT(int(steps))
|
|
203
182
|
if key == "TARGET":
|
|
204
183
|
return TARGET(value)
|
|
205
184
|
if key != "THOL":
|
|
@@ -211,7 +190,7 @@ def _coerce_mapping_token(
|
|
|
211
190
|
if isinstance(close, str):
|
|
212
191
|
close_enum = Glyph.__members__.get(close)
|
|
213
192
|
if close_enum is None:
|
|
214
|
-
raise ValueError(f"
|
|
193
|
+
raise ValueError(f"Unknown closing glyph: {close!r}")
|
|
215
194
|
close = close_enum
|
|
216
195
|
elif close is not None and not isinstance(close, Glyph):
|
|
217
196
|
raise TypeError("THOL close glyph must be a Glyph or string name")
|
|
@@ -235,7 +214,7 @@ def parse_program_tokens(
|
|
|
235
214
|
|
|
236
215
|
sequence = _iter_source(obj, max_materialize=max_materialize)
|
|
237
216
|
|
|
238
|
-
def _expand(item: Any):
|
|
217
|
+
def _expand(item: Any) -> Iterable[Any] | None:
|
|
239
218
|
if isinstance(item, Mapping):
|
|
240
219
|
return (_coerce_mapping_token(item, max_materialize=max_materialize),)
|
|
241
220
|
return None
|
|
@@ -259,7 +238,7 @@ def _flatten(
|
|
|
259
238
|
ops: list[tuple[OpTag, Any]] = []
|
|
260
239
|
sequence = _iter_source(seq, max_materialize=max_materialize)
|
|
261
240
|
|
|
262
|
-
def _expand(item: Any):
|
|
241
|
+
def _expand(item: Any) -> Iterable[Any] | None:
|
|
263
242
|
if isinstance(item, THOL):
|
|
264
243
|
return THOLEvaluator(item, max_materialize=max_materialize)
|
|
265
244
|
if isinstance(item, Mapping):
|