tnfr 3.0.3__py3-none-any.whl → 8.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of tnfr might be problematic. Click here for more details.
- tnfr/__init__.py +375 -56
- tnfr/__init__.pyi +33 -0
- tnfr/_compat.py +10 -0
- tnfr/_generated_version.py +34 -0
- tnfr/_version.py +49 -0
- tnfr/_version.pyi +7 -0
- tnfr/alias.py +723 -0
- tnfr/alias.pyi +108 -0
- tnfr/backends/__init__.py +354 -0
- tnfr/backends/jax_backend.py +173 -0
- tnfr/backends/numpy_backend.py +238 -0
- tnfr/backends/optimized_numpy.py +420 -0
- tnfr/backends/torch_backend.py +408 -0
- tnfr/cache.py +171 -0
- tnfr/cache.pyi +13 -0
- tnfr/cli/__init__.py +110 -0
- tnfr/cli/__init__.pyi +26 -0
- tnfr/cli/arguments.py +489 -0
- tnfr/cli/arguments.pyi +29 -0
- tnfr/cli/execution.py +914 -0
- tnfr/cli/execution.pyi +70 -0
- tnfr/cli/interactive_validator.py +614 -0
- tnfr/cli/utils.py +51 -0
- tnfr/cli/utils.pyi +7 -0
- tnfr/cli/validate.py +236 -0
- tnfr/compat/__init__.py +85 -0
- tnfr/compat/dataclass.py +136 -0
- tnfr/compat/jsonschema_stub.py +61 -0
- tnfr/compat/matplotlib_stub.py +73 -0
- tnfr/compat/numpy_stub.py +155 -0
- tnfr/config/__init__.py +224 -0
- tnfr/config/__init__.pyi +10 -0
- tnfr/config/constants.py +104 -0
- tnfr/config/constants.pyi +12 -0
- tnfr/config/defaults.py +54 -0
- tnfr/config/defaults_core.py +212 -0
- tnfr/config/defaults_init.py +33 -0
- tnfr/config/defaults_metric.py +104 -0
- tnfr/config/feature_flags.py +81 -0
- tnfr/config/feature_flags.pyi +16 -0
- tnfr/config/glyph_constants.py +31 -0
- tnfr/config/init.py +77 -0
- tnfr/config/init.pyi +8 -0
- tnfr/config/operator_names.py +254 -0
- tnfr/config/operator_names.pyi +36 -0
- tnfr/config/physics_derivation.py +354 -0
- tnfr/config/presets.py +83 -0
- tnfr/config/presets.pyi +7 -0
- tnfr/config/security.py +927 -0
- tnfr/config/thresholds.py +114 -0
- tnfr/config/tnfr_config.py +498 -0
- tnfr/constants/__init__.py +92 -0
- tnfr/constants/__init__.pyi +92 -0
- tnfr/constants/aliases.py +33 -0
- tnfr/constants/aliases.pyi +27 -0
- tnfr/constants/init.py +33 -0
- tnfr/constants/init.pyi +12 -0
- tnfr/constants/metric.py +104 -0
- tnfr/constants/metric.pyi +19 -0
- tnfr/core/__init__.py +33 -0
- tnfr/core/container.py +226 -0
- tnfr/core/default_implementations.py +329 -0
- tnfr/core/interfaces.py +279 -0
- tnfr/dynamics/__init__.py +238 -0
- tnfr/dynamics/__init__.pyi +83 -0
- tnfr/dynamics/adaptation.py +267 -0
- tnfr/dynamics/adaptation.pyi +7 -0
- tnfr/dynamics/adaptive_sequences.py +189 -0
- tnfr/dynamics/adaptive_sequences.pyi +14 -0
- tnfr/dynamics/aliases.py +23 -0
- tnfr/dynamics/aliases.pyi +19 -0
- tnfr/dynamics/bifurcation.py +232 -0
- tnfr/dynamics/canonical.py +229 -0
- tnfr/dynamics/canonical.pyi +48 -0
- tnfr/dynamics/coordination.py +385 -0
- tnfr/dynamics/coordination.pyi +25 -0
- tnfr/dynamics/dnfr.py +3034 -0
- tnfr/dynamics/dnfr.pyi +26 -0
- tnfr/dynamics/dynamic_limits.py +225 -0
- tnfr/dynamics/feedback.py +252 -0
- tnfr/dynamics/feedback.pyi +24 -0
- tnfr/dynamics/fused_dnfr.py +454 -0
- tnfr/dynamics/homeostasis.py +157 -0
- tnfr/dynamics/homeostasis.pyi +14 -0
- tnfr/dynamics/integrators.py +661 -0
- tnfr/dynamics/integrators.pyi +36 -0
- tnfr/dynamics/learning.py +310 -0
- tnfr/dynamics/learning.pyi +33 -0
- tnfr/dynamics/metabolism.py +254 -0
- tnfr/dynamics/nbody.py +796 -0
- tnfr/dynamics/nbody_tnfr.py +783 -0
- tnfr/dynamics/propagation.py +326 -0
- tnfr/dynamics/runtime.py +908 -0
- tnfr/dynamics/runtime.pyi +77 -0
- tnfr/dynamics/sampling.py +36 -0
- tnfr/dynamics/sampling.pyi +7 -0
- tnfr/dynamics/selectors.py +711 -0
- tnfr/dynamics/selectors.pyi +85 -0
- tnfr/dynamics/structural_clip.py +207 -0
- tnfr/errors/__init__.py +37 -0
- tnfr/errors/contextual.py +492 -0
- tnfr/execution.py +223 -0
- tnfr/execution.pyi +45 -0
- tnfr/extensions/__init__.py +205 -0
- tnfr/extensions/__init__.pyi +18 -0
- tnfr/extensions/base.py +173 -0
- tnfr/extensions/base.pyi +35 -0
- tnfr/extensions/business/__init__.py +71 -0
- tnfr/extensions/business/__init__.pyi +11 -0
- tnfr/extensions/business/cookbook.py +88 -0
- tnfr/extensions/business/cookbook.pyi +8 -0
- tnfr/extensions/business/health_analyzers.py +202 -0
- tnfr/extensions/business/health_analyzers.pyi +9 -0
- tnfr/extensions/business/patterns.py +183 -0
- tnfr/extensions/business/patterns.pyi +8 -0
- tnfr/extensions/medical/__init__.py +73 -0
- tnfr/extensions/medical/__init__.pyi +11 -0
- tnfr/extensions/medical/cookbook.py +88 -0
- tnfr/extensions/medical/cookbook.pyi +8 -0
- tnfr/extensions/medical/health_analyzers.py +181 -0
- tnfr/extensions/medical/health_analyzers.pyi +9 -0
- tnfr/extensions/medical/patterns.py +163 -0
- tnfr/extensions/medical/patterns.pyi +8 -0
- tnfr/flatten.py +262 -0
- tnfr/flatten.pyi +21 -0
- tnfr/gamma.py +354 -0
- tnfr/gamma.pyi +36 -0
- tnfr/glyph_history.py +377 -0
- tnfr/glyph_history.pyi +35 -0
- tnfr/glyph_runtime.py +19 -0
- tnfr/glyph_runtime.pyi +8 -0
- tnfr/immutable.py +218 -0
- tnfr/immutable.pyi +36 -0
- tnfr/initialization.py +203 -0
- tnfr/initialization.pyi +65 -0
- tnfr/io.py +10 -0
- tnfr/io.pyi +13 -0
- tnfr/locking.py +37 -0
- tnfr/locking.pyi +7 -0
- tnfr/mathematics/__init__.py +79 -0
- tnfr/mathematics/backend.py +453 -0
- tnfr/mathematics/backend.pyi +99 -0
- tnfr/mathematics/dynamics.py +408 -0
- tnfr/mathematics/dynamics.pyi +90 -0
- tnfr/mathematics/epi.py +391 -0
- tnfr/mathematics/epi.pyi +65 -0
- tnfr/mathematics/generators.py +242 -0
- tnfr/mathematics/generators.pyi +29 -0
- tnfr/mathematics/metrics.py +119 -0
- tnfr/mathematics/metrics.pyi +16 -0
- tnfr/mathematics/operators.py +239 -0
- tnfr/mathematics/operators.pyi +59 -0
- tnfr/mathematics/operators_factory.py +124 -0
- tnfr/mathematics/operators_factory.pyi +11 -0
- tnfr/mathematics/projection.py +87 -0
- tnfr/mathematics/projection.pyi +33 -0
- tnfr/mathematics/runtime.py +182 -0
- tnfr/mathematics/runtime.pyi +64 -0
- tnfr/mathematics/spaces.py +256 -0
- tnfr/mathematics/spaces.pyi +83 -0
- tnfr/mathematics/transforms.py +305 -0
- tnfr/mathematics/transforms.pyi +62 -0
- tnfr/metrics/__init__.py +79 -0
- tnfr/metrics/__init__.pyi +20 -0
- tnfr/metrics/buffer_cache.py +163 -0
- tnfr/metrics/buffer_cache.pyi +24 -0
- tnfr/metrics/cache_utils.py +214 -0
- tnfr/metrics/coherence.py +2009 -0
- tnfr/metrics/coherence.pyi +129 -0
- tnfr/metrics/common.py +158 -0
- tnfr/metrics/common.pyi +35 -0
- tnfr/metrics/core.py +316 -0
- tnfr/metrics/core.pyi +13 -0
- tnfr/metrics/diagnosis.py +833 -0
- tnfr/metrics/diagnosis.pyi +86 -0
- tnfr/metrics/emergence.py +245 -0
- tnfr/metrics/export.py +179 -0
- tnfr/metrics/export.pyi +7 -0
- tnfr/metrics/glyph_timing.py +379 -0
- tnfr/metrics/glyph_timing.pyi +81 -0
- tnfr/metrics/learning_metrics.py +280 -0
- tnfr/metrics/learning_metrics.pyi +21 -0
- tnfr/metrics/phase_coherence.py +351 -0
- tnfr/metrics/phase_compatibility.py +349 -0
- tnfr/metrics/reporting.py +183 -0
- tnfr/metrics/reporting.pyi +25 -0
- tnfr/metrics/sense_index.py +1203 -0
- tnfr/metrics/sense_index.pyi +9 -0
- tnfr/metrics/trig.py +373 -0
- tnfr/metrics/trig.pyi +13 -0
- tnfr/metrics/trig_cache.py +233 -0
- tnfr/metrics/trig_cache.pyi +10 -0
- tnfr/multiscale/__init__.py +32 -0
- tnfr/multiscale/hierarchical.py +517 -0
- tnfr/node.py +763 -0
- tnfr/node.pyi +139 -0
- tnfr/observers.py +255 -130
- tnfr/observers.pyi +31 -0
- tnfr/ontosim.py +144 -137
- tnfr/ontosim.pyi +28 -0
- tnfr/operators/__init__.py +1672 -0
- tnfr/operators/__init__.pyi +31 -0
- tnfr/operators/algebra.py +277 -0
- tnfr/operators/canonical_patterns.py +420 -0
- tnfr/operators/cascade.py +267 -0
- tnfr/operators/cycle_detection.py +358 -0
- tnfr/operators/definitions.py +4108 -0
- tnfr/operators/definitions.pyi +78 -0
- tnfr/operators/grammar.py +1164 -0
- tnfr/operators/grammar.pyi +140 -0
- tnfr/operators/hamiltonian.py +710 -0
- tnfr/operators/health_analyzer.py +809 -0
- tnfr/operators/jitter.py +272 -0
- tnfr/operators/jitter.pyi +11 -0
- tnfr/operators/lifecycle.py +314 -0
- tnfr/operators/metabolism.py +618 -0
- tnfr/operators/metrics.py +2138 -0
- tnfr/operators/network_analysis/__init__.py +27 -0
- tnfr/operators/network_analysis/source_detection.py +186 -0
- tnfr/operators/nodal_equation.py +395 -0
- tnfr/operators/pattern_detection.py +660 -0
- tnfr/operators/patterns.py +669 -0
- tnfr/operators/postconditions/__init__.py +38 -0
- tnfr/operators/postconditions/mutation.py +236 -0
- tnfr/operators/preconditions/__init__.py +1226 -0
- tnfr/operators/preconditions/coherence.py +305 -0
- tnfr/operators/preconditions/dissonance.py +236 -0
- tnfr/operators/preconditions/emission.py +128 -0
- tnfr/operators/preconditions/mutation.py +580 -0
- tnfr/operators/preconditions/reception.py +125 -0
- tnfr/operators/preconditions/resonance.py +364 -0
- tnfr/operators/registry.py +74 -0
- tnfr/operators/registry.pyi +9 -0
- tnfr/operators/remesh.py +1809 -0
- tnfr/operators/remesh.pyi +26 -0
- tnfr/operators/structural_units.py +268 -0
- tnfr/operators/unified_grammar.py +105 -0
- tnfr/parallel/__init__.py +54 -0
- tnfr/parallel/auto_scaler.py +234 -0
- tnfr/parallel/distributed.py +384 -0
- tnfr/parallel/engine.py +238 -0
- tnfr/parallel/gpu_engine.py +420 -0
- tnfr/parallel/monitoring.py +248 -0
- tnfr/parallel/partitioner.py +459 -0
- tnfr/py.typed +0 -0
- tnfr/recipes/__init__.py +22 -0
- tnfr/recipes/cookbook.py +743 -0
- tnfr/rng.py +178 -0
- tnfr/rng.pyi +26 -0
- tnfr/schemas/__init__.py +8 -0
- tnfr/schemas/grammar.json +94 -0
- tnfr/sdk/__init__.py +107 -0
- tnfr/sdk/__init__.pyi +19 -0
- tnfr/sdk/adaptive_system.py +173 -0
- tnfr/sdk/adaptive_system.pyi +21 -0
- tnfr/sdk/builders.py +370 -0
- tnfr/sdk/builders.pyi +51 -0
- tnfr/sdk/fluent.py +1121 -0
- tnfr/sdk/fluent.pyi +74 -0
- tnfr/sdk/templates.py +342 -0
- tnfr/sdk/templates.pyi +41 -0
- tnfr/sdk/utils.py +341 -0
- tnfr/secure_config.py +46 -0
- tnfr/security/__init__.py +70 -0
- tnfr/security/database.py +514 -0
- tnfr/security/subprocess.py +503 -0
- tnfr/security/validation.py +290 -0
- tnfr/selector.py +247 -0
- tnfr/selector.pyi +19 -0
- tnfr/sense.py +378 -0
- tnfr/sense.pyi +23 -0
- tnfr/services/__init__.py +17 -0
- tnfr/services/orchestrator.py +325 -0
- tnfr/sparse/__init__.py +39 -0
- tnfr/sparse/representations.py +492 -0
- tnfr/structural.py +705 -0
- tnfr/structural.pyi +83 -0
- tnfr/telemetry/__init__.py +35 -0
- tnfr/telemetry/cache_metrics.py +226 -0
- tnfr/telemetry/cache_metrics.pyi +64 -0
- tnfr/telemetry/nu_f.py +422 -0
- tnfr/telemetry/nu_f.pyi +108 -0
- tnfr/telemetry/verbosity.py +36 -0
- tnfr/telemetry/verbosity.pyi +15 -0
- tnfr/tokens.py +58 -0
- tnfr/tokens.pyi +36 -0
- tnfr/tools/__init__.py +20 -0
- tnfr/tools/domain_templates.py +478 -0
- tnfr/tools/sequence_generator.py +846 -0
- tnfr/topology/__init__.py +13 -0
- tnfr/topology/asymmetry.py +151 -0
- tnfr/trace.py +543 -0
- tnfr/trace.pyi +42 -0
- tnfr/tutorials/__init__.py +38 -0
- tnfr/tutorials/autonomous_evolution.py +285 -0
- tnfr/tutorials/interactive.py +1576 -0
- tnfr/tutorials/structural_metabolism.py +238 -0
- tnfr/types.py +775 -0
- tnfr/types.pyi +357 -0
- tnfr/units.py +68 -0
- tnfr/units.pyi +13 -0
- tnfr/utils/__init__.py +282 -0
- tnfr/utils/__init__.pyi +215 -0
- tnfr/utils/cache.py +4223 -0
- tnfr/utils/cache.pyi +470 -0
- tnfr/utils/callbacks.py +375 -0
- tnfr/utils/callbacks.pyi +49 -0
- tnfr/utils/chunks.py +108 -0
- tnfr/utils/chunks.pyi +22 -0
- tnfr/utils/data.py +428 -0
- tnfr/utils/data.pyi +74 -0
- tnfr/utils/graph.py +85 -0
- tnfr/utils/graph.pyi +10 -0
- tnfr/utils/init.py +821 -0
- tnfr/utils/init.pyi +80 -0
- tnfr/utils/io.py +559 -0
- tnfr/utils/io.pyi +66 -0
- tnfr/utils/numeric.py +114 -0
- tnfr/utils/numeric.pyi +21 -0
- tnfr/validation/__init__.py +257 -0
- tnfr/validation/__init__.pyi +85 -0
- tnfr/validation/compatibility.py +460 -0
- tnfr/validation/compatibility.pyi +6 -0
- tnfr/validation/config.py +73 -0
- tnfr/validation/graph.py +139 -0
- tnfr/validation/graph.pyi +18 -0
- tnfr/validation/input_validation.py +755 -0
- tnfr/validation/invariants.py +712 -0
- tnfr/validation/rules.py +253 -0
- tnfr/validation/rules.pyi +44 -0
- tnfr/validation/runtime.py +279 -0
- tnfr/validation/runtime.pyi +28 -0
- tnfr/validation/sequence_validator.py +162 -0
- tnfr/validation/soft_filters.py +170 -0
- tnfr/validation/soft_filters.pyi +32 -0
- tnfr/validation/spectral.py +164 -0
- tnfr/validation/spectral.pyi +42 -0
- tnfr/validation/validator.py +1266 -0
- tnfr/validation/window.py +39 -0
- tnfr/validation/window.pyi +1 -0
- tnfr/visualization/__init__.py +98 -0
- tnfr/visualization/cascade_viz.py +256 -0
- tnfr/visualization/hierarchy.py +284 -0
- tnfr/visualization/sequence_plotter.py +784 -0
- tnfr/viz/__init__.py +60 -0
- tnfr/viz/matplotlib.py +278 -0
- tnfr/viz/matplotlib.pyi +35 -0
- tnfr-8.5.0.dist-info/METADATA +573 -0
- tnfr-8.5.0.dist-info/RECORD +353 -0
- tnfr-8.5.0.dist-info/entry_points.txt +3 -0
- tnfr-3.0.3.dist-info/licenses/LICENSE.txt → tnfr-8.5.0.dist-info/licenses/LICENSE.md +1 -1
- tnfr/constants.py +0 -183
- tnfr/dynamics.py +0 -543
- tnfr/helpers.py +0 -198
- tnfr/main.py +0 -37
- tnfr/operators.py +0 -296
- tnfr-3.0.3.dist-info/METADATA +0 -35
- tnfr-3.0.3.dist-info/RECORD +0 -13
- {tnfr-3.0.3.dist-info → tnfr-8.5.0.dist-info}/WHEEL +0 -0
- {tnfr-3.0.3.dist-info → tnfr-8.5.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
"""Specialized health analyzers for medical domain."""
|
|
2
|
+
|
|
3
|
+
from typing import Any, Dict, List
|
|
4
|
+
import networkx as nx
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class TherapeuticHealthAnalyzer:
    """Health analyzer specialized for therapeutic (medical) contexts.

    Derives domain-specific health dimensions from an operator sequence and
    the network it acts on, complementing the standard coherence and sense
    index metrics. All scores are clamped to ``[0, 1]``.

    Examples
    --------
    >>> analyzer = TherapeuticHealthAnalyzer()
    >>> G = nx.Graph()
    >>> # ... set up network ...
    >>> scores = analyzer.analyze_therapeutic_health(
    ...     G, ["emission", "reception", "coherence"]
    ... )
    >>> sorted(scores)
    ['healing_potential', 'therapeutic_alliance', 'trauma_safety']
    """

    def analyze_therapeutic_health(
        self, G: nx.Graph, sequence: List[str], **kwargs: Any
    ) -> Dict[str, float]:
        """Compute all therapeutic-specific health metrics for ``sequence``.

        Parameters
        ----------
        G : nx.Graph
            Network graph representing the therapeutic system.
        sequence : List[str]
            Operator sequence to analyze.
        **kwargs : Any
            Additional analysis parameters (currently unused).

        Returns
        -------
        Dict[str, float]
            Scores in ``[0, 1]`` keyed by dimension:
            ``healing_potential`` (capacity for positive change),
            ``trauma_safety`` (safety from re-traumatization) and
            ``therapeutic_alliance`` (strength of working relationship).
        """
        return {
            "healing_potential": self._calculate_healing_potential(G, sequence),
            "trauma_safety": self._calculate_trauma_safety(G, sequence),
            "therapeutic_alliance": self._calculate_alliance_strength(G, sequence),
        }

    def _calculate_healing_potential(self, G: nx.Graph, sequence: List[str]) -> float:
        """Score the capacity for positive therapeutic change.

        Blends the sequence's mix of growth-promoting and stabilizing
        operators (weight 0.6) with average network connectivity as a proxy
        for available resources (weight 0.4).

        Returns
        -------
        float
            Healing potential score in ``[0, 1]``; 0.0 for an empty sequence.
        """
        if not sequence:
            return 0.0

        # Operators that promote growth vs. those that stabilize.
        growth_like = {"expansion", "self_organization", "coupling"}
        stabilizing = {"coherence", "resonance"}

        total = len(sequence)
        growth_share = sum(op in growth_like for op in sequence) / total
        stability_share = sum(op in stabilizing for op in sequence) / total
        # Combined share of growth + stability operators, capped at 1.0.
        balance = min(growth_share + stability_share, 1.0)

        # Network connectivity: average degree, normalized against 5 as the
        # "fully resourced" reference degree.
        node_total = len(G.nodes())
        if node_total > 0:
            mean_degree = sum(dict(G.degree()).values()) / node_total
            connectivity = min(mean_degree / 5.0, 1.0)
        else:
            connectivity = 0.0

        return min(0.6 * balance + 0.4 * connectivity, 1.0)

    def _calculate_trauma_safety(self, G: nx.Graph, sequence: List[str]) -> float:
        """Score safety from re-traumatization.

        Destabilizing operators require a matching presence of
        safety-promoting operators; a sequence without destabilizing
        operators is inherently safer.

        Returns
        -------
        float
            Trauma safety score in ``[0, 1]``; 1.0 for an empty sequence
            (no sequence means no risk).
        """
        if not sequence:
            return 1.0

        risky = {"dissonance", "mutation"}
        grounding = {"silence", "coherence", "reception"}
        risky_total = sum(op in risky for op in sequence)
        grounding_total = sum(op in grounding for op in sequence)

        if risky_total > 0:
            # Need at least one safety op per destabilizing op for full credit.
            coverage = min(grounding_total / risky_total, 1.0)
            score = 0.5 + 0.5 * coverage
        else:
            # Baseline 0.8 when nothing destabilizing appears, plus a small
            # bonus proportional to the share of safety operators.
            score = 0.8 + 0.2 * (grounding_total / len(sequence))

        return min(score, 1.0)

    def _calculate_alliance_strength(self, G: nx.Graph, sequence: List[str]) -> float:
        """Score the strength of the therapeutic alliance.

        Combines the share of connection-building operators, graph density
        (proxy for mutual understanding), a fixed baseline of 0.2, and a
        capped bonus per ``coherence`` occurrence.

        Returns
        -------
        float
            Alliance strength score in ``[0, 1]``; 0.0 for an empty sequence.
        """
        if not sequence:
            return 0.0

        bonding = {"emission", "reception", "coupling", "resonance"}
        bonding_share = sum(op in bonding for op in sequence) / len(sequence)

        # Each coherence occurrence adds 0.1, capped at 0.3.
        coherence_boost = min(sequence.count("coherence") * 0.1, 0.3)

        # Density is only meaningful with at least two nodes.
        graph_density = nx.density(G) if len(G.nodes()) > 1 else 0.0

        score = (0.5 * bonding_share + 0.3 * graph_density + 0.2) + coherence_boost
        return min(score, 1.0)
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
"""Type stubs for tnfr.extensions.medical.health_analyzers"""
|
|
2
|
+
|
|
3
|
+
from typing import Any, Dict, List
|
|
4
|
+
import networkx as nx
|
|
5
|
+
|
|
6
|
+
class TherapeuticHealthAnalyzer:
    # Stub for the runtime class in health_analyzers.py; only the public
    # entry point is declared here.
    def analyze_therapeutic_health(
        self, G: nx.Graph, sequence: List[str], **kwargs: Any
    ) -> Dict[str, float]: ...
|
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
"""Medical domain pattern definitions."""
|
|
2
|
+
|
|
3
|
+
from ..base import PatternDefinition
|
|
4
|
+
|
|
5
|
+
# Therapeutic Alliance Pattern: trust/rapport-building sequence
# (emission → reception → coherence → resonance).
THERAPEUTIC_ALLIANCE = PatternDefinition(
    name="therapeutic_alliance",
    sequence=["emission", "reception", "coherence", "resonance"],
    description="Establishing therapeutic trust and rapport",
    use_cases=[
        "Initial therapy session - building connection",
        "Re-establishing alliance after rupture",
        "Deepening therapeutic relationship",
    ],
    health_requirements={
        "min_coherence": 0.75,
        "min_sense_index": 0.70,
    },
    domain_context={
        "real_world_mapping": (
            "Maps to Carl Rogers' therapeutic alliance concept: "
            "emission (therapist presence), reception (active listening), "
            "coherence (mutual understanding), resonance (empathic attunement)"
        ),
        "expected_outcomes": (
            "Strong working alliance, patient feels heard and understood, "
            "foundation for therapeutic work established"
        ),
        "failure_modes": (
            "Premature coherence without genuine reception can feel inauthentic, "
            "lack of resonance prevents deeper connection"
        ),
    },
    examples=[
        {
            "name": "Initial Session - Trust Building",
            "context": "First meeting with new patient, establishing safety",
            "sequence": ["emission", "reception", "coherence", "resonance"],
            "health_metrics": {"C_t": 0.82, "Si": 0.76},
        },
        {
            "name": "Alliance Repair",
            "context": "After misunderstanding, rebuilding connection",
            "sequence": ["emission", "reception", "coherence", "resonance"],
            "health_metrics": {"C_t": 0.79, "Si": 0.74},
        },
        {
            "name": "Deepening Phase",
            "context": "Moving from surface to deeper therapeutic work",
            "sequence": ["emission", "reception", "coherence", "resonance"],
            "health_metrics": {"C_t": 0.85, "Si": 0.81},
        },
    ],
)
|
|
55
|
+
|
|
56
|
+
# Crisis Intervention Pattern: acute-distress stabilization sequence
# (dissonance → silence → coherence → resonance).
CRISIS_INTERVENTION = PatternDefinition(
    name="crisis_intervention",
    sequence=["dissonance", "silence", "coherence", "resonance"],
    description="Stabilizing acute emotional distress",
    use_cases=[
        "Acute anxiety or panic attack",
        "Emotional overwhelm during session",
        "Crisis situation requiring immediate stabilization",
    ],
    health_requirements={
        "min_coherence": 0.75,
        "min_sense_index": 0.70,
    },
    domain_context={
        "real_world_mapping": (
            "Follows crisis intervention model: dissonance (acknowledge distress), "
            "silence (create space/pause), coherence (stabilization techniques), "
            "resonance (empathic grounding)"
        ),
        "expected_outcomes": (
            "Reduced emotional intensity, restored sense of safety, "
            "ability to engage in problem-solving"
        ),
        "failure_modes": (
            "Skipping silence phase can escalate crisis, "
            "insufficient coherence leaves patient dysregulated"
        ),
    },
    examples=[
        {
            "name": "Panic Attack Intervention",
            "context": "Patient experiencing acute panic in session",
            "sequence": ["dissonance", "silence", "coherence", "resonance"],
            "health_metrics": {"C_t": 0.78, "Si": 0.77},
        },
        {
            "name": "Emotional Overwhelm",
            "context": "Patient flooded with difficult emotions",
            "sequence": ["dissonance", "silence", "coherence", "resonance"],
            "health_metrics": {"C_t": 0.81, "Si": 0.79},
        },
        {
            "name": "Acute Grief Response",
            "context": "Managing intense grief reaction",
            "sequence": ["dissonance", "silence", "coherence", "resonance"],
            "health_metrics": {"C_t": 0.76, "Si": 0.75},
        },
    ],
)
|
|
106
|
+
|
|
107
|
+
# Integration Phase Pattern: insight-consolidation sequence
# (coupling → self_organization → expansion → coherence).
INTEGRATION_PHASE = PatternDefinition(
    name="integration_phase",
    sequence=["coupling", "self_organization", "expansion", "coherence"],
    description="Integrating insights and new perspectives",
    use_cases=[
        "After breakthrough moment - consolidating learning",
        "Connecting disparate experiences into coherent narrative",
        "Expanding awareness to include new perspectives",
    ],
    health_requirements={
        "min_coherence": 0.75,
        "min_sense_index": 0.70,
    },
    domain_context={
        "real_world_mapping": (
            "Reflects integration process in therapy: coupling (connecting elements), "
            "self_organization (natural meaning-making), expansion (broadening view), "
            "coherence (consolidating new understanding)"
        ),
        "expected_outcomes": (
            "Integrated self-narrative, expanded perspective, "
            "sustainable new patterns of thinking/feeling"
        ),
        "failure_modes": (
            "Premature expansion without coupling can be destabilizing, "
            "lack of final coherence leaves insights fragmented"
        ),
    },
    examples=[
        {
            "name": "Post-Breakthrough Integration",
            "context": "After major insight, integrating into broader understanding",
            "sequence": ["coupling", "self_organization", "expansion", "coherence"],
            "health_metrics": {"C_t": 0.84, "Si": 0.80},
        },
        {
            "name": "Narrative Coherence",
            "context": "Building coherent life story from fragmented experiences",
            "sequence": ["coupling", "self_organization", "expansion", "coherence"],
            "health_metrics": {"C_t": 0.83, "Si": 0.78},
        },
        {
            "name": "Perspective Expansion",
            "context": "Including previously rejected aspects of self",
            "sequence": ["coupling", "self_organization", "expansion", "coherence"],
            "health_metrics": {"C_t": 0.86, "Si": 0.82},
        },
    ],
)
|
|
157
|
+
|
|
158
|
+
# Registry of all medical-domain patterns defined in this module, keyed by
# each pattern's ``name`` field.
PATTERNS = {
    "therapeutic_alliance": THERAPEUTIC_ALLIANCE,
    "crisis_intervention": CRISIS_INTERVENTION,
    "integration_phase": INTEGRATION_PHASE,
}
|
tnfr/flatten.py
ADDED
|
@@ -0,0 +1,262 @@
|
|
|
1
|
+
"""Flattening utilities to compile TNFR token sequences."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from collections.abc import Iterable, Mapping, Sequence
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from typing import Any, Callable, cast
|
|
8
|
+
|
|
9
|
+
from .config.constants import GLYPHS_CANONICAL_SET
|
|
10
|
+
from .tokens import TARGET, THOL, THOL_SENTINEL, WAIT, OpTag, Token
|
|
11
|
+
from .types import Glyph
|
|
12
|
+
from .utils import MAX_MATERIALIZE_DEFAULT, ensure_collection, flatten_structure
|
|
13
|
+
|
|
14
|
+
# Public API of this module; helpers prefixed with ``_`` are internal.
__all__ = [
    "THOLEvaluator",
    "parse_program_tokens",
]
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass
class TholFrame:
    """Execution frame used to evaluate nested THOL blocks."""

    # Token sequence forming the THOL block's body.
    seq: Sequence[Token]
    # Index of the next token to emit from ``seq``.
    index: int
    # Number of body repetitions still pending.
    remaining: int
    # Glyph emitted when the block completes, or ``None`` for no closure.
    closing: Glyph | None
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _iter_source(
    seq: Iterable[Token] | Sequence[Token] | Any,
    *,
    max_materialize: int | None,
) -> Iterable[Any]:
    """Return an iterable view over ``seq``, bounded by ``max_materialize``.

    Delegates to :func:`ensure_collection` in view mode and discards the
    materialized collection, keeping only the streaming view.
    """
    tokens = cast(Iterable[Token], seq)
    _collection, view = ensure_collection(
        tokens,
        max_materialize=max_materialize,
        return_view=True,
    )
    return view
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def _push_thol_frame(
    frames: list[TholFrame],
    item: THOL,
    *,
    max_materialize: int | None,
) -> None:
    """Validate ``item`` and append a frame for its evaluation.

    Raises ``ValueError`` when ``item.repeat`` is below 1 or when
    ``item.force_close`` is set to a non-``Glyph`` value.
    """

    repeats = int(item.repeat)
    if repeats < 1:
        raise ValueError("repeat must be ≥1")
    if item.force_close is not None and not isinstance(item.force_close, Glyph):
        raise ValueError("force_close must be a Glyph")
    # TNFR invariant: THOL blocks must close to maintain operator closure (§3.4)
    # Only SHA (silence) and NUL (contraction) are valid THOL closures
    # Default to NUL (contraction) when no valid closure specified
    # NOTE(review): a Glyph other than SHA/NUL yields ``closing = None`` (no
    # closure emitted) instead of raising — confirm this is intentional.
    closing = (
        item.force_close
        if isinstance(item.force_close, Glyph)
        and item.force_close in {Glyph.SHA, Glyph.NUL}
        else (Glyph.NUL if item.force_close is None else None)
    )
    # Materialize the body eagerly so over-long bodies fail fast.
    seq0 = ensure_collection(
        item.body,
        max_materialize=max_materialize,
        error_msg=f"THOL body exceeds max_materialize={max_materialize}",
    )
    frames.append(
        TholFrame(
            seq=seq0,
            index=0,
            remaining=repeats,
            closing=closing,
        )
    )
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class THOLEvaluator:
    """Generator that expands a :class:`THOL` block lazily.

    Iteration yields :data:`THOL_SENTINEL` at the opening of each (possibly
    nested) THOL block, then the block's tokens, repeating the body
    ``repeat`` times, and finally the closing glyph (if any) once the block
    is exhausted.
    """

    def __init__(
        self,
        item: THOL,
        *,
        max_materialize: int | None = MAX_MATERIALIZE_DEFAULT,
    ) -> None:
        # Stack of frames; nested THOL blocks push additional frames.
        self._frames: list[TholFrame] = []
        _push_thol_frame(self._frames, item, max_materialize=max_materialize)
        self._max_materialize = max_materialize
        # True once the opening sentinel for the outermost block was emitted.
        self._started = False

    def __iter__(self) -> "THOLEvaluator":
        """Return the evaluator itself to stream THOL expansion."""

        return self

    def __next__(self) -> Token | object:
        """Yield the next token or :data:`THOL_SENTINEL` during evaluation."""

        # Emit the sentinel marking the opening of the outermost block first.
        if not self._started:
            self._started = True
            return THOL_SENTINEL
        while self._frames:
            frame = self._frames[-1]
            seq = frame.seq
            idx = frame.index
            if idx < len(seq):
                token = seq[idx]
                frame.index = idx + 1
                if isinstance(token, THOL):
                    # Nested block: push its frame and signal the opening.
                    _push_thol_frame(
                        self._frames,
                        token,
                        max_materialize=self._max_materialize,
                    )
                    return THOL_SENTINEL
                return token
            else:
                # Body exhausted: restart for another repetition, or pop the
                # frame and emit the closing glyph once all repeats are done.
                cl = frame.closing
                frame.remaining -= 1
                if frame.remaining > 0:
                    frame.index = 0
                else:
                    self._frames.pop()
                    if cl is not None:
                        return cl
        raise StopIteration
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
def _flatten_target(
    item: TARGET,
    ops: list[tuple[OpTag, Any]],
) -> None:
    """Record *item* as a ``TARGET`` operation on *ops*."""

    op = (OpTag.TARGET, item)
    ops.append(op)
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def _flatten_wait(
    item: WAIT,
    ops: list[tuple[OpTag, Any]],
) -> None:
    """Record *item* as a ``WAIT`` operation, clamping the step count to >= 1."""

    raw = int(getattr(item, "steps", 1))
    clamped = raw if raw > 1 else 1
    ops.append((OpTag.WAIT, clamped))
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def _flatten_glyph(
    item: Glyph | str,
    ops: list[tuple[OpTag, Any]],
) -> None:
    """Validate *item* against the canonical glyph set and record it."""

    if isinstance(item, Glyph):
        g = item.value
    else:
        g = str(item)
    if g in GLYPHS_CANONICAL_SET:
        ops.append((OpTag.GLYPH, g))
        return
    raise ValueError(f"Non-canonical glyph: {g}")
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
# Exact-type dispatch table mapping token classes to their flattener.
# ``_flatten`` first looks up ``type(item)`` here and, on a miss, falls back
# to scanning these entries with ``isinstance`` in insertion order, so
# subclasses of the listed types are still routed to the right handler.
_TOKEN_DISPATCH: dict[type, Callable[[Any, list[tuple[OpTag, Any]]], None]] = {
    TARGET: _flatten_target,
    WAIT: _flatten_wait,
    Glyph: _flatten_glyph,
    str: _flatten_glyph,
}
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
def _coerce_mapping_token(
    mapping: Mapping[str, Any],
    *,
    max_materialize: int | None,
) -> Token:
    """Convert a single-entry *mapping* into a ``WAIT``, ``TARGET`` or ``THOL`` token.

    Raises :class:`ValueError` for malformed mappings or unknown keys and
    :class:`TypeError` for ill-typed ``THOL`` specifications.
    """

    if len(mapping) != 1:
        raise ValueError(f"Invalid token mapping: {mapping!r}")
    ((key, value),) = mapping.items()

    if key == "WAIT":
        # Accept both {"WAIT": 1} and {"WAIT": {"steps": 1}}.
        steps = value.get("steps", 1) if isinstance(value, Mapping) else value
        return WAIT(int(steps))
    if key == "TARGET":
        return TARGET(value)
    if key != "THOL":
        raise ValueError(f"Unrecognized token: {key!r}")
    if not isinstance(value, Mapping):
        raise TypeError("THOL specification must be a mapping")

    close = value.get("close")
    if isinstance(close, str):
        # A string names a Glyph enum member; resolve it or fail loudly.
        resolved = Glyph.__members__.get(close)
        if resolved is None:
            raise ValueError(f"Unknown closing glyph: {close!r}")
        close = resolved
    elif close is not None and not isinstance(close, Glyph):
        raise TypeError("THOL close glyph must be a Glyph or string name")

    # The body is parsed recursively under the same materialization cap.
    body = parse_program_tokens(value.get("body", []), max_materialize=max_materialize)
    return THOL(body=body, repeat=int(value.get("repeat", 1)), force_close=close)
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
def parse_program_tokens(
    obj: Iterable[Any] | Sequence[Any] | Any,
    *,
    max_materialize: int | None = MAX_MATERIALIZE_DEFAULT,
) -> list[Token]:
    """Materialize ``obj`` into a list of canonical tokens.

    Accepts the same iterables handled by :func:`_flatten`, including
    dictionaries describing ``WAIT``, ``TARGET`` and ``THOL`` tokens.
    Nested iterables are flattened following :func:`flatten_structure`
    rules.
    """

    source = _iter_source(obj, max_materialize=max_materialize)

    def _expand(item: Any) -> Iterable[Any] | None:
        # Mappings describe tokens; everything else flattens normally.
        if not isinstance(item, Mapping):
            return None
        return (_coerce_mapping_token(item, max_materialize=max_materialize),)

    result: list[Token] = []
    for candidate in flatten_structure(source, expand=_expand):
        if not isinstance(candidate, (Glyph, WAIT, TARGET, THOL, str)):
            raise TypeError(f"Unsupported token: {candidate!r}")
        result.append(candidate)
    return result
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
def _flatten(
    seq: Iterable[Token] | Sequence[Token] | Any,
    *,
    max_materialize: int | None = MAX_MATERIALIZE_DEFAULT,
) -> list[tuple[OpTag, Any]]:
    """Return a list of operations ``(op, payload)`` where ``op`` ∈ :class:`OpTag`."""

    def _expand(item: Any) -> Iterable[Any] | None:
        # THOL blocks stream lazily; mappings coerce to exactly one token.
        if isinstance(item, THOL):
            return THOLEvaluator(item, max_materialize=max_materialize)
        if isinstance(item, Mapping):
            return (_coerce_mapping_token(item, max_materialize=max_materialize),)
        return None

    def _resolve(item: Any) -> Callable[[Any, list[tuple[OpTag, Any]]], None] | None:
        # Exact-type hit first, then an isinstance scan so subclasses match.
        found = _TOKEN_DISPATCH.get(type(item))
        if found is not None:
            return found
        for cls, fallback in _TOKEN_DISPATCH.items():
            if isinstance(item, cls):
                return fallback
        return None

    source = _iter_source(seq, max_materialize=max_materialize)
    ops: list[tuple[OpTag, Any]] = []
    for item in flatten_structure(source, expand=_expand):
        if item is THOL_SENTINEL:
            # Sentinel marks the opening of a THOL block in the output stream.
            ops.append((OpTag.THOL, Glyph.THOL.value))
            continue
        handler = _resolve(item)
        if handler is None:
            raise TypeError(f"Unsupported token: {item!r}")
        handler(item, ops)
    return ops
|
tnfr/flatten.pyi
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from collections.abc import Iterable, Iterator, Sequence
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
from .tokens import THOL, Token
|
|
7
|
+
|
|
8
|
+
# Export list is declared here without members; the runtime module defines it.
__all__: list[str]

# Module-level __getattr__ (PEP 562): attributes not stubbed above type as Any.
def __getattr__(name: str) -> Any: ...
|
|
11
|
+
|
|
12
|
+
class THOLEvaluator(Iterator[Token | object]):
    """Stub: lazy iterator expanding a THOL block into tokens and sentinel objects."""

    def __init__(self, item: THOL, *, max_materialize: int | None = ...) -> None: ...
    def __iter__(self) -> THOLEvaluator: ...
    def __next__(self) -> Token | object: ...
|
|
16
|
+
|
|
17
|
+
# Stub for the runtime parse_program_tokens: materializes an iterable (or a
# single object) into a list of canonical Token instances.
def parse_program_tokens(
    obj: Iterable[Any] | Sequence[Any] | Any,
    *,
    max_materialize: int | None = ...,
) -> list[Token]: ...
|