margin 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- margin-0.1.0/LICENSE +21 -0
- margin-0.1.0/PKG-INFO +138 -0
- margin-0.1.0/README.md +112 -0
- margin-0.1.0/margin/__init__.py +114 -0
- margin-0.1.0/margin/algebra.py +177 -0
- margin-0.1.0/margin/bridge.py +205 -0
- margin-0.1.0/margin/calibrate.py +182 -0
- margin-0.1.0/margin/causal.py +303 -0
- margin-0.1.0/margin/composite.py +144 -0
- margin-0.1.0/margin/confidence.py +52 -0
- margin-0.1.0/margin/contract.py +293 -0
- margin-0.1.0/margin/diff.py +178 -0
- margin-0.1.0/margin/events.py +96 -0
- margin-0.1.0/margin/forecast.py +168 -0
- margin-0.1.0/margin/health.py +105 -0
- margin-0.1.0/margin/ledger.py +225 -0
- margin-0.1.0/margin/loop.py +192 -0
- margin-0.1.0/margin/observation.py +406 -0
- margin-0.1.0/margin/policy/__init__.py +52 -0
- margin-0.1.0/margin/policy/compose.py +190 -0
- margin-0.1.0/margin/policy/core.py +330 -0
- margin-0.1.0/margin/policy/temporal.py +136 -0
- margin-0.1.0/margin/policy/trace.py +200 -0
- margin-0.1.0/margin/policy/tuning.py +195 -0
- margin-0.1.0/margin/policy/validate.py +213 -0
- margin-0.1.0/margin/predicates.py +131 -0
- margin-0.1.0/margin/provenance.py +21 -0
- margin-0.1.0/margin/transitions.py +187 -0
- margin-0.1.0/margin/uncertain.py +108 -0
- margin-0.1.0/margin/validity.py +78 -0
- margin-0.1.0/margin.egg-info/PKG-INFO +138 -0
- margin-0.1.0/margin.egg-info/SOURCES.txt +63 -0
- margin-0.1.0/margin.egg-info/dependency_links.txt +1 -0
- margin-0.1.0/margin.egg-info/top_level.txt +1 -0
- margin-0.1.0/pyproject.toml +41 -0
- margin-0.1.0/setup.cfg +4 -0
- margin-0.1.0/tests/test_algebra.py +139 -0
- margin-0.1.0/tests/test_bridge.py +162 -0
- margin-0.1.0/tests/test_calibrate.py +99 -0
- margin-0.1.0/tests/test_causal.py +206 -0
- margin-0.1.0/tests/test_composite.py +125 -0
- margin-0.1.0/tests/test_confidence.py +30 -0
- margin-0.1.0/tests/test_contract.py +224 -0
- margin-0.1.0/tests/test_diff.py +111 -0
- margin-0.1.0/tests/test_events.py +97 -0
- margin-0.1.0/tests/test_forecast.py +100 -0
- margin-0.1.0/tests/test_health.py +99 -0
- margin-0.1.0/tests/test_ledger.py +199 -0
- margin-0.1.0/tests/test_loop.py +186 -0
- margin-0.1.0/tests/test_observation.py +212 -0
- margin-0.1.0/tests/test_policy.py +381 -0
- margin-0.1.0/tests/test_policy_compose.py +153 -0
- margin-0.1.0/tests/test_policy_temporal.py +140 -0
- margin-0.1.0/tests/test_policy_trace.py +138 -0
- margin-0.1.0/tests/test_policy_tuning.py +145 -0
- margin-0.1.0/tests/test_policy_validate.py +172 -0
- margin-0.1.0/tests/test_predicates.py +149 -0
- margin-0.1.0/tests/test_provenance.py +43 -0
- margin-0.1.0/tests/test_reverse_bridge.py +67 -0
- margin-0.1.0/tests/test_staleness.py +56 -0
- margin-0.1.0/tests/test_transformer.py +301 -0
- margin-0.1.0/tests/test_transitions.py +161 -0
- margin-0.1.0/tests/test_uncertain.py +83 -0
- margin-0.1.0/tests/test_validity.py +63 -0
- margin-0.1.0/tests/test_windowing.py +99 -0
margin-0.1.0/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Seth C
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
margin-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: margin
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Typed health classification, uncertainty algebra, and correction auditing for any system that measures things and needs to explain what happened.
|
|
5
|
+
Author: Seth C
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/sethc5/margin
|
|
8
|
+
Project-URL: Repository, https://github.com/sethc5/margin
|
|
9
|
+
Project-URL: Issues, https://github.com/sethc5/margin/issues
|
|
10
|
+
Keywords: health,monitoring,uncertainty,observability,typed,classification,threshold,polarity,correction,audit,ledger,policy
|
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
19
|
+
Classifier: Topic :: Software Development :: Libraries
|
|
20
|
+
Classifier: Topic :: System :: Monitoring
|
|
21
|
+
Classifier: Typing :: Typed
|
|
22
|
+
Requires-Python: >=3.10
|
|
23
|
+
Description-Content-Type: text/markdown
|
|
24
|
+
License-File: LICENSE
|
|
25
|
+
Dynamic: license-file
|
|
26
|
+
|
|
27
|
+
# margin
|
|
28
|
+
|
|
29
|
+
**Typed health classification for systems that measure things.**
|
|
30
|
+
|
|
31
|
+
Every system with health bars, thresholds, alerts, or status dashboards solves the same problem: take a number, decide if it's healthy, correct it if it isn't, explain what happened. Margin is that pattern, typed once, with the polarity bug fixed.
|
|
32
|
+
|
|
33
|
+
```python
|
|
34
|
+
from margin import Parser, Thresholds
|
|
35
|
+
|
|
36
|
+
parser = Parser(
|
|
37
|
+
baselines={"throughput": 500.0, "error_rate": 0.002},
|
|
38
|
+
thresholds=Thresholds(intact=400.0, ablated=150.0),
|
|
39
|
+
component_thresholds={
|
|
40
|
+
"error_rate": Thresholds(intact=0.005, ablated=0.05, higher_is_better=False),
|
|
41
|
+
},
|
|
42
|
+
)
|
|
43
|
+
|
|
44
|
+
expr = parser.parse({"throughput": 480.0, "error_rate": 0.03})
|
|
45
|
+
print(expr.to_string())
|
|
46
|
+
# [throughput:INTACT(-0.04σ)] [error_rate:DEGRADED(-14.00σ)]
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
Throughput and error rate on the same scale. One is higher-is-better, the other is lower-is-better. Both classified correctly. Sigma-normalised so you can compare them.
|
|
50
|
+
|
|
51
|
+
## Install
|
|
52
|
+
|
|
53
|
+
```bash
|
|
54
|
+
pip install margin
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
Zero dependencies. Pure Python. 3.10+.
|
|
58
|
+
|
|
59
|
+
## What it does
|
|
60
|
+
|
|
61
|
+
A number comes in. Margin gives it:
|
|
62
|
+
|
|
63
|
+
- **Health** — INTACT / DEGRADED / ABLATED / RECOVERING / OOD
|
|
64
|
+
- **Polarity** — higher-is-better or lower-is-better, handled correctly everywhere
|
|
65
|
+
- **Sigma** — dimensionless deviation from baseline, always positive = healthier
|
|
66
|
+
- **Confidence** — how much the uncertainty interval overlaps the threshold
|
|
67
|
+
- **Provenance** — where this value came from, for correlation detection
|
|
68
|
+
- **Validity** — how the measurement ages (static, decaying, event-invalidated)
|
|
69
|
+
|
|
70
|
+
Then the correction loop:
|
|
71
|
+
|
|
72
|
+
- **Policy** — typed rules that decide what to do (RESTORE / SUPPRESS / AMPLIFY)
|
|
73
|
+
- **Constraints** — alpha clamping, cooldown, rate limiting
|
|
74
|
+
- **Escalation** — LOG / ALERT / HALT when the policy can't act
|
|
75
|
+
- **Contract** — typed success criteria ("reach INTACT within 5 steps")
|
|
76
|
+
- **Causal** — dependency graphs ("api is DEGRADED because db is ABLATED")
|
|
77
|
+
- **Ledger** — full audit trail of every correction, serializable, replayable
|
|
78
|
+
|
|
79
|
+
All in one call:
|
|
80
|
+
|
|
81
|
+
```python
|
|
82
|
+
from margin import step
|
|
83
|
+
|
|
84
|
+
result = step(expression, policy, ledger, graph, contract)
|
|
85
|
+
# result.correction — what to do
|
|
86
|
+
# result.explanations — why it happened
|
|
87
|
+
# result.decision — which rule matched, full trace
|
|
88
|
+
# result.contract — are we meeting our goals?
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
## The polarity bug
|
|
92
|
+
|
|
93
|
+
Every health system you've written has this bug. You check `if value >= threshold` and it works for throughput. Then you add error rate monitoring and the same check says 15% error rate is "healthy" because 0.15 >= 0.02.
|
|
94
|
+
|
|
95
|
+
Margin handles both polarities:
|
|
96
|
+
|
|
97
|
+
```python
|
|
98
|
+
# Higher is better (throughput, signal strength)
|
|
99
|
+
Thresholds(intact=80.0, ablated=30.0)
|
|
100
|
+
|
|
101
|
+
# Lower is better (error rate, latency)
|
|
102
|
+
Thresholds(intact=0.02, ablated=0.10, higher_is_better=False)
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
One flag. Threads through every comparison, every sigma calculation, every correction decision, every recovery ratio. You never think about it again.
|
|
106
|
+
|
|
107
|
+
## Auto-calibrate from data
|
|
108
|
+
|
|
109
|
+
Don't guess thresholds. Derive them from healthy measurements:
|
|
110
|
+
|
|
111
|
+
```python
|
|
112
|
+
from margin import parser_from_calibration
|
|
113
|
+
|
|
114
|
+
parser = parser_from_calibration(
|
|
115
|
+
{"rps": [490, 510, 505, 495], "latency": [48, 52, 50, 51]},
|
|
116
|
+
polarities={"latency": False},
|
|
117
|
+
)
|
|
118
|
+
```
|
|
119
|
+
|
|
120
|
+
## Five layers
|
|
121
|
+
|
|
122
|
+
| Layer | Question | Key types |
|
|
123
|
+
|---|---|---|
|
|
124
|
+
| **Foundation** | What was measured? | `Health`, `Observation`, `Expression`, `UncertainValue` |
|
|
125
|
+
| **Observability** | What changed? When will it cross? | `diff()`, `forecast()`, `track()`, `calibrate()` |
|
|
126
|
+
| **Policy** | What should we do? | `PolicyRule`, `Action`, `Constraint`, `Escalation` |
|
|
127
|
+
| **Contract** | Are we meeting our goals? | `HealthTarget`, `SustainHealth`, `RecoveryThreshold` |
|
|
128
|
+
| **Causal** | Why did this happen? | `CausalGraph`, `CausalLink`, `Explanation` |
|
|
129
|
+
|
|
130
|
+
Plus `step()` and `run()` to orchestrate all five in one call.
|
|
131
|
+
|
|
132
|
+
## Docs
|
|
133
|
+
|
|
134
|
+
Full specification: [margin-language.md](margin/margin-language.md)
|
|
135
|
+
|
|
136
|
+
## License
|
|
137
|
+
|
|
138
|
+
MIT
|
margin-0.1.0/README.md
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
1
|
+
# margin
|
|
2
|
+
|
|
3
|
+
**Typed health classification for systems that measure things.**
|
|
4
|
+
|
|
5
|
+
Every system with health bars, thresholds, alerts, or status dashboards solves the same problem: take a number, decide if it's healthy, correct it if it isn't, explain what happened. Margin is that pattern, typed once, with the polarity bug fixed.
|
|
6
|
+
|
|
7
|
+
```python
|
|
8
|
+
from margin import Parser, Thresholds
|
|
9
|
+
|
|
10
|
+
parser = Parser(
|
|
11
|
+
baselines={"throughput": 500.0, "error_rate": 0.002},
|
|
12
|
+
thresholds=Thresholds(intact=400.0, ablated=150.0),
|
|
13
|
+
component_thresholds={
|
|
14
|
+
"error_rate": Thresholds(intact=0.005, ablated=0.05, higher_is_better=False),
|
|
15
|
+
},
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
expr = parser.parse({"throughput": 480.0, "error_rate": 0.03})
|
|
19
|
+
print(expr.to_string())
|
|
20
|
+
# [throughput:INTACT(-0.04σ)] [error_rate:DEGRADED(-14.00σ)]
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
Throughput and error rate on the same scale. One is higher-is-better, the other is lower-is-better. Both classified correctly. Sigma-normalised so you can compare them.
|
|
24
|
+
|
|
25
|
+
## Install
|
|
26
|
+
|
|
27
|
+
```bash
|
|
28
|
+
pip install margin
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
Zero dependencies. Pure Python. 3.10+.
|
|
32
|
+
|
|
33
|
+
## What it does
|
|
34
|
+
|
|
35
|
+
A number comes in. Margin gives it:
|
|
36
|
+
|
|
37
|
+
- **Health** — INTACT / DEGRADED / ABLATED / RECOVERING / OOD
|
|
38
|
+
- **Polarity** — higher-is-better or lower-is-better, handled correctly everywhere
|
|
39
|
+
- **Sigma** — dimensionless deviation from baseline, always positive = healthier
|
|
40
|
+
- **Confidence** — how much the uncertainty interval overlaps the threshold
|
|
41
|
+
- **Provenance** — where this value came from, for correlation detection
|
|
42
|
+
- **Validity** — how the measurement ages (static, decaying, event-invalidated)
|
|
43
|
+
|
|
44
|
+
Then the correction loop:
|
|
45
|
+
|
|
46
|
+
- **Policy** — typed rules that decide what to do (RESTORE / SUPPRESS / AMPLIFY)
|
|
47
|
+
- **Constraints** — alpha clamping, cooldown, rate limiting
|
|
48
|
+
- **Escalation** — LOG / ALERT / HALT when the policy can't act
|
|
49
|
+
- **Contract** — typed success criteria ("reach INTACT within 5 steps")
|
|
50
|
+
- **Causal** — dependency graphs ("api is DEGRADED because db is ABLATED")
|
|
51
|
+
- **Ledger** — full audit trail of every correction, serializable, replayable
|
|
52
|
+
|
|
53
|
+
All in one call:
|
|
54
|
+
|
|
55
|
+
```python
|
|
56
|
+
from margin import step
|
|
57
|
+
|
|
58
|
+
result = step(expression, policy, ledger, graph, contract)
|
|
59
|
+
# result.correction — what to do
|
|
60
|
+
# result.explanations — why it happened
|
|
61
|
+
# result.decision — which rule matched, full trace
|
|
62
|
+
# result.contract — are we meeting our goals?
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
## The polarity bug
|
|
66
|
+
|
|
67
|
+
Every health system you've written has this bug. You check `if value >= threshold` and it works for throughput. Then you add error rate monitoring and the same check says 15% error rate is "healthy" because 0.15 >= 0.02.
|
|
68
|
+
|
|
69
|
+
Margin handles both polarities:
|
|
70
|
+
|
|
71
|
+
```python
|
|
72
|
+
# Higher is better (throughput, signal strength)
|
|
73
|
+
Thresholds(intact=80.0, ablated=30.0)
|
|
74
|
+
|
|
75
|
+
# Lower is better (error rate, latency)
|
|
76
|
+
Thresholds(intact=0.02, ablated=0.10, higher_is_better=False)
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
One flag. Threads through every comparison, every sigma calculation, every correction decision, every recovery ratio. You never think about it again.
|
|
80
|
+
|
|
81
|
+
## Auto-calibrate from data
|
|
82
|
+
|
|
83
|
+
Don't guess thresholds. Derive them from healthy measurements:
|
|
84
|
+
|
|
85
|
+
```python
|
|
86
|
+
from margin import parser_from_calibration
|
|
87
|
+
|
|
88
|
+
parser = parser_from_calibration(
|
|
89
|
+
{"rps": [490, 510, 505, 495], "latency": [48, 52, 50, 51]},
|
|
90
|
+
polarities={"latency": False},
|
|
91
|
+
)
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
## Five layers
|
|
95
|
+
|
|
96
|
+
| Layer | Question | Key types |
|
|
97
|
+
|---|---|---|
|
|
98
|
+
| **Foundation** | What was measured? | `Health`, `Observation`, `Expression`, `UncertainValue` |
|
|
99
|
+
| **Observability** | What changed? When will it cross? | `diff()`, `forecast()`, `track()`, `calibrate()` |
|
|
100
|
+
| **Policy** | What should we do? | `PolicyRule`, `Action`, `Constraint`, `Escalation` |
|
|
101
|
+
| **Contract** | Are we meeting our goals? | `HealthTarget`, `SustainHealth`, `RecoveryThreshold` |
|
|
102
|
+
| **Causal** | Why did this happen? | `CausalGraph`, `CausalLink`, `Explanation` |
|
|
103
|
+
|
|
104
|
+
Plus `step()` and `run()` to orchestrate all five in one call.
|
|
105
|
+
|
|
106
|
+
## Docs
|
|
107
|
+
|
|
108
|
+
Full specification: [margin-language.md](margin/margin-language.md)
|
|
109
|
+
|
|
110
|
+
## License
|
|
111
|
+
|
|
112
|
+
MIT
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Margin: typed uncertainty algebra and health classification.
|
|
3
|
+
|
|
4
|
+
A framework for measurements that carry uncertainty, temporal validity,
|
|
5
|
+
provenance, and typed health states — with an auditable correction ledger.
|
|
6
|
+
|
|
7
|
+
Structure:
|
|
8
|
+
Foundation: confidence, validity, provenance, uncertain, algebra,
|
|
9
|
+
health, observation, ledger
|
|
10
|
+
Observability: bridge, calibrate, composite, diff, events, forecast,
|
|
11
|
+
predicates, transitions
|
|
12
|
+
Policy: policy/ (core, temporal, compose, tuning, trace, validate)
|
|
13
|
+
Contract: contract
|
|
14
|
+
Causal: causal
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
# Foundation
|
|
18
|
+
from .confidence import Confidence
|
|
19
|
+
from .validity import Validity, ValidityMode
|
|
20
|
+
from .provenance import new_id, are_correlated, merge
|
|
21
|
+
from .uncertain import UncertainValue, Source
|
|
22
|
+
from .algebra import add, subtract, multiply, divide, scale, compare, weighted_average
|
|
23
|
+
from .health import Health, Thresholds, classify, SEVERITY
|
|
24
|
+
from .observation import Op, Observation, Correction, Expression, Parser
|
|
25
|
+
from .ledger import Record, Ledger
|
|
26
|
+
|
|
27
|
+
# Observability
|
|
28
|
+
from .bridge import observe, observe_many, delta, to_uncertain
|
|
29
|
+
from .calibrate import CalibrationResult, calibrate, calibrate_many, parser_from_calibration
|
|
30
|
+
from .composite import CompositeObservation, AggregateStrategy
|
|
31
|
+
from .diff import ComponentChange, Diff, diff
|
|
32
|
+
from .events import EventBus
|
|
33
|
+
from .forecast import Forecast, forecast
|
|
34
|
+
from .predicates import (
|
|
35
|
+
any_health, all_health, count_health, component_health,
|
|
36
|
+
any_degraded, confidence_below, sigma_below, any_correction,
|
|
37
|
+
all_of, any_of, not_, Rule, evaluate_rules,
|
|
38
|
+
)
|
|
39
|
+
from .transitions import Span, Transition, ComponentHistory, track, track_all
|
|
40
|
+
|
|
41
|
+
# Policy
|
|
42
|
+
from .policy import (
|
|
43
|
+
EscalationLevel, Escalation, Action, Constraint, PolicyRule, Policy,
|
|
44
|
+
health_sustained, health_for_at_least,
|
|
45
|
+
sigma_trending_below, fire_rate_above, no_improvement,
|
|
46
|
+
PolicyChain, CorrectionBundle, bundle_from_policy,
|
|
47
|
+
PolicyComparison, diff_policies, agreement_rate,
|
|
48
|
+
RuleStats, TuningResult,
|
|
49
|
+
analyze_backtest, suggest_tuning, apply_tuning,
|
|
50
|
+
RuleEvaluation, DecisionTrace,
|
|
51
|
+
trace_evaluate, trace_backtest,
|
|
52
|
+
ValidationIssue, ValidationResult, validate,
|
|
53
|
+
)
|
|
54
|
+
|
|
55
|
+
# Contract
|
|
56
|
+
from .contract import (
|
|
57
|
+
TermStatus, TermResult, ContractTerm,
|
|
58
|
+
HealthTarget, ReachHealth, SustainHealth,
|
|
59
|
+
RecoveryThreshold, NoHarmful,
|
|
60
|
+
ContractResult, Contract,
|
|
61
|
+
)
|
|
62
|
+
|
|
63
|
+
# Causal
|
|
64
|
+
from .causal import (
|
|
65
|
+
CauseType, CausalLink, CausalGraph,
|
|
66
|
+
CauseExplanation, Explanation,
|
|
67
|
+
)
|
|
68
|
+
|
|
69
|
+
# Loop
|
|
70
|
+
from .loop import StepResult, step, run
|
|
71
|
+
|
|
72
|
+
# Public API surface of the package, grouped by architectural layer.
# Keep in sync with the import groups above.
__all__ = [
    # Foundation
    "Confidence",
    "Validity", "ValidityMode",
    "new_id", "are_correlated", "merge",
    "UncertainValue", "Source",
    "add", "subtract", "multiply", "divide", "scale", "compare", "weighted_average",
    "Health", "Thresholds", "classify", "SEVERITY",
    "Op", "Observation", "Correction", "Expression", "Parser",
    "Record", "Ledger",
    # Observability
    "observe", "observe_many", "delta", "to_uncertain",
    "CalibrationResult", "calibrate", "calibrate_many", "parser_from_calibration",
    "CompositeObservation", "AggregateStrategy",
    "ComponentChange", "Diff", "diff",
    "EventBus",
    "Forecast", "forecast",
    "any_health", "all_health", "count_health", "component_health",
    "any_degraded", "confidence_below", "sigma_below", "any_correction",
    "all_of", "any_of", "not_", "Rule", "evaluate_rules",
    "Span", "Transition", "ComponentHistory", "track", "track_all",
    # Policy
    "EscalationLevel", "Escalation", "Action", "Constraint", "PolicyRule", "Policy",
    "health_sustained", "health_for_at_least",
    "sigma_trending_below", "fire_rate_above", "no_improvement",
    "PolicyChain", "CorrectionBundle", "bundle_from_policy",
    "PolicyComparison", "diff_policies", "agreement_rate",
    "RuleStats", "TuningResult",
    "analyze_backtest", "suggest_tuning", "apply_tuning",
    "RuleEvaluation", "DecisionTrace",
    "trace_evaluate", "trace_backtest",
    "ValidationIssue", "ValidationResult", "validate",
    # Contract
    "TermStatus", "TermResult", "ContractTerm",
    "HealthTarget", "ReachHealth", "SustainHealth",
    "RecoveryThreshold", "NoHarmful",
    "ContractResult", "Contract",
    # Causal
    "CauseType", "CausalLink", "CausalGraph",
    "CauseExplanation", "Explanation",
    # Loop
    "StepResult", "step", "run",
]
|
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Uncertainty propagation through arithmetic operations.
|
|
3
|
+
|
|
4
|
+
Correlated values (shared provenance) combine linearly (conservative).
|
|
5
|
+
Independent values combine in quadrature.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import math
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
from typing import Optional
|
|
11
|
+
|
|
12
|
+
from .uncertain import UncertainValue, Source
|
|
13
|
+
from .validity import Validity
|
|
14
|
+
from .confidence import Confidence
|
|
15
|
+
from .provenance import new_id, are_correlated
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _propagated_validity(inputs: list[UncertainValue]) -> Validity:
    """Conservative validity for a derived value: stamp it with the most
    recent measurement time among the inputs and, if any input decays,
    with the shortest halflife seen."""
    if not inputs:
        return Validity.static()

    newest = max(inputs, key=lambda uv: uv.validity.measured_at)
    decaying_halflives = [uv.validity.halflife for uv in inputs if uv.validity.halflife]

    if decaying_halflives:
        return Validity.decaying(min(decaying_halflives), newest.validity.measured_at)
    return Validity.static(newest.validity.measured_at)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def add(a: UncertainValue, b: UncertainValue) -> UncertainValue:
    """Add two uncertain values with correct uncertainty propagation.

    Correlated operands (shared provenance) combine uncertainties
    linearly (conservative); independent operands combine in quadrature.
    """
    abs_a = a.to_absolute()
    abs_b = b.to_absolute()

    if are_correlated(a.provenance, b.provenance):
        combined_unc = abs_a.uncertainty + abs_b.uncertainty
    else:
        # sqrt(sa^2 + sb^2), written via hypot for clarity.
        combined_unc = math.hypot(abs_a.uncertainty, abs_b.uncertainty)

    return UncertainValue(
        point=abs_a.point + abs_b.point,
        uncertainty=combined_unc,
        source=Source.PROPAGATED,
        validity=_propagated_validity([a, b]),
        provenance=list(set(a.provenance + b.provenance + [new_id()])),
    )
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def subtract(a: UncertainValue, b: UncertainValue) -> UncertainValue:
    """Subtract two uncertain values.

    Uncertainty propagation mirrors add(): linear for correlated
    provenance, quadrature for independent inputs.
    """
    abs_a = a.to_absolute()
    abs_b = b.to_absolute()

    if are_correlated(a.provenance, b.provenance):
        combined_unc = abs_a.uncertainty + abs_b.uncertainty
    else:
        combined_unc = math.hypot(abs_a.uncertainty, abs_b.uncertainty)

    return UncertainValue(
        point=abs_a.point - abs_b.point,
        uncertainty=combined_unc,
        source=Source.PROPAGATED,
        validity=_propagated_validity([a, b]),
        provenance=list(set(a.provenance + b.provenance + [new_id()])),
    )
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def multiply(a: UncertainValue, b: UncertainValue) -> UncertainValue:
    """Multiply two uncertain values (relative uncertainties combine).

    When either operand is zero, relative uncertainty is undefined so we
    fall back to absolute propagation: |b|*σ_a + |a|*σ_b (linear, safe).
    """
    product = a.point * b.point
    merged_provenance = list(set(a.provenance + b.provenance + [new_id()]))
    shared_validity = _propagated_validity([a, b])

    if a.point == 0 or b.point == 0:
        # Zero operand: relative form is undefined; propagate absolutely.
        abs_a, abs_b = a.to_absolute(), b.to_absolute()
        linear_unc = abs(b.point) * abs_a.uncertainty + abs(a.point) * abs_b.uncertainty
        return UncertainValue(
            point=product,
            uncertainty=linear_unc,
            source=Source.PROPAGATED,
            validity=shared_validity,
            provenance=merged_provenance,
        )

    rel_a, rel_b = a.to_relative(), b.to_relative()
    if are_correlated(a.provenance, b.provenance):
        rel_unc = rel_a.uncertainty + rel_b.uncertainty
    else:
        rel_unc = math.hypot(rel_a.uncertainty, rel_b.uncertainty)

    return UncertainValue(
        point=product,
        uncertainty=rel_unc,
        relative=True,
        source=Source.PROPAGATED,
        validity=shared_validity,
        provenance=merged_provenance,
    )
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def divide(a: UncertainValue, b: UncertainValue) -> UncertainValue:
    """Divide two uncertain values.

    Raises:
        ValueError: If the divisor's point value is zero.
    """
    if b.point == 0:
        raise ValueError("Division by zero")

    rel_a, rel_b = a.to_relative(), b.to_relative()
    if are_correlated(a.provenance, b.provenance):
        rel_unc = rel_a.uncertainty + rel_b.uncertainty
    else:
        rel_unc = math.hypot(rel_a.uncertainty, rel_b.uncertainty)

    return UncertainValue(
        point=rel_a.point / rel_b.point,
        uncertainty=rel_unc,
        relative=True,
        source=Source.PROPAGATED,
        validity=_propagated_validity([a, b]),
        provenance=list(set(a.provenance + b.provenance + [new_id()])),
    )
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def scale(value: UncertainValue, factor: float) -> UncertainValue:
    """Scale by an exact constant. Preserves provenance without growth.

    An exact factor carries no uncertainty of its own, so only the
    magnitude of the existing uncertainty changes.
    """
    magnitude = abs(factor)
    return UncertainValue(
        point=factor * value.point,
        uncertainty=magnitude * value.uncertainty,
        relative=value.relative,
        source=value.source,
        validity=value.validity,
        provenance=list(value.provenance),
    )
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
def compare(value: UncertainValue, threshold: float, at_time: Optional[datetime] = None) -> Confidence:
    """
    Compare an uncertain value to a threshold. Returns a Confidence tier
    based on how much the uncertainty interval overlaps the threshold.
    """
    when = at_time if at_time else datetime.now()
    half_width = value.absolute_uncertainty(when)
    low = value.point - half_width
    high = value.point + half_width

    # Threshold strictly inside the interval: we cannot tell which side.
    if low < threshold < high:
        return Confidence.INDETERMINATE

    # Distance from the nearest interval edge to the threshold.
    gap = low - threshold if threshold <= low else threshold - high

    full_width = 2 * half_width
    if full_width <= 0:
        # No uncertainty and not strictly inside: the comparison is exact.
        return Confidence.CERTAIN

    ratio = gap / full_width
    if ratio >= 0.5:
        return Confidence.CERTAIN
    if ratio >= 0.1:
        return Confidence.HIGH
    if ratio >= 0.05:
        return Confidence.MODERATE
    return Confidence.LOW
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def weighted_average(values: list[UncertainValue], weights: Optional[list[float]] = None) -> UncertainValue:
    """
    Weighted average. Defaults to inverse-variance weighting.

    Args:
        values: Uncertain values to combine. Must be non-empty.
        weights: Optional explicit weights, one per value. When omitted,
            inverse-variance weights are derived from each value's
            absolute uncertainty.

    Returns:
        The weighted combination as a single UncertainValue.

    Raises:
        ValueError: If ``values`` is empty, or if ``weights`` is given
            with a length different from ``values``.
    """
    if not values:
        raise ValueError("Empty list")
    if weights is not None and len(weights) != len(values):
        # Previously zip() silently truncated on mismatch; fail loudly.
        raise ValueError("weights must have the same length as values")
    if len(values) == 1:
        return values[0]

    if weights is None:
        variances = [v.to_absolute().uncertainty**2 for v in values]
        if any(var == 0 for var in variances):
            # A zero-variance value is exact: under inverse-variance
            # weighting it dominates completely, so split the weight
            # equally among the exact values. (The previous code raised
            # ZeroDivisionError when only some variances were zero.)
            exact_count = sum(1 for var in variances if var == 0)
            weights = [(1.0 / exact_count if var == 0 else 0.0) for var in variances]
        else:
            total = sum(1 / var for var in variances)
            weights = [(1 / var) / total for var in variances]

    result = None
    for value, weight in zip(values, weights):
        term = scale(value, weight)
        # Guard with `is not None` — object truthiness must not decide
        # whether a term is folded in.
        result = add(result, term) if result is not None else term
    return result
|