aponyx-0.1.18-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aponyx/__init__.py +14 -0
- aponyx/backtest/__init__.py +31 -0
- aponyx/backtest/adapters.py +77 -0
- aponyx/backtest/config.py +84 -0
- aponyx/backtest/engine.py +560 -0
- aponyx/backtest/protocols.py +101 -0
- aponyx/backtest/registry.py +334 -0
- aponyx/backtest/strategy_catalog.json +50 -0
- aponyx/cli/__init__.py +5 -0
- aponyx/cli/commands/__init__.py +8 -0
- aponyx/cli/commands/clean.py +349 -0
- aponyx/cli/commands/list.py +302 -0
- aponyx/cli/commands/report.py +167 -0
- aponyx/cli/commands/run.py +377 -0
- aponyx/cli/main.py +125 -0
- aponyx/config/__init__.py +82 -0
- aponyx/data/__init__.py +99 -0
- aponyx/data/bloomberg_config.py +306 -0
- aponyx/data/bloomberg_instruments.json +26 -0
- aponyx/data/bloomberg_securities.json +42 -0
- aponyx/data/cache.py +294 -0
- aponyx/data/fetch.py +659 -0
- aponyx/data/fetch_registry.py +135 -0
- aponyx/data/loaders.py +205 -0
- aponyx/data/providers/__init__.py +13 -0
- aponyx/data/providers/bloomberg.py +383 -0
- aponyx/data/providers/file.py +111 -0
- aponyx/data/registry.py +500 -0
- aponyx/data/requirements.py +96 -0
- aponyx/data/sample_data.py +415 -0
- aponyx/data/schemas.py +60 -0
- aponyx/data/sources.py +171 -0
- aponyx/data/synthetic_params.json +46 -0
- aponyx/data/transforms.py +336 -0
- aponyx/data/validation.py +308 -0
- aponyx/docs/__init__.py +24 -0
- aponyx/docs/adding_data_providers.md +682 -0
- aponyx/docs/cdx_knowledge_base.md +455 -0
- aponyx/docs/cdx_overlay_strategy.md +135 -0
- aponyx/docs/cli_guide.md +607 -0
- aponyx/docs/governance_design.md +551 -0
- aponyx/docs/logging_design.md +251 -0
- aponyx/docs/performance_evaluation_design.md +265 -0
- aponyx/docs/python_guidelines.md +786 -0
- aponyx/docs/signal_registry_usage.md +369 -0
- aponyx/docs/signal_suitability_design.md +558 -0
- aponyx/docs/visualization_design.md +277 -0
- aponyx/evaluation/__init__.py +11 -0
- aponyx/evaluation/performance/__init__.py +24 -0
- aponyx/evaluation/performance/adapters.py +109 -0
- aponyx/evaluation/performance/analyzer.py +384 -0
- aponyx/evaluation/performance/config.py +320 -0
- aponyx/evaluation/performance/decomposition.py +304 -0
- aponyx/evaluation/performance/metrics.py +761 -0
- aponyx/evaluation/performance/registry.py +327 -0
- aponyx/evaluation/performance/report.py +541 -0
- aponyx/evaluation/suitability/__init__.py +67 -0
- aponyx/evaluation/suitability/config.py +143 -0
- aponyx/evaluation/suitability/evaluator.py +389 -0
- aponyx/evaluation/suitability/registry.py +328 -0
- aponyx/evaluation/suitability/report.py +398 -0
- aponyx/evaluation/suitability/scoring.py +367 -0
- aponyx/evaluation/suitability/tests.py +303 -0
- aponyx/examples/01_generate_synthetic_data.py +53 -0
- aponyx/examples/02_fetch_data_file.py +82 -0
- aponyx/examples/03_fetch_data_bloomberg.py +104 -0
- aponyx/examples/04_compute_signal.py +164 -0
- aponyx/examples/05_evaluate_suitability.py +224 -0
- aponyx/examples/06_run_backtest.py +242 -0
- aponyx/examples/07_analyze_performance.py +214 -0
- aponyx/examples/08_visualize_results.py +272 -0
- aponyx/main.py +7 -0
- aponyx/models/__init__.py +45 -0
- aponyx/models/config.py +83 -0
- aponyx/models/indicator_transformation.json +52 -0
- aponyx/models/indicators.py +292 -0
- aponyx/models/metadata.py +447 -0
- aponyx/models/orchestrator.py +213 -0
- aponyx/models/registry.py +860 -0
- aponyx/models/score_transformation.json +42 -0
- aponyx/models/signal_catalog.json +29 -0
- aponyx/models/signal_composer.py +513 -0
- aponyx/models/signal_transformation.json +29 -0
- aponyx/persistence/__init__.py +16 -0
- aponyx/persistence/json_io.py +132 -0
- aponyx/persistence/parquet_io.py +378 -0
- aponyx/py.typed +0 -0
- aponyx/reporting/__init__.py +10 -0
- aponyx/reporting/generator.py +517 -0
- aponyx/visualization/__init__.py +20 -0
- aponyx/visualization/app.py +37 -0
- aponyx/visualization/plots.py +309 -0
- aponyx/visualization/visualizer.py +242 -0
- aponyx/workflows/__init__.py +18 -0
- aponyx/workflows/concrete_steps.py +720 -0
- aponyx/workflows/config.py +122 -0
- aponyx/workflows/engine.py +279 -0
- aponyx/workflows/registry.py +116 -0
- aponyx/workflows/steps.py +180 -0
- aponyx-0.1.18.dist-info/METADATA +552 -0
- aponyx-0.1.18.dist-info/RECORD +104 -0
- aponyx-0.1.18.dist-info/WHEEL +4 -0
- aponyx-0.1.18.dist-info/entry_points.txt +2 -0
- aponyx-0.1.18.dist-info/licenses/LICENSE +21 -0
aponyx/evaluation/suitability/config.py

@@ -0,0 +1,143 @@

```python
"""
Configuration for signal-product suitability evaluation.

Defines immutable configuration parameters for the suitability evaluation
process including lags, thresholds, and component weights.
"""

from dataclasses import dataclass, field


@dataclass(frozen=True)
class SuitabilityConfig:
    """
    Configuration for signal-product suitability evaluation.

    This immutable dataclass defines all parameters controlling the evaluation
    process, including forecast horizons, sample requirements, decision thresholds,
    and component weights for composite scoring.

    Parameters
    ----------
    lags : list[int]
        Forecast horizons to test (e.g., [1, 3, 5] for 1-, 3-, and 5-day ahead).
        Must be a non-empty list of positive integers.
    min_obs : int
        Minimum number of valid observations required for reliable inference.
        Must be at least 100. Default: 500.
    rolling_window : int
        Rolling window size in observations for stability analysis.
        Must be at least 50. Default: 252 (~1 year for daily data).
    pass_threshold : float
        Composite score threshold for PASS decision (proceed to backtest).
        Must satisfy: 0 < hold_threshold < pass_threshold < 1.
        Default: 0.7.
    hold_threshold : float
        Composite score threshold for HOLD decision (marginal, requires judgment).
        Must satisfy: 0 < hold_threshold < pass_threshold < 1.
        Default: 0.4.
    data_health_weight : float
        Weight for data health component in composite score.
        Must be non-negative. All weights must sum to 1.0.
        Default: 0.2.
    predictive_weight : float
        Weight for predictive association component in composite score.
        Must be non-negative. All weights must sum to 1.0.
        Default: 0.4.
    economic_weight : float
        Weight for economic relevance component in composite score.
        Must be non-negative. All weights must sum to 1.0.
        Default: 0.2.
    stability_weight : float
        Weight for temporal stability component in composite score.
        Must be non-negative. All weights must sum to 1.0.
        Default: 0.2.

    Raises
    ------
    ValueError
        If any validation constraint is violated.

    Examples
    --------
    >>> config = SuitabilityConfig()  # Use defaults
    >>> config = SuitabilityConfig(lags=[1, 5, 10], min_obs=1000)
    >>> config = SuitabilityConfig(
    ...     pass_threshold=0.75,
    ...     hold_threshold=0.5,
    ...     predictive_weight=0.5,
    ...     economic_weight=0.3,
    ...     data_health_weight=0.1,
    ...     stability_weight=0.1,
    ... )
    """

    lags: list[int] = field(default_factory=lambda: [1, 3, 5])
    min_obs: int = 500
    rolling_window: int = 252
    pass_threshold: float = 0.7
    hold_threshold: float = 0.4
    data_health_weight: float = 0.2
    predictive_weight: float = 0.4
    economic_weight: float = 0.2
    stability_weight: float = 0.2

    def __post_init__(self) -> None:
        """
        Validate configuration parameters.

        Checks that lags are valid, thresholds are properly ordered,
        weights are non-negative and sum to 1.0, and minimum observations
        are sufficient.

        Raises
        ------
        ValueError
            If any validation constraint is violated.
        """
        # Validate lags
        if not self.lags:
            raise ValueError("lags must be a non-empty list")
        if not all(isinstance(lag, int) and lag > 0 for lag in self.lags):
            raise ValueError(f"All lags must be positive integers, got {self.lags}")

        # Validate thresholds ordering
        if not (0 < self.hold_threshold < self.pass_threshold < 1):
            raise ValueError(
                f"Thresholds must satisfy 0 < hold ({self.hold_threshold}) "
                f"< pass ({self.pass_threshold}) < 1"
            )

        # Validate weights
        weights = [
            self.data_health_weight,
            self.predictive_weight,
            self.economic_weight,
            self.stability_weight,
        ]
        if not all(w >= 0 for w in weights):
            raise ValueError(
                f"All weights must be non-negative, got {dict(zip(['data_health', 'predictive', 'economic', 'stability'], weights))}"
            )

        weight_sum = sum(weights)
        if abs(weight_sum - 1.0) > 1e-6:
            raise ValueError(
                f"Weights must sum to 1.0, got {weight_sum:.6f}. "
                f"Weights: data_health={self.data_health_weight}, "
                f"predictive={self.predictive_weight}, "
                f"economic={self.economic_weight}, "
                f"stability={self.stability_weight}"
            )

        # Validate minimum observations
        if self.min_obs < 100:
            raise ValueError(
                f"min_obs must be at least 100 for reliable inference, got {self.min_obs}"
            )

        # Validate rolling window
        if self.rolling_window < 50:
            raise ValueError(
                f"rolling_window must be at least 50 for meaningful statistics, got {self.rolling_window}"
            )
```
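Because the dataclass is frozen and validated in `__post_init__`, a bad configuration fails at construction time rather than mid-evaluation. A minimal sketch of that behavior, using nothing beyond the class above:

```python
from aponyx.evaluation.suitability.config import SuitabilityConfig

# Defaults satisfy every invariant: 0 < 0.4 < 0.7 < 1 and the four
# component weights sum to 0.2 + 0.4 + 0.2 + 0.2 = 1.0.
config = SuitabilityConfig()

# Raising one weight without rebalancing the others violates the
# sum-to-1.0 check, so the instance refuses to construct.
try:
    SuitabilityConfig(predictive_weight=0.6)  # 0.2 + 0.6 + 0.2 + 0.2 = 1.2
except ValueError as exc:
    print(exc)  # "Weights must sum to 1.0, got 1.200000. ..."
```

Downstream, the composite score under the defaults is presumably the weighted average 0.2·data_health + 0.4·predictive + 0.2·economic + 0.2·stability, which the 0.7/0.4 thresholds then map to PASS, HOLD, or FAIL per the docstrings.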
aponyx/evaluation/suitability/evaluator.py

@@ -0,0 +1,389 @@

```python
"""
Core suitability evaluation logic.

Orchestrates statistical tests, scoring, and decision logic to evaluate
whether a signal contains meaningful predictive information for a traded product.
"""

import logging
from dataclasses import dataclass, asdict
from datetime import datetime
from typing import Any

import numpy as np
import pandas as pd

from aponyx.evaluation.suitability.config import SuitabilityConfig

logger = logging.getLogger(__name__)


@dataclass
class SuitabilityResult:
    """
    Result container for signal-product suitability evaluation.

    Contains decision, component scores, diagnostics, and metadata from
    the evaluation process.

    Attributes
    ----------
    decision : str
        Overall decision: "PASS" (proceed to backtest), "HOLD" (marginal),
        or "FAIL" (do not backtest).
    composite_score : float
        Weighted average of component scores (0-1 scale).
    data_health_score : float
        Data quality and sufficiency score (0-1 scale).
    predictive_score : float
        Statistical association strength score (0-1 scale).
    economic_score : float
        Economic relevance/impact score (0-1 scale).
    stability_score : float
        Temporal consistency score (0-1 scale).
    valid_obs : int
        Number of valid observations after alignment.
    missing_pct : float
        Percentage of missing data.
    correlations : dict[int, float]
        Pearson correlations by lag horizon.
    betas : dict[int, float]
        Regression coefficients by lag horizon.
    t_stats : dict[int, float]
        T-statistics by lag horizon.
    effect_size_bps : float
        Economic impact estimate (bps per 1σ signal change).
    sign_consistency_ratio : float
        Proportion of rolling windows with consistent sign.
    beta_cv : float
        Coefficient of variation of rolling betas.
    n_windows : int
        Number of valid rolling windows analyzed.
    timestamp : str
        ISO timestamp of evaluation.
    config : SuitabilityConfig
        Configuration used for evaluation.
    """

    decision: str
    composite_score: float
    data_health_score: float
    predictive_score: float
    economic_score: float
    stability_score: float
    valid_obs: int
    missing_pct: float
    correlations: dict[int, float]
    betas: dict[int, float]
    t_stats: dict[int, float]
    effect_size_bps: float
    sign_consistency_ratio: float
    beta_cv: float
    n_windows: int
    timestamp: str
    config: SuitabilityConfig

    def to_dict(self) -> dict[str, Any]:
        """
        Convert result to dictionary for JSON serialization.

        Returns
        -------
        dict[str, Any]
            Structured dictionary with component scores, metrics, and metadata.
        """
        return {
            "decision": self.decision,
            "composite_score": self.composite_score,
            "component_scores": {
                "data_health": self.data_health_score,
                "predictive": self.predictive_score,
                "economic": self.economic_score,
                "stability": self.stability_score,
            },
            "metrics": {
                "valid_obs": self.valid_obs,
                "missing_pct": self.missing_pct,
                "correlations": self.correlations,
                "betas": self.betas,
                "t_stats": self.t_stats,
                "effect_size_bps": self.effect_size_bps,
                "sign_consistency_ratio": self.sign_consistency_ratio,
                "beta_cv": self.beta_cv,
                "n_windows": self.n_windows,
            },
            "timestamp": self.timestamp,
            "config": asdict(self.config),
        }


def compute_forward_returns(
    spread_series: pd.Series,
    lags: list[int],
) -> dict[int, pd.Series]:
    """
    Compute forward-looking returns (changes) from spread series.

    For credit spreads, forward returns represent the change in spread from
    time t to time t+lag. This is the target variable for predictive evaluation.

    Parameters
    ----------
    spread_series : pd.Series
        Time series of spread levels with DatetimeIndex.
    lags : list[int]
        List of forward horizons (e.g., [1, 3, 5] for 1-, 3-, 5-day ahead).

    Returns
    -------
    dict[int, pd.Series]
        Dictionary mapping lag → forward return series.
        Each series has the same index as input, with NaN at the end where
        forward data is not available.

    Notes
    -----
    Forward returns are computed as: spread[t+lag] - spread[t]
    For credit spreads, positive return = widening, negative = tightening.

    Examples
    --------
    >>> spreads = pd.Series([100, 102, 98, 101], index=pd.date_range('2020-01-01', periods=4))
    >>> fwd_returns = compute_forward_returns(spreads, [1, 2])
    >>> fwd_returns[1]  # 1-day forward: [102-100, 98-102, 101-98, NaN]
    """
    logger.debug(
        "Computing forward returns for %d lags: %s",
        len(lags),
        lags,
    )

    forward_returns = {}
    for lag in lags:
        # Shift backwards to get future values aligned to current index
        forward_returns[lag] = spread_series.shift(-lag) - spread_series

    logger.debug(
        "Computed forward returns with %d observations per lag",
        len(spread_series) - max(lags) if lags else len(spread_series),
    )

    return forward_returns


def evaluate_signal_suitability(
    signal: pd.Series,
    target_change: pd.Series,
    config: SuitabilityConfig | None = None,
) -> SuitabilityResult:
    """
    Evaluate whether signal contains predictive information for target product.

    This is the main entry point for suitability evaluation. Orchestrates
    statistical tests, scoring, and decision logic.

    Parameters
    ----------
    signal : pd.Series
        Signal time series with DatetimeIndex and .name attribute.
        Should be z-score normalized for interpretability.
    target_change : pd.Series
        Target series (e.g., spread levels) with DatetimeIndex.
        Forward returns will be computed internally for each lag horizon.
    config : SuitabilityConfig, optional
        Evaluation configuration. If None, uses defaults.

    Returns
    -------
    SuitabilityResult
        Comprehensive evaluation result with decision, scores, and diagnostics.

    Raises
    ------
    ValueError
        If signal or target lack required attributes (DatetimeIndex, name).

    Notes
    -----
    This function does NOT include trading rules, costs, or position sizing.
    It purely evaluates the statistical and economic relationship between
    signal and target.

    The function computes forward-looking returns (target[t+lag] - target[t])
    internally for each configured lag horizon.

    Examples
    --------
    >>> signal = compute_spread_momentum(cdx_df, config)
    >>> result = evaluate_signal_suitability(signal, cdx_df['spread'])
    >>> print(result.decision, result.composite_score)
    """
    from aponyx.evaluation.suitability import tests, scoring

    if config is None:
        config = SuitabilityConfig()

    logger.info(
        "Starting suitability evaluation: signal=%s, config=%s",
        getattr(signal, "name", "unnamed"),
        config,
    )

    # Validate inputs
    if not isinstance(signal.index, pd.DatetimeIndex):
        raise ValueError("Signal must have DatetimeIndex")
    if not isinstance(target_change.index, pd.DatetimeIndex):
        raise ValueError("Target must have DatetimeIndex")

    # Align signal and target on common dates
    aligned_df = pd.DataFrame({"signal": signal, "target": target_change}).dropna()
    signal_aligned = aligned_df["signal"]
    target_aligned = aligned_df["target"]

    logger.debug(
        "Aligned data: original_signal=%d, original_target=%d, aligned=%d",
        len(signal),
        len(target_change),
        len(aligned_df),
    )

    # Compute data health metrics
    valid_obs = len(aligned_df)
    total_obs = max(len(signal), len(target_change))
    missing_pct = (1 - valid_obs / total_obs) * 100 if total_obs > 0 else 100.0

    logger.debug(
        "Data health: valid_obs=%d, missing_pct=%.2f%%",
        valid_obs,
        missing_pct,
    )

    # Score data health
    data_health_score = scoring.score_data_health(
        valid_obs=valid_obs,
        missing_pct=missing_pct,
        min_obs=config.min_obs,
    )
    logger.info("Data health score: %.3f", data_health_score)

    # Compute predictive statistics for all configured lags
    logger.debug("Computing stats for %d lags: %s", len(config.lags), config.lags)

    correlations = {}
    betas = {}
    t_stats = {}

    for lag in config.lags:
        # Compute forward returns for this lag
        target_fwd = target_change.shift(-lag)

        # Align signal with forward target
        aligned_lag = pd.DataFrame({"signal": signal, "target": target_fwd}).dropna()
        signal_lag = aligned_lag["signal"]
        target_lag = aligned_lag["target"]

        # Compute correlation
        correlations[lag] = tests.compute_correlation(signal_lag, target_lag)

        # Compute regression stats
        regression_stats = tests.compute_regression_stats(signal_lag, target_lag)
        betas[lag] = regression_stats["beta"]
        t_stats[lag] = regression_stats["t_stat"]

        logger.debug(
            "Lag %d: n=%d, corr=%.3f, beta=%.3f, t_stat=%.3f",
            lag,
            len(signal_lag),
            correlations[lag],
            betas[lag],
            t_stats[lag],
        )

    # Score predictive association using mean |t-stat| across all lags
    mean_abs_tstat = np.mean([abs(t) for t in t_stats.values()])
    predictive_score = scoring.score_predictive(mean_abs_tstat)
    logger.info(
        "Predictive score: %.3f (mean |t-stat|=%.3f across %d lags)",
        predictive_score,
        mean_abs_tstat,
        len(config.lags),
    )

    # Compute economic relevance
    avg_beta = np.mean(list(betas.values()))
    signal_std = signal_aligned.std()
    effect_size_bps = abs(avg_beta * signal_std)

    logger.debug("Economic impact: effect_size=%.3f bps", effect_size_bps)

    # Score economic relevance
    economic_score = scoring.score_economic(effect_size_bps)
    logger.info("Economic score: %.3f", economic_score)

    # Compute temporal stability using rolling window approach
    rolling_betas = tests.compute_rolling_betas(
        signal_aligned,
        target_aligned,
        window=config.rolling_window,
    )

    # Compute stability metrics
    stability_metrics = tests.compute_stability_metrics(rolling_betas, avg_beta)
    sign_consistency_ratio = stability_metrics["sign_consistency_ratio"]
    beta_cv = stability_metrics["beta_cv"]
    n_windows = stability_metrics["n_windows"]

    logger.debug(
        "Stability: sign_ratio=%.3f, CV=%.3f, n_windows=%d",
        sign_consistency_ratio,
        beta_cv,
        n_windows,
    )

    # Score stability
    stability_score = scoring.score_stability(sign_consistency_ratio, beta_cv)
    logger.info("Stability score: %.3f", stability_score)

    # Compute composite score
    composite_score = scoring.compute_composite_score(
        data_health_score=data_health_score,
        predictive_score=predictive_score,
        economic_score=economic_score,
        stability_score=stability_score,
        config=config,
    )
    logger.info("Composite score: %.3f", composite_score)

    # Assign decision
    decision = scoring.assign_decision(composite_score, config)
    logger.info("Decision: %s", decision)

    # Create result
    result = SuitabilityResult(
        decision=decision,
        composite_score=composite_score,
        data_health_score=data_health_score,
        predictive_score=predictive_score,
        economic_score=economic_score,
        stability_score=stability_score,
        valid_obs=valid_obs,
        missing_pct=missing_pct,
        correlations=correlations,
        betas=betas,
        t_stats=t_stats,
        effect_size_bps=effect_size_bps,
        sign_consistency_ratio=sign_consistency_ratio,
        beta_cv=beta_cv,
        n_windows=n_windows,
        timestamp=datetime.now().isoformat(),
        config=config,
    )

    logger.info(
        "Evaluation complete: signal=%s, decision=%s, score=%.3f",
        getattr(signal, "name", "unnamed"),
        decision,
        composite_score,
    )

    return result
```
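A hypothetical end-to-end run on synthetic data. The series construction and names below are invented for illustration, and executing it assumes the wheel's `tests` and `scoring` modules (both present in the file list above) expose `compute_correlation`, `compute_regression_stats`, `compute_rolling_betas`, `compute_stability_metrics`, and the `score_*`/`assign_decision` helpers exactly as the calls in `evaluate_signal_suitability` imply:

```python
import numpy as np
import pandas as pd

from aponyx.evaluation.suitability.config import SuitabilityConfig
from aponyx.evaluation.suitability.evaluator import evaluate_signal_suitability

# Illustrative data only: a z-scored signal that weakly leads the next
# day's move in a random-walk spread series.
rng = np.random.default_rng(0)
idx = pd.date_range("2020-01-02", periods=750, freq="B")
signal = pd.Series(rng.standard_normal(len(idx)), index=idx, name="demo_signal")
innovations = 0.5 * signal.shift(1).fillna(0.0) + rng.standard_normal(len(idx))
spread = 100.0 + innovations.cumsum()

# 750 aligned observations clear the default min_obs=500 and
# rolling_window=252 requirements.
result = evaluate_signal_suitability(signal, spread, SuitabilityConfig())
print(result.decision, f"{result.composite_score:.3f}")
print(result.to_dict()["metrics"]["t_stats"])  # per-lag t-statistics
```

`to_dict()` then yields the JSON-ready structure defined on the class above, with component scores nested under `component_scores` and per-lag diagnostics under `metrics`.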