ml4t_diagnostic-0.1.0a1-py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their public registries. It is provided for informational purposes only and reflects the changes between package versions.
- ml4t/diagnostic/AGENT.md +25 -0
- ml4t/diagnostic/__init__.py +166 -0
- ml4t/diagnostic/backends/__init__.py +10 -0
- ml4t/diagnostic/backends/adapter.py +192 -0
- ml4t/diagnostic/backends/polars_backend.py +899 -0
- ml4t/diagnostic/caching/__init__.py +40 -0
- ml4t/diagnostic/caching/cache.py +331 -0
- ml4t/diagnostic/caching/decorators.py +131 -0
- ml4t/diagnostic/caching/smart_cache.py +339 -0
- ml4t/diagnostic/config/AGENT.md +24 -0
- ml4t/diagnostic/config/README.md +267 -0
- ml4t/diagnostic/config/__init__.py +219 -0
- ml4t/diagnostic/config/barrier_config.py +277 -0
- ml4t/diagnostic/config/base.py +301 -0
- ml4t/diagnostic/config/event_config.py +148 -0
- ml4t/diagnostic/config/feature_config.py +404 -0
- ml4t/diagnostic/config/multi_signal_config.py +55 -0
- ml4t/diagnostic/config/portfolio_config.py +215 -0
- ml4t/diagnostic/config/report_config.py +391 -0
- ml4t/diagnostic/config/sharpe_config.py +202 -0
- ml4t/diagnostic/config/signal_config.py +206 -0
- ml4t/diagnostic/config/trade_analysis_config.py +310 -0
- ml4t/diagnostic/config/validation.py +279 -0
- ml4t/diagnostic/core/__init__.py +29 -0
- ml4t/diagnostic/core/numba_utils.py +315 -0
- ml4t/diagnostic/core/purging.py +372 -0
- ml4t/diagnostic/core/sampling.py +471 -0
- ml4t/diagnostic/errors/__init__.py +205 -0
- ml4t/diagnostic/evaluation/AGENT.md +26 -0
- ml4t/diagnostic/evaluation/__init__.py +437 -0
- ml4t/diagnostic/evaluation/autocorrelation.py +531 -0
- ml4t/diagnostic/evaluation/barrier_analysis.py +1050 -0
- ml4t/diagnostic/evaluation/binary_metrics.py +910 -0
- ml4t/diagnostic/evaluation/dashboard.py +715 -0
- ml4t/diagnostic/evaluation/diagnostic_plots.py +1037 -0
- ml4t/diagnostic/evaluation/distribution/__init__.py +499 -0
- ml4t/diagnostic/evaluation/distribution/moments.py +299 -0
- ml4t/diagnostic/evaluation/distribution/tails.py +777 -0
- ml4t/diagnostic/evaluation/distribution/tests.py +470 -0
- ml4t/diagnostic/evaluation/drift/__init__.py +139 -0
- ml4t/diagnostic/evaluation/drift/analysis.py +432 -0
- ml4t/diagnostic/evaluation/drift/domain_classifier.py +517 -0
- ml4t/diagnostic/evaluation/drift/population_stability_index.py +310 -0
- ml4t/diagnostic/evaluation/drift/wasserstein.py +388 -0
- ml4t/diagnostic/evaluation/event_analysis.py +647 -0
- ml4t/diagnostic/evaluation/excursion.py +390 -0
- ml4t/diagnostic/evaluation/feature_diagnostics.py +873 -0
- ml4t/diagnostic/evaluation/feature_outcome.py +666 -0
- ml4t/diagnostic/evaluation/framework.py +935 -0
- ml4t/diagnostic/evaluation/metric_registry.py +255 -0
- ml4t/diagnostic/evaluation/metrics/AGENT.md +23 -0
- ml4t/diagnostic/evaluation/metrics/__init__.py +133 -0
- ml4t/diagnostic/evaluation/metrics/basic.py +160 -0
- ml4t/diagnostic/evaluation/metrics/conditional_ic.py +469 -0
- ml4t/diagnostic/evaluation/metrics/feature_outcome.py +475 -0
- ml4t/diagnostic/evaluation/metrics/ic_statistics.py +446 -0
- ml4t/diagnostic/evaluation/metrics/importance_analysis.py +338 -0
- ml4t/diagnostic/evaluation/metrics/importance_classical.py +375 -0
- ml4t/diagnostic/evaluation/metrics/importance_mda.py +371 -0
- ml4t/diagnostic/evaluation/metrics/importance_shap.py +715 -0
- ml4t/diagnostic/evaluation/metrics/information_coefficient.py +527 -0
- ml4t/diagnostic/evaluation/metrics/interactions.py +772 -0
- ml4t/diagnostic/evaluation/metrics/monotonicity.py +226 -0
- ml4t/diagnostic/evaluation/metrics/risk_adjusted.py +324 -0
- ml4t/diagnostic/evaluation/multi_signal.py +550 -0
- ml4t/diagnostic/evaluation/portfolio_analysis/__init__.py +83 -0
- ml4t/diagnostic/evaluation/portfolio_analysis/analysis.py +734 -0
- ml4t/diagnostic/evaluation/portfolio_analysis/metrics.py +589 -0
- ml4t/diagnostic/evaluation/portfolio_analysis/results.py +334 -0
- ml4t/diagnostic/evaluation/report_generation.py +824 -0
- ml4t/diagnostic/evaluation/signal_selector.py +452 -0
- ml4t/diagnostic/evaluation/stat_registry.py +139 -0
- ml4t/diagnostic/evaluation/stationarity/__init__.py +97 -0
- ml4t/diagnostic/evaluation/stationarity/analysis.py +518 -0
- ml4t/diagnostic/evaluation/stationarity/augmented_dickey_fuller.py +296 -0
- ml4t/diagnostic/evaluation/stationarity/kpss_test.py +308 -0
- ml4t/diagnostic/evaluation/stationarity/phillips_perron.py +365 -0
- ml4t/diagnostic/evaluation/stats/AGENT.md +43 -0
- ml4t/diagnostic/evaluation/stats/__init__.py +191 -0
- ml4t/diagnostic/evaluation/stats/backtest_overfitting.py +219 -0
- ml4t/diagnostic/evaluation/stats/bootstrap.py +228 -0
- ml4t/diagnostic/evaluation/stats/deflated_sharpe_ratio.py +591 -0
- ml4t/diagnostic/evaluation/stats/false_discovery_rate.py +295 -0
- ml4t/diagnostic/evaluation/stats/hac_standard_errors.py +108 -0
- ml4t/diagnostic/evaluation/stats/minimum_track_record.py +408 -0
- ml4t/diagnostic/evaluation/stats/moments.py +164 -0
- ml4t/diagnostic/evaluation/stats/rademacher_adjustment.py +436 -0
- ml4t/diagnostic/evaluation/stats/reality_check.py +155 -0
- ml4t/diagnostic/evaluation/stats/sharpe_inference.py +219 -0
- ml4t/diagnostic/evaluation/themes.py +330 -0
- ml4t/diagnostic/evaluation/threshold_analysis.py +957 -0
- ml4t/diagnostic/evaluation/trade_analysis.py +1136 -0
- ml4t/diagnostic/evaluation/trade_dashboard/__init__.py +32 -0
- ml4t/diagnostic/evaluation/trade_dashboard/app.py +315 -0
- ml4t/diagnostic/evaluation/trade_dashboard/export/__init__.py +18 -0
- ml4t/diagnostic/evaluation/trade_dashboard/export/csv.py +82 -0
- ml4t/diagnostic/evaluation/trade_dashboard/export/html.py +276 -0
- ml4t/diagnostic/evaluation/trade_dashboard/io.py +166 -0
- ml4t/diagnostic/evaluation/trade_dashboard/normalize.py +304 -0
- ml4t/diagnostic/evaluation/trade_dashboard/stats.py +386 -0
- ml4t/diagnostic/evaluation/trade_dashboard/style.py +79 -0
- ml4t/diagnostic/evaluation/trade_dashboard/tabs/__init__.py +21 -0
- ml4t/diagnostic/evaluation/trade_dashboard/tabs/patterns.py +354 -0
- ml4t/diagnostic/evaluation/trade_dashboard/tabs/shap_analysis.py +280 -0
- ml4t/diagnostic/evaluation/trade_dashboard/tabs/stat_validation.py +186 -0
- ml4t/diagnostic/evaluation/trade_dashboard/tabs/worst_trades.py +236 -0
- ml4t/diagnostic/evaluation/trade_dashboard/types.py +129 -0
- ml4t/diagnostic/evaluation/trade_shap/__init__.py +102 -0
- ml4t/diagnostic/evaluation/trade_shap/alignment.py +188 -0
- ml4t/diagnostic/evaluation/trade_shap/characterize.py +413 -0
- ml4t/diagnostic/evaluation/trade_shap/cluster.py +302 -0
- ml4t/diagnostic/evaluation/trade_shap/explain.py +208 -0
- ml4t/diagnostic/evaluation/trade_shap/hypotheses/__init__.py +23 -0
- ml4t/diagnostic/evaluation/trade_shap/hypotheses/generator.py +290 -0
- ml4t/diagnostic/evaluation/trade_shap/hypotheses/matcher.py +251 -0
- ml4t/diagnostic/evaluation/trade_shap/hypotheses/templates.yaml +467 -0
- ml4t/diagnostic/evaluation/trade_shap/models.py +386 -0
- ml4t/diagnostic/evaluation/trade_shap/normalize.py +116 -0
- ml4t/diagnostic/evaluation/trade_shap/pipeline.py +263 -0
- ml4t/diagnostic/evaluation/trade_shap_dashboard.py +283 -0
- ml4t/diagnostic/evaluation/trade_shap_diagnostics.py +588 -0
- ml4t/diagnostic/evaluation/validated_cv.py +535 -0
- ml4t/diagnostic/evaluation/visualization.py +1050 -0
- ml4t/diagnostic/evaluation/volatility/__init__.py +45 -0
- ml4t/diagnostic/evaluation/volatility/analysis.py +351 -0
- ml4t/diagnostic/evaluation/volatility/arch.py +258 -0
- ml4t/diagnostic/evaluation/volatility/garch.py +460 -0
- ml4t/diagnostic/integration/__init__.py +48 -0
- ml4t/diagnostic/integration/backtest_contract.py +671 -0
- ml4t/diagnostic/integration/data_contract.py +316 -0
- ml4t/diagnostic/integration/engineer_contract.py +226 -0
- ml4t/diagnostic/logging/__init__.py +77 -0
- ml4t/diagnostic/logging/logger.py +245 -0
- ml4t/diagnostic/logging/performance.py +234 -0
- ml4t/diagnostic/logging/progress.py +234 -0
- ml4t/diagnostic/logging/wandb.py +412 -0
- ml4t/diagnostic/metrics/__init__.py +9 -0
- ml4t/diagnostic/metrics/percentiles.py +128 -0
- ml4t/diagnostic/py.typed +1 -0
- ml4t/diagnostic/reporting/__init__.py +43 -0
- ml4t/diagnostic/reporting/base.py +130 -0
- ml4t/diagnostic/reporting/html_renderer.py +275 -0
- ml4t/diagnostic/reporting/json_renderer.py +51 -0
- ml4t/diagnostic/reporting/markdown_renderer.py +117 -0
- ml4t/diagnostic/results/AGENT.md +24 -0
- ml4t/diagnostic/results/__init__.py +105 -0
- ml4t/diagnostic/results/barrier_results/__init__.py +36 -0
- ml4t/diagnostic/results/barrier_results/hit_rate.py +304 -0
- ml4t/diagnostic/results/barrier_results/precision_recall.py +266 -0
- ml4t/diagnostic/results/barrier_results/profit_factor.py +297 -0
- ml4t/diagnostic/results/barrier_results/tearsheet.py +397 -0
- ml4t/diagnostic/results/barrier_results/time_to_target.py +305 -0
- ml4t/diagnostic/results/barrier_results/validation.py +38 -0
- ml4t/diagnostic/results/base.py +177 -0
- ml4t/diagnostic/results/event_results.py +349 -0
- ml4t/diagnostic/results/feature_results.py +787 -0
- ml4t/diagnostic/results/multi_signal_results.py +431 -0
- ml4t/diagnostic/results/portfolio_results.py +281 -0
- ml4t/diagnostic/results/sharpe_results.py +448 -0
- ml4t/diagnostic/results/signal_results/__init__.py +74 -0
- ml4t/diagnostic/results/signal_results/ic.py +581 -0
- ml4t/diagnostic/results/signal_results/irtc.py +110 -0
- ml4t/diagnostic/results/signal_results/quantile.py +392 -0
- ml4t/diagnostic/results/signal_results/tearsheet.py +456 -0
- ml4t/diagnostic/results/signal_results/turnover.py +213 -0
- ml4t/diagnostic/results/signal_results/validation.py +147 -0
- ml4t/diagnostic/signal/AGENT.md +17 -0
- ml4t/diagnostic/signal/__init__.py +69 -0
- ml4t/diagnostic/signal/_report.py +152 -0
- ml4t/diagnostic/signal/_utils.py +261 -0
- ml4t/diagnostic/signal/core.py +275 -0
- ml4t/diagnostic/signal/quantile.py +148 -0
- ml4t/diagnostic/signal/result.py +214 -0
- ml4t/diagnostic/signal/signal_ic.py +129 -0
- ml4t/diagnostic/signal/turnover.py +182 -0
- ml4t/diagnostic/splitters/AGENT.md +19 -0
- ml4t/diagnostic/splitters/__init__.py +36 -0
- ml4t/diagnostic/splitters/base.py +501 -0
- ml4t/diagnostic/splitters/calendar.py +421 -0
- ml4t/diagnostic/splitters/calendar_config.py +91 -0
- ml4t/diagnostic/splitters/combinatorial.py +1064 -0
- ml4t/diagnostic/splitters/config.py +322 -0
- ml4t/diagnostic/splitters/cpcv/__init__.py +57 -0
- ml4t/diagnostic/splitters/cpcv/combinations.py +119 -0
- ml4t/diagnostic/splitters/cpcv/partitioning.py +263 -0
- ml4t/diagnostic/splitters/cpcv/purge_engine.py +379 -0
- ml4t/diagnostic/splitters/cpcv/windows.py +190 -0
- ml4t/diagnostic/splitters/group_isolation.py +329 -0
- ml4t/diagnostic/splitters/persistence.py +316 -0
- ml4t/diagnostic/splitters/utils.py +207 -0
- ml4t/diagnostic/splitters/walk_forward.py +757 -0
- ml4t/diagnostic/utils/__init__.py +42 -0
- ml4t/diagnostic/utils/config.py +542 -0
- ml4t/diagnostic/utils/dependencies.py +318 -0
- ml4t/diagnostic/utils/sessions.py +127 -0
- ml4t/diagnostic/validation/__init__.py +54 -0
- ml4t/diagnostic/validation/dataframe.py +274 -0
- ml4t/diagnostic/validation/returns.py +280 -0
- ml4t/diagnostic/validation/timeseries.py +299 -0
- ml4t/diagnostic/visualization/AGENT.md +19 -0
- ml4t/diagnostic/visualization/__init__.py +223 -0
- ml4t/diagnostic/visualization/backtest/__init__.py +98 -0
- ml4t/diagnostic/visualization/backtest/cost_attribution.py +762 -0
- ml4t/diagnostic/visualization/backtest/executive_summary.py +895 -0
- ml4t/diagnostic/visualization/backtest/interactive_controls.py +673 -0
- ml4t/diagnostic/visualization/backtest/statistical_validity.py +874 -0
- ml4t/diagnostic/visualization/backtest/tearsheet.py +565 -0
- ml4t/diagnostic/visualization/backtest/template_system.py +373 -0
- ml4t/diagnostic/visualization/backtest/trade_plots.py +1172 -0
- ml4t/diagnostic/visualization/barrier_plots.py +782 -0
- ml4t/diagnostic/visualization/core.py +1060 -0
- ml4t/diagnostic/visualization/dashboards/__init__.py +36 -0
- ml4t/diagnostic/visualization/dashboards/base.py +582 -0
- ml4t/diagnostic/visualization/dashboards/importance.py +801 -0
- ml4t/diagnostic/visualization/dashboards/interaction.py +263 -0
- ml4t/diagnostic/visualization/dashboards.py +43 -0
- ml4t/diagnostic/visualization/data_extraction/__init__.py +48 -0
- ml4t/diagnostic/visualization/data_extraction/importance.py +649 -0
- ml4t/diagnostic/visualization/data_extraction/interaction.py +504 -0
- ml4t/diagnostic/visualization/data_extraction/types.py +113 -0
- ml4t/diagnostic/visualization/data_extraction/validation.py +66 -0
- ml4t/diagnostic/visualization/feature_plots.py +888 -0
- ml4t/diagnostic/visualization/interaction_plots.py +618 -0
- ml4t/diagnostic/visualization/portfolio/__init__.py +41 -0
- ml4t/diagnostic/visualization/portfolio/dashboard.py +514 -0
- ml4t/diagnostic/visualization/portfolio/drawdown_plots.py +341 -0
- ml4t/diagnostic/visualization/portfolio/returns_plots.py +487 -0
- ml4t/diagnostic/visualization/portfolio/risk_plots.py +301 -0
- ml4t/diagnostic/visualization/report_generation.py +1343 -0
- ml4t/diagnostic/visualization/signal/__init__.py +103 -0
- ml4t/diagnostic/visualization/signal/dashboard.py +911 -0
- ml4t/diagnostic/visualization/signal/event_plots.py +514 -0
- ml4t/diagnostic/visualization/signal/ic_plots.py +635 -0
- ml4t/diagnostic/visualization/signal/multi_signal_dashboard.py +974 -0
- ml4t/diagnostic/visualization/signal/multi_signal_plots.py +603 -0
- ml4t/diagnostic/visualization/signal/quantile_plots.py +625 -0
- ml4t/diagnostic/visualization/signal/turnover_plots.py +400 -0
- ml4t/diagnostic/visualization/trade_shap/__init__.py +90 -0
- ml4t_diagnostic-0.1.0a1.dist-info/METADATA +1044 -0
- ml4t_diagnostic-0.1.0a1.dist-info/RECORD +242 -0
- ml4t_diagnostic-0.1.0a1.dist-info/WHEEL +4 -0
- ml4t_diagnostic-0.1.0a1.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,475 @@
"""Feature-outcome relationship analysis: Comprehensive IC diagnostics.

This module provides the main entry point for evaluating feature predictive power,
combining IC analysis, significance testing, monotonicity validation, and decay analysis.
"""

from typing import TYPE_CHECKING, Any, cast

import numpy as np
import pandas as pd
import polars as pl

from ml4t.diagnostic.evaluation.metrics.basic import compute_forward_returns
from ml4t.diagnostic.evaluation.metrics.ic_statistics import (
    compute_ic_decay,
    compute_ic_hac_stats,
)
from ml4t.diagnostic.evaluation.metrics.information_coefficient import (
    compute_ic_ir,
    compute_ic_series,
    information_coefficient,
)
from ml4t.diagnostic.evaluation.metrics.monotonicity import compute_monotonicity

if TYPE_CHECKING:
    pass

def analyze_feature_outcome(
    predictions: pl.DataFrame | pd.DataFrame,
    prices: pl.DataFrame | pd.DataFrame,
    pred_col: str = "prediction",
    price_col: str = "close",
    date_col: str = "date",
    group_col: str | None = None,
    horizons: list[int] | None = None,
    n_quantiles: int = 5,
    method: str = "spearman",
    include_decay: bool = True,
    include_monotonicity: bool = True,
    include_hac: bool = True,
    annualization_factor: float = np.sqrt(252),
) -> dict[str, Any]:
    """Comprehensive feature-outcome relationship analysis (FR-C1-C4).

    This is the main diagnostic function that combines IC analysis, significance
    testing, monotonicity validation, and decay analysis into a single comprehensive
    summary of feature quality.

    Use this function as the primary entry point for evaluating whether a feature
    (prediction/signal) has predictive power for outcomes (returns).

    Parameters
    ----------
    predictions : pl.DataFrame | pd.DataFrame
        DataFrame with predictions; must have pred_col, date_col, and optionally group_col
    prices : pl.DataFrame | pd.DataFrame
        DataFrame with prices; must have price_col, date_col, and optionally group_col
    pred_col : str, default "prediction"
        Column name for predictions
    price_col : str, default "close"
        Column name for prices
    date_col : str, default "date"
        Column name for dates
    group_col : str | None, default None
        Column name for grouping (e.g., "symbol" for multi-asset)
    horizons : list[int] | None, default None
        List of forward horizons in days for multi-horizon analysis.
        If None, uses [1, 2, 5, 10, 21] for decay analysis
    n_quantiles : int, default 5
        Number of quantile bins for monotonicity analysis
    method : str, default "spearman"
        Correlation method: "spearman" or "pearson"
    include_decay : bool, default True
        Whether to compute IC decay analysis
    include_monotonicity : bool, default True
        Whether to compute monotonicity analysis
    include_hac : bool, default True
        Whether to compute HAC-adjusted significance
    annualization_factor : float, default sqrt(252)
        Factor to annualize IC-IR (sqrt(periods_per_year))

    Returns
    -------
    dict[str, Any]
        Comprehensive analysis dictionary with:
        - ic_summary: Core IC statistics (mean, std, IR, significance)
        - ic_series: Time series of IC values
        - decay_analysis: IC decay across horizons (if include_decay=True)
        - monotonicity_analysis: Quantile-based monotonicity (if include_monotonicity=True)
        - interpretation: Textual interpretation and guidance
        - metadata: Analysis parameters and timestamps

    Examples
    --------
    >>> # Comprehensive feature analysis
    >>> analysis = analyze_feature_outcome(
    ...     predictions=pred_df,
    ...     prices=price_df,
    ...     group_col="symbol",
    ...     horizons=[1, 2, 5, 10, 21]
    ... )
    >>>
    >>> # Check core statistics
    >>> print(f"Mean IC: {analysis['ic_summary']['mean_ic']:.4f}")
    Mean IC: 0.0234
    >>> print(f"IC-IR: {analysis['ic_summary']['ic_ir']:.2f}")
    IC-IR: 1.12
    >>> print(f"P-value: {analysis['ic_summary']['p_value']:.4f}")
    P-value: 0.0327
    >>> print(f"Significant: {analysis['ic_summary']['is_significant']}")
    Significant: True
    >>>
    >>> # Check decay characteristics
    >>> print(f"Half-life: {analysis['decay_analysis']['half_life']:.1f} days")
    Half-life: 8.3 days
    >>> print(f"Optimal horizon: {analysis['decay_analysis']['optimal_horizon']} days")
    Optimal horizon: 1 days
    >>>
    >>> # Check monotonicity
    >>> print(f"Monotonic: {analysis['monotonicity_analysis']['is_monotonic']}")
    Monotonic: True
    >>> print(f"Direction: {analysis['monotonicity_analysis']['direction']}")
    Direction: increasing
    >>>
    >>> # Read interpretation guidance
    >>> print(analysis['interpretation'])
    FEATURE QUALITY: GOOD
    - Mean IC: 0.0234 (positive predictive power)
    - IC-IR: 1.12 (excellent consistency)
    - Statistical Significance: p < 0.05 (robust)
    - Monotonicity: Increasing (valid predictor)
    - Signal Persistence: Moderate (half-life 8.3 days)
    RECOMMENDATION: Feature shows strong predictive power with good consistency.
    Consider using for short-to-medium term predictions (1-10 days).

    Notes
    -----
    This function is designed to be the primary entry point for feature evaluation,
    combining multiple analyses into a comprehensive assessment. For more focused
    analysis, use individual functions:
    - compute_ic_series(): Time series IC only
    - compute_ic_ir(): Information ratio only
    - compute_ic_decay(): Decay analysis only
    - compute_monotonicity(): Monotonicity only
    - compute_ic_hac_stats(): Significance testing only

    Quality Thresholds:
    - Mean IC: >0.02 is good, >0.05 is excellent
    - IC-IR: >0.5 is good, >1.0 is excellent
    - P-value: <0.05 for significance
    - Monotonicity score: >0.8 for strong monotonicity
    - Half-life: Depends on strategy horizon (align with holding period)
    """
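    # Illustrative input shapes, assuming the default column names above
    # (the values are hypothetical):
    #   predictions: date        symbol  prediction    prices: date        symbol  close
    #                2024-01-02  AAPL    0.31                  2024-01-02  AAPL    185.60
    # The frames are joined on date_col (plus group_col when provided), so any
    # (date, group) pair missing from either side drops out of the inner join.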
    # 1. Compute forward returns from prices using compute_forward_returns
    prices_with_fwd = compute_forward_returns(
        prices=prices,
        periods=1,  # 1-day forward returns for IC series
        price_col=price_col,
        group_col=group_col,
    )

    # 2. Merge predictions with returns
    merge_cols = [date_col, group_col] if group_col else [date_col]

    merged: pl.DataFrame | pd.DataFrame
    if isinstance(predictions, pl.DataFrame):
        prices_fwd_pl = cast(pl.DataFrame, prices_with_fwd)
        merged = predictions.join(prices_fwd_pl, on=merge_cols, how="inner")
        # Drop NaN forward returns
        merged = merged.filter(pl.col("fwd_ret_1").is_not_null())
    else:
        prices_fwd_pd = cast(pd.DataFrame, prices_with_fwd)
        merged = pd.merge(predictions, prices_fwd_pd, on=merge_cols, how="inner")
        # Drop NaN forward returns
        merged = merged.dropna(subset=["fwd_ret_1"])

    # 3. Compute IC time series (cross-sectional IC per date)
    # For panel data, compute IC by grouping on date and correlating across assets
    ic_series: pl.DataFrame | pd.DataFrame  # Declare type before branches

    if group_col:
        # Panel data: group by date and compute IC within each date
        def compute_date_ic(group: pd.DataFrame) -> pd.Series:
            # Explicitly convert to float arrays to handle ExtensionArray types
            pred_vals = np.asarray(group[pred_col].values, dtype=np.float64)
            ret_vals = np.asarray(group["fwd_ret_1"].values, dtype=np.float64)

            # Remove NaN pairs
            valid_mask = ~(np.isnan(pred_vals) | np.isnan(ret_vals))
            pred_clean = pred_vals[valid_mask]
            ret_clean = ret_vals[valid_mask]

            n_obs = len(pred_clean)

            if n_obs >= 2:  # Need at least 2 observations for correlation
                ic_val = information_coefficient(
                    pred_clean, ret_clean, method=method, confidence_intervals=False
                )
            else:
                ic_val = np.nan

            return pd.Series({"ic": ic_val, "n_obs": n_obs})

        # Convert to pandas for the groupby.apply() operation
        merged_pd: pd.DataFrame = merged.to_pandas() if isinstance(merged, pl.DataFrame) else merged
        ic_series = merged_pd.groupby(date_col).apply(compute_date_ic).reset_index()
    else:
        # Time series data: use standard compute_ic_series
        ic_series = compute_ic_series(
            predictions=merged[[date_col, pred_col]],
            returns=merged[[date_col, "fwd_ret_1"]],
            pred_col=pred_col,
            ret_col="fwd_ret_1",
            date_col=date_col,
            method=method,
        )

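    # Illustrative cross-sectional IC for a single date with three assets:
    # predictions [0.5, -0.2, 0.1] rank as [3, 1, 2] and forward returns
    # [0.01, -0.02, 0.00] also rank as [3, 1, 2], so the Spearman IC for that
    # date is 1.0. Stacking one such correlation per date produces ic_series
    # (columns date, ic, n_obs in the panel branch above).
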
    # 4. Compute IC-IR (Information Ratio)
    ic_ir_result = compute_ic_ir(
        ic_series=ic_series,
        ic_col="ic",
        annualization_factor=annualization_factor,
        confidence_intervals=True,
    )

    # 5. Compute HAC-adjusted significance (if requested)
    if include_hac:
        hac_stats = compute_ic_hac_stats(ic_series=ic_series, ic_col="ic")
    else:
        # Fallback to simple statistics - explicitly convert to float array
        if isinstance(ic_series, (pl.DataFrame, pd.DataFrame)):
            ic_array = np.asarray(ic_series["ic"].to_numpy(), dtype=np.float64)
        else:
            raise TypeError(f"ic_series must be DataFrame, got {type(ic_series)}")
        mean_ic = float(np.mean(ic_array))
        std_ic = float(np.std(ic_array, ddof=1))
        t_stat = mean_ic / (std_ic / np.sqrt(len(ic_array)))
        from scipy.stats import t as t_dist

        p_value = float(2 * (1 - t_dist.cdf(abs(t_stat), df=len(ic_array) - 1)))
        hac_stats = {
            "mean_ic": mean_ic,
            "hac_se": std_ic / np.sqrt(len(ic_array)),
            "t_stat": t_stat,
            "p_value": p_value,
            "n_periods": len(ic_array),
        }

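    # Illustrative arithmetic for the non-HAC fallback above: with mean_ic = 0.02,
    # std_ic = 0.10 and 100 periods, the standard error is 0.10 / sqrt(100) = 0.01,
    # so t_stat = 2.0 and the two-sided p-value at df = 99 is roughly 0.048,
    # just under the 0.05 significance threshold used in ic_summary.
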
    # 6. Compute IC decay analysis (if requested)
    decay_analysis = None
    if include_decay:
        decay_analysis = compute_ic_decay(
            predictions=predictions,
            prices=prices,
            horizons=horizons,
            pred_col=pred_col,
            price_col=price_col,
            date_col=date_col,
            group_col=group_col,
            method=method,
            estimate_half_life=True,
        )

    # 7. Compute monotonicity analysis (if requested)
    monotonicity_analysis = None
    if include_monotonicity:
        # Use already-merged data with forward returns - convert to pandas for values access
        merged_for_mono: pd.DataFrame
        if isinstance(merged, pl.DataFrame):
            merged_for_mono = merged.to_pandas()
        else:
            merged_for_mono = merged

        monotonicity_analysis = compute_monotonicity(
            features=merged_for_mono[pred_col].to_numpy(),
            outcomes=merged_for_mono["fwd_ret_1"].to_numpy(),
            n_quantiles=n_quantiles,
            method=method,
        )

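    # Interpretation aid (conventional definitions, not specific to this package):
    # half_life is commonly read as the horizon at which IC falls to half its
    # one-period value, and a monotonic feature is one whose mean outcome rises
    # (or falls) steadily across the n_quantiles prediction bins, e.g. quantile
    # mean returns of [-0.2%, -0.1%, 0.0%, 0.1%, 0.2%].
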
    # 8. Build comprehensive summary
    # Extract IC values for std calculation - explicitly convert to float array
    if isinstance(ic_series, (pl.DataFrame, pd.DataFrame)):
        ic_values_for_std = np.asarray(ic_series["ic"].to_numpy(), dtype=np.float64)
    else:
        raise TypeError(f"ic_series must be DataFrame, got {type(ic_series)}")

    ic_summary = {
        "mean_ic": hac_stats["mean_ic"],
        "std_ic": float(np.std(ic_values_for_std, ddof=1)),
        "ic_ir": ic_ir_result["ic_ir"] if isinstance(ic_ir_result, dict) else ic_ir_result,
        "ic_ir_lower_ci": ic_ir_result.get("lower_ci") if isinstance(ic_ir_result, dict) else None,
        "ic_ir_upper_ci": ic_ir_result.get("upper_ci") if isinstance(ic_ir_result, dict) else None,
        "t_stat": hac_stats["t_stat"],
        "p_value": hac_stats["p_value"],
        "is_significant": hac_stats["p_value"] < 0.05,
        "n_periods": hac_stats["n_periods"],
        "fraction_positive": float(np.mean(ic_values_for_std > 0)),
    }

    # 9. Generate interpretation guidance
    interpretation = _generate_interpretation(
        ic_summary=ic_summary,
        decay_analysis=decay_analysis,
        monotonicity_analysis=monotonicity_analysis,
    )

    # 10. Build final result
    result = {
        "ic_summary": ic_summary,
        "ic_series": ic_series,
        "interpretation": interpretation,
        "metadata": {
            "analysis_date": pd.Timestamp.now().isoformat(),
            "method": method,
            "n_quantiles": n_quantiles,
            "horizons": horizons or [1, 2, 5, 10, 21],
            "include_decay": include_decay,
            "include_monotonicity": include_monotonicity,
            "include_hac": include_hac,
        },
    }

    if decay_analysis is not None:
        result["decay_analysis"] = decay_analysis

    if monotonicity_analysis is not None:
        result["monotonicity_analysis"] = monotonicity_analysis

    return result


def _generate_interpretation(
    ic_summary: dict[str, Any],
    decay_analysis: dict[str, Any] | None,
    monotonicity_analysis: dict[str, Any] | None,
) -> str:
    """Generate human-readable interpretation of feature-outcome analysis.

    Parameters
    ----------
    ic_summary : dict
        IC summary statistics
    decay_analysis : dict | None
        IC decay analysis results
    monotonicity_analysis : dict | None
        Monotonicity analysis results

    Returns
    -------
    str
        Multi-line interpretation text
    """
    lines = []

    # Determine overall quality
    mean_ic = ic_summary["mean_ic"]
    ic_ir = ic_summary["ic_ir"]
    is_sig = ic_summary["is_significant"]

    # Use abs() on IC-IR as well as mean IC, so a consistently negative
    # predictor is rated on the strength of the relationship
    if abs(mean_ic) > 0.05 and abs(ic_ir) > 1.0 and is_sig:
        quality = "EXCELLENT"
    elif abs(mean_ic) > 0.02 and abs(ic_ir) > 0.5 and is_sig:
        quality = "GOOD"
    elif abs(mean_ic) > 0.01 and is_sig:
        quality = "MODERATE"
    else:
        quality = "WEAK"

    lines.append(f"FEATURE QUALITY: {quality}")
    lines.append("")

    # IC statistics
    lines.append(
        f"- Mean IC: {mean_ic:.4f} ({'positive' if mean_ic > 0 else 'negative'} predictive power)"
    )
    lines.append(
        f"- IC-IR: {ic_ir:.2f} ({'excellent' if abs(ic_ir) > 1.0 else 'good' if abs(ic_ir) > 0.5 else 'moderate'} consistency)"
    )
    lines.append(
        f"- Statistical Significance: p = {ic_summary['p_value']:.4f} ({'robust' if is_sig else 'not significant'})"
    )

    # Monotonicity
    if monotonicity_analysis:
        is_mono = monotonicity_analysis["is_monotonic"]
        direction = monotonicity_analysis["direction"]
        score = monotonicity_analysis["monotonicity_score"]
        lines.append(
            f"- Monotonicity: {direction.replace('_', ' ').title()} (score: {score:.2f}, {'valid' if is_mono or score > 0.8 else 'weak'})"
        )

    # Decay characteristics
    if decay_analysis and decay_analysis.get("half_life"):
        half_life = decay_analysis["half_life"]
        if half_life < 5:
            persistence = "Short-term"
        elif half_life < 20:
            persistence = "Moderate"
        else:
            persistence = "Long-term"
        lines.append(f"- Signal Persistence: {persistence} (half-life {half_life:.1f} days)")

    lines.append("")

    # Recommendation
    if quality in ["EXCELLENT", "GOOD"]:
        if decay_analysis and decay_analysis.get("half_life"):
            hl = decay_analysis["half_life"]
            horizon_rec = (
                f"short-to-medium term predictions (1-{int(hl * 2)} days)"
                if hl < 10
                else f"medium-to-long term predictions ({int(hl)}-{int(hl * 3)} days)"
            )
        else:
            horizon_rec = "predictions aligned with signal strength"

        lines.append(
            f"RECOMMENDATION: Feature shows {quality.lower()} predictive power with {'excellent' if abs(ic_ir) > 1 else 'good'} consistency."
        )
        lines.append(f"Consider using for {horizon_rec}.")
    elif quality == "MODERATE":
        lines.append("RECOMMENDATION: Feature shows moderate predictive power.")
        lines.append(
            "Consider combining with other features or transforming (e.g., ranking, winsorization)."
        )
    else:
        lines.append("RECOMMENDATION: Feature shows weak predictive power.")
        lines.append(
            "Investigate data quality, consider feature transformations, or exclude from model."
        )

    return "\n".join(lines)


# Pydantic schema for analyze_feature_outcome() results
try:
    from pydantic import BaseModel, Field

    class ICSummary(BaseModel):
        """IC summary statistics."""

        mean_ic: float = Field(description="Mean Information Coefficient")
        std_ic: float = Field(description="Standard deviation of IC")
        ic_ir: float = Field(description="IC Information Ratio")
        ic_ir_lower_ci: float | None = Field(None, description="IC-IR lower confidence interval")
        ic_ir_upper_ci: float | None = Field(None, description="IC-IR upper confidence interval")
        t_stat: float = Field(description="HAC-adjusted t-statistic")
        p_value: float = Field(description="HAC-adjusted p-value")
        is_significant: bool = Field(description="Whether p-value < 0.05")
        n_periods: int = Field(description="Number of periods analyzed")
        fraction_positive: float = Field(description="Fraction of periods with positive IC")

    class FeatureOutcomeAnalysis(BaseModel):
        """Pydantic schema for analyze_feature_outcome() results."""

        ic_summary: ICSummary = Field(description="Core IC statistics")
        interpretation: str = Field(description="Human-readable interpretation")
        metadata: dict[str, Any] = Field(description="Analysis metadata")
        decay_analysis: dict[str, Any] | None = Field(None, description="IC decay analysis")
        monotonicity_analysis: dict[str, Any] | None = Field(
            None, description="Monotonicity analysis"
        )

        class Config:
            extra = "allow"  # Allow ic_series and other fields

except ImportError:
    # Pydantic not available, skip schema definition
    pass