ml4t-diagnostic 0.1.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (242)
  1. ml4t/diagnostic/AGENT.md +25 -0
  2. ml4t/diagnostic/__init__.py +166 -0
  3. ml4t/diagnostic/backends/__init__.py +10 -0
  4. ml4t/diagnostic/backends/adapter.py +192 -0
  5. ml4t/diagnostic/backends/polars_backend.py +899 -0
  6. ml4t/diagnostic/caching/__init__.py +40 -0
  7. ml4t/diagnostic/caching/cache.py +331 -0
  8. ml4t/diagnostic/caching/decorators.py +131 -0
  9. ml4t/diagnostic/caching/smart_cache.py +339 -0
  10. ml4t/diagnostic/config/AGENT.md +24 -0
  11. ml4t/diagnostic/config/README.md +267 -0
  12. ml4t/diagnostic/config/__init__.py +219 -0
  13. ml4t/diagnostic/config/barrier_config.py +277 -0
  14. ml4t/diagnostic/config/base.py +301 -0
  15. ml4t/diagnostic/config/event_config.py +148 -0
  16. ml4t/diagnostic/config/feature_config.py +404 -0
  17. ml4t/diagnostic/config/multi_signal_config.py +55 -0
  18. ml4t/diagnostic/config/portfolio_config.py +215 -0
  19. ml4t/diagnostic/config/report_config.py +391 -0
  20. ml4t/diagnostic/config/sharpe_config.py +202 -0
  21. ml4t/diagnostic/config/signal_config.py +206 -0
  22. ml4t/diagnostic/config/trade_analysis_config.py +310 -0
  23. ml4t/diagnostic/config/validation.py +279 -0
  24. ml4t/diagnostic/core/__init__.py +29 -0
  25. ml4t/diagnostic/core/numba_utils.py +315 -0
  26. ml4t/diagnostic/core/purging.py +372 -0
  27. ml4t/diagnostic/core/sampling.py +471 -0
  28. ml4t/diagnostic/errors/__init__.py +205 -0
  29. ml4t/diagnostic/evaluation/AGENT.md +26 -0
  30. ml4t/diagnostic/evaluation/__init__.py +437 -0
  31. ml4t/diagnostic/evaluation/autocorrelation.py +531 -0
  32. ml4t/diagnostic/evaluation/barrier_analysis.py +1050 -0
  33. ml4t/diagnostic/evaluation/binary_metrics.py +910 -0
  34. ml4t/diagnostic/evaluation/dashboard.py +715 -0
  35. ml4t/diagnostic/evaluation/diagnostic_plots.py +1037 -0
  36. ml4t/diagnostic/evaluation/distribution/__init__.py +499 -0
  37. ml4t/diagnostic/evaluation/distribution/moments.py +299 -0
  38. ml4t/diagnostic/evaluation/distribution/tails.py +777 -0
  39. ml4t/diagnostic/evaluation/distribution/tests.py +470 -0
  40. ml4t/diagnostic/evaluation/drift/__init__.py +139 -0
  41. ml4t/diagnostic/evaluation/drift/analysis.py +432 -0
  42. ml4t/diagnostic/evaluation/drift/domain_classifier.py +517 -0
  43. ml4t/diagnostic/evaluation/drift/population_stability_index.py +310 -0
  44. ml4t/diagnostic/evaluation/drift/wasserstein.py +388 -0
  45. ml4t/diagnostic/evaluation/event_analysis.py +647 -0
  46. ml4t/diagnostic/evaluation/excursion.py +390 -0
  47. ml4t/diagnostic/evaluation/feature_diagnostics.py +873 -0
  48. ml4t/diagnostic/evaluation/feature_outcome.py +666 -0
  49. ml4t/diagnostic/evaluation/framework.py +935 -0
  50. ml4t/diagnostic/evaluation/metric_registry.py +255 -0
  51. ml4t/diagnostic/evaluation/metrics/AGENT.md +23 -0
  52. ml4t/diagnostic/evaluation/metrics/__init__.py +133 -0
  53. ml4t/diagnostic/evaluation/metrics/basic.py +160 -0
  54. ml4t/diagnostic/evaluation/metrics/conditional_ic.py +469 -0
  55. ml4t/diagnostic/evaluation/metrics/feature_outcome.py +475 -0
  56. ml4t/diagnostic/evaluation/metrics/ic_statistics.py +446 -0
  57. ml4t/diagnostic/evaluation/metrics/importance_analysis.py +338 -0
  58. ml4t/diagnostic/evaluation/metrics/importance_classical.py +375 -0
  59. ml4t/diagnostic/evaluation/metrics/importance_mda.py +371 -0
  60. ml4t/diagnostic/evaluation/metrics/importance_shap.py +715 -0
  61. ml4t/diagnostic/evaluation/metrics/information_coefficient.py +527 -0
  62. ml4t/diagnostic/evaluation/metrics/interactions.py +772 -0
  63. ml4t/diagnostic/evaluation/metrics/monotonicity.py +226 -0
  64. ml4t/diagnostic/evaluation/metrics/risk_adjusted.py +324 -0
  65. ml4t/diagnostic/evaluation/multi_signal.py +550 -0
  66. ml4t/diagnostic/evaluation/portfolio_analysis/__init__.py +83 -0
  67. ml4t/diagnostic/evaluation/portfolio_analysis/analysis.py +734 -0
  68. ml4t/diagnostic/evaluation/portfolio_analysis/metrics.py +589 -0
  69. ml4t/diagnostic/evaluation/portfolio_analysis/results.py +334 -0
  70. ml4t/diagnostic/evaluation/report_generation.py +824 -0
  71. ml4t/diagnostic/evaluation/signal_selector.py +452 -0
  72. ml4t/diagnostic/evaluation/stat_registry.py +139 -0
  73. ml4t/diagnostic/evaluation/stationarity/__init__.py +97 -0
  74. ml4t/diagnostic/evaluation/stationarity/analysis.py +518 -0
  75. ml4t/diagnostic/evaluation/stationarity/augmented_dickey_fuller.py +296 -0
  76. ml4t/diagnostic/evaluation/stationarity/kpss_test.py +308 -0
  77. ml4t/diagnostic/evaluation/stationarity/phillips_perron.py +365 -0
  78. ml4t/diagnostic/evaluation/stats/AGENT.md +43 -0
  79. ml4t/diagnostic/evaluation/stats/__init__.py +191 -0
  80. ml4t/diagnostic/evaluation/stats/backtest_overfitting.py +219 -0
  81. ml4t/diagnostic/evaluation/stats/bootstrap.py +228 -0
  82. ml4t/diagnostic/evaluation/stats/deflated_sharpe_ratio.py +591 -0
  83. ml4t/diagnostic/evaluation/stats/false_discovery_rate.py +295 -0
  84. ml4t/diagnostic/evaluation/stats/hac_standard_errors.py +108 -0
  85. ml4t/diagnostic/evaluation/stats/minimum_track_record.py +408 -0
  86. ml4t/diagnostic/evaluation/stats/moments.py +164 -0
  87. ml4t/diagnostic/evaluation/stats/rademacher_adjustment.py +436 -0
  88. ml4t/diagnostic/evaluation/stats/reality_check.py +155 -0
  89. ml4t/diagnostic/evaluation/stats/sharpe_inference.py +219 -0
  90. ml4t/diagnostic/evaluation/themes.py +330 -0
  91. ml4t/diagnostic/evaluation/threshold_analysis.py +957 -0
  92. ml4t/diagnostic/evaluation/trade_analysis.py +1136 -0
  93. ml4t/diagnostic/evaluation/trade_dashboard/__init__.py +32 -0
  94. ml4t/diagnostic/evaluation/trade_dashboard/app.py +315 -0
  95. ml4t/diagnostic/evaluation/trade_dashboard/export/__init__.py +18 -0
  96. ml4t/diagnostic/evaluation/trade_dashboard/export/csv.py +82 -0
  97. ml4t/diagnostic/evaluation/trade_dashboard/export/html.py +276 -0
  98. ml4t/diagnostic/evaluation/trade_dashboard/io.py +166 -0
  99. ml4t/diagnostic/evaluation/trade_dashboard/normalize.py +304 -0
  100. ml4t/diagnostic/evaluation/trade_dashboard/stats.py +386 -0
  101. ml4t/diagnostic/evaluation/trade_dashboard/style.py +79 -0
  102. ml4t/diagnostic/evaluation/trade_dashboard/tabs/__init__.py +21 -0
  103. ml4t/diagnostic/evaluation/trade_dashboard/tabs/patterns.py +354 -0
  104. ml4t/diagnostic/evaluation/trade_dashboard/tabs/shap_analysis.py +280 -0
  105. ml4t/diagnostic/evaluation/trade_dashboard/tabs/stat_validation.py +186 -0
  106. ml4t/diagnostic/evaluation/trade_dashboard/tabs/worst_trades.py +236 -0
  107. ml4t/diagnostic/evaluation/trade_dashboard/types.py +129 -0
  108. ml4t/diagnostic/evaluation/trade_shap/__init__.py +102 -0
  109. ml4t/diagnostic/evaluation/trade_shap/alignment.py +188 -0
  110. ml4t/diagnostic/evaluation/trade_shap/characterize.py +413 -0
  111. ml4t/diagnostic/evaluation/trade_shap/cluster.py +302 -0
  112. ml4t/diagnostic/evaluation/trade_shap/explain.py +208 -0
  113. ml4t/diagnostic/evaluation/trade_shap/hypotheses/__init__.py +23 -0
  114. ml4t/diagnostic/evaluation/trade_shap/hypotheses/generator.py +290 -0
  115. ml4t/diagnostic/evaluation/trade_shap/hypotheses/matcher.py +251 -0
  116. ml4t/diagnostic/evaluation/trade_shap/hypotheses/templates.yaml +467 -0
  117. ml4t/diagnostic/evaluation/trade_shap/models.py +386 -0
  118. ml4t/diagnostic/evaluation/trade_shap/normalize.py +116 -0
  119. ml4t/diagnostic/evaluation/trade_shap/pipeline.py +263 -0
  120. ml4t/diagnostic/evaluation/trade_shap_dashboard.py +283 -0
  121. ml4t/diagnostic/evaluation/trade_shap_diagnostics.py +588 -0
  122. ml4t/diagnostic/evaluation/validated_cv.py +535 -0
  123. ml4t/diagnostic/evaluation/visualization.py +1050 -0
  124. ml4t/diagnostic/evaluation/volatility/__init__.py +45 -0
  125. ml4t/diagnostic/evaluation/volatility/analysis.py +351 -0
  126. ml4t/diagnostic/evaluation/volatility/arch.py +258 -0
  127. ml4t/diagnostic/evaluation/volatility/garch.py +460 -0
  128. ml4t/diagnostic/integration/__init__.py +48 -0
  129. ml4t/diagnostic/integration/backtest_contract.py +671 -0
  130. ml4t/diagnostic/integration/data_contract.py +316 -0
  131. ml4t/diagnostic/integration/engineer_contract.py +226 -0
  132. ml4t/diagnostic/logging/__init__.py +77 -0
  133. ml4t/diagnostic/logging/logger.py +245 -0
  134. ml4t/diagnostic/logging/performance.py +234 -0
  135. ml4t/diagnostic/logging/progress.py +234 -0
  136. ml4t/diagnostic/logging/wandb.py +412 -0
  137. ml4t/diagnostic/metrics/__init__.py +9 -0
  138. ml4t/diagnostic/metrics/percentiles.py +128 -0
  139. ml4t/diagnostic/py.typed +1 -0
  140. ml4t/diagnostic/reporting/__init__.py +43 -0
  141. ml4t/diagnostic/reporting/base.py +130 -0
  142. ml4t/diagnostic/reporting/html_renderer.py +275 -0
  143. ml4t/diagnostic/reporting/json_renderer.py +51 -0
  144. ml4t/diagnostic/reporting/markdown_renderer.py +117 -0
  145. ml4t/diagnostic/results/AGENT.md +24 -0
  146. ml4t/diagnostic/results/__init__.py +105 -0
  147. ml4t/diagnostic/results/barrier_results/__init__.py +36 -0
  148. ml4t/diagnostic/results/barrier_results/hit_rate.py +304 -0
  149. ml4t/diagnostic/results/barrier_results/precision_recall.py +266 -0
  150. ml4t/diagnostic/results/barrier_results/profit_factor.py +297 -0
  151. ml4t/diagnostic/results/barrier_results/tearsheet.py +397 -0
  152. ml4t/diagnostic/results/barrier_results/time_to_target.py +305 -0
  153. ml4t/diagnostic/results/barrier_results/validation.py +38 -0
  154. ml4t/diagnostic/results/base.py +177 -0
  155. ml4t/diagnostic/results/event_results.py +349 -0
  156. ml4t/diagnostic/results/feature_results.py +787 -0
  157. ml4t/diagnostic/results/multi_signal_results.py +431 -0
  158. ml4t/diagnostic/results/portfolio_results.py +281 -0
  159. ml4t/diagnostic/results/sharpe_results.py +448 -0
  160. ml4t/diagnostic/results/signal_results/__init__.py +74 -0
  161. ml4t/diagnostic/results/signal_results/ic.py +581 -0
  162. ml4t/diagnostic/results/signal_results/irtc.py +110 -0
  163. ml4t/diagnostic/results/signal_results/quantile.py +392 -0
  164. ml4t/diagnostic/results/signal_results/tearsheet.py +456 -0
  165. ml4t/diagnostic/results/signal_results/turnover.py +213 -0
  166. ml4t/diagnostic/results/signal_results/validation.py +147 -0
  167. ml4t/diagnostic/signal/AGENT.md +17 -0
  168. ml4t/diagnostic/signal/__init__.py +69 -0
  169. ml4t/diagnostic/signal/_report.py +152 -0
  170. ml4t/diagnostic/signal/_utils.py +261 -0
  171. ml4t/diagnostic/signal/core.py +275 -0
  172. ml4t/diagnostic/signal/quantile.py +148 -0
  173. ml4t/diagnostic/signal/result.py +214 -0
  174. ml4t/diagnostic/signal/signal_ic.py +129 -0
  175. ml4t/diagnostic/signal/turnover.py +182 -0
  176. ml4t/diagnostic/splitters/AGENT.md +19 -0
  177. ml4t/diagnostic/splitters/__init__.py +36 -0
  178. ml4t/diagnostic/splitters/base.py +501 -0
  179. ml4t/diagnostic/splitters/calendar.py +421 -0
  180. ml4t/diagnostic/splitters/calendar_config.py +91 -0
  181. ml4t/diagnostic/splitters/combinatorial.py +1064 -0
  182. ml4t/diagnostic/splitters/config.py +322 -0
  183. ml4t/diagnostic/splitters/cpcv/__init__.py +57 -0
  184. ml4t/diagnostic/splitters/cpcv/combinations.py +119 -0
  185. ml4t/diagnostic/splitters/cpcv/partitioning.py +263 -0
  186. ml4t/diagnostic/splitters/cpcv/purge_engine.py +379 -0
  187. ml4t/diagnostic/splitters/cpcv/windows.py +190 -0
  188. ml4t/diagnostic/splitters/group_isolation.py +329 -0
  189. ml4t/diagnostic/splitters/persistence.py +316 -0
  190. ml4t/diagnostic/splitters/utils.py +207 -0
  191. ml4t/diagnostic/splitters/walk_forward.py +757 -0
  192. ml4t/diagnostic/utils/__init__.py +42 -0
  193. ml4t/diagnostic/utils/config.py +542 -0
  194. ml4t/diagnostic/utils/dependencies.py +318 -0
  195. ml4t/diagnostic/utils/sessions.py +127 -0
  196. ml4t/diagnostic/validation/__init__.py +54 -0
  197. ml4t/diagnostic/validation/dataframe.py +274 -0
  198. ml4t/diagnostic/validation/returns.py +280 -0
  199. ml4t/diagnostic/validation/timeseries.py +299 -0
  200. ml4t/diagnostic/visualization/AGENT.md +19 -0
  201. ml4t/diagnostic/visualization/__init__.py +223 -0
  202. ml4t/diagnostic/visualization/backtest/__init__.py +98 -0
  203. ml4t/diagnostic/visualization/backtest/cost_attribution.py +762 -0
  204. ml4t/diagnostic/visualization/backtest/executive_summary.py +895 -0
  205. ml4t/diagnostic/visualization/backtest/interactive_controls.py +673 -0
  206. ml4t/diagnostic/visualization/backtest/statistical_validity.py +874 -0
  207. ml4t/diagnostic/visualization/backtest/tearsheet.py +565 -0
  208. ml4t/diagnostic/visualization/backtest/template_system.py +373 -0
  209. ml4t/diagnostic/visualization/backtest/trade_plots.py +1172 -0
  210. ml4t/diagnostic/visualization/barrier_plots.py +782 -0
  211. ml4t/diagnostic/visualization/core.py +1060 -0
  212. ml4t/diagnostic/visualization/dashboards/__init__.py +36 -0
  213. ml4t/diagnostic/visualization/dashboards/base.py +582 -0
  214. ml4t/diagnostic/visualization/dashboards/importance.py +801 -0
  215. ml4t/diagnostic/visualization/dashboards/interaction.py +263 -0
  216. ml4t/diagnostic/visualization/dashboards.py +43 -0
  217. ml4t/diagnostic/visualization/data_extraction/__init__.py +48 -0
  218. ml4t/diagnostic/visualization/data_extraction/importance.py +649 -0
  219. ml4t/diagnostic/visualization/data_extraction/interaction.py +504 -0
  220. ml4t/diagnostic/visualization/data_extraction/types.py +113 -0
  221. ml4t/diagnostic/visualization/data_extraction/validation.py +66 -0
  222. ml4t/diagnostic/visualization/feature_plots.py +888 -0
  223. ml4t/diagnostic/visualization/interaction_plots.py +618 -0
  224. ml4t/diagnostic/visualization/portfolio/__init__.py +41 -0
  225. ml4t/diagnostic/visualization/portfolio/dashboard.py +514 -0
  226. ml4t/diagnostic/visualization/portfolio/drawdown_plots.py +341 -0
  227. ml4t/diagnostic/visualization/portfolio/returns_plots.py +487 -0
  228. ml4t/diagnostic/visualization/portfolio/risk_plots.py +301 -0
  229. ml4t/diagnostic/visualization/report_generation.py +1343 -0
  230. ml4t/diagnostic/visualization/signal/__init__.py +103 -0
  231. ml4t/diagnostic/visualization/signal/dashboard.py +911 -0
  232. ml4t/diagnostic/visualization/signal/event_plots.py +514 -0
  233. ml4t/diagnostic/visualization/signal/ic_plots.py +635 -0
  234. ml4t/diagnostic/visualization/signal/multi_signal_dashboard.py +974 -0
  235. ml4t/diagnostic/visualization/signal/multi_signal_plots.py +603 -0
  236. ml4t/diagnostic/visualization/signal/quantile_plots.py +625 -0
  237. ml4t/diagnostic/visualization/signal/turnover_plots.py +400 -0
  238. ml4t/diagnostic/visualization/trade_shap/__init__.py +90 -0
  239. ml4t_diagnostic-0.1.0a1.dist-info/METADATA +1044 -0
  240. ml4t_diagnostic-0.1.0a1.dist-info/RECORD +242 -0
  241. ml4t_diagnostic-0.1.0a1.dist-info/WHEEL +4 -0
  242. ml4t_diagnostic-0.1.0a1.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,404 @@
1
+ """Feature Evaluation Configuration.
2
+
3
+ This module provides configuration for comprehensive feature analysis:
4
+ - Stationarity testing (ADF, KPSS, Phillips-Perron)
5
+ - Autocorrelation (ACF/PACF)
6
+ - Volatility analysis (GARCH effects)
7
+ - Distribution analysis (normality, outliers)
8
+ - Correlation analysis
9
+ - PCA and dimensionality reduction
10
+ - Redundancy detection
11
+ - Information Coefficient (IC)
12
+ - ML diagnostics (SHAP, drift)
13
+
14
+ Consolidated Config:
15
+ - DiagnosticConfig: Single config with all feature analysis settings (single-level nesting)
16
+
17
+ References
18
+ ----------
19
+ López de Prado, M. (2018). "Advances in Financial Machine Learning"
20
+ """
21
+
22
+ from __future__ import annotations
23
+
24
+ from pathlib import Path
25
+ from typing import Literal
26
+
27
+ from pydantic import Field, field_validator, model_validator
28
+
29
+ from ml4t.diagnostic.config.base import BaseConfig, StatisticalTestConfig
30
+ from ml4t.diagnostic.config.validation import (
31
+ ClusteringMethod,
32
+ CorrelationMethod,
33
+ DistanceMetric,
34
+ DriftDetectionMethod,
35
+ LinkageMethod,
36
+ NonNegativeInt,
37
+ NormalityTest,
38
+ OutlierMethod,
39
+ PositiveFloat,
40
+ PositiveInt,
41
+ Probability,
42
+ RegressionType,
43
+ ThresholdOptimizationTarget,
44
+ VolatilityClusterMethod,
45
+ validate_min_max_range,
46
+ )
47
+
48
+ # =============================================================================
49
+ # Settings Classes (Single-Level Nesting)
50
+ # =============================================================================
51
+
52
+
53
class StationaritySettings(StatisticalTestConfig):
    """Settings for stationarity testing (ADF, KPSS, PP).

    Enables/disables each unit-root or stationarity test and fixes its
    regression (deterministic-term) specification. Shared statistical-test
    options (e.g. ``significance_level``, used elsewhere in this module's
    examples) are inherited from ``StatisticalTestConfig``.
    """

    # Per-test enable flags; the model validator below requires at least one True.
    adf_enabled: bool = Field(True, description="Run ADF test")
    kpss_enabled: bool = Field(True, description="Run KPSS test")
    pp_enabled: bool = Field(False, description="Run Phillips-Perron test")
    # Deterministic-term specification per test. KPSS is restricted here to
    # "c" / "ct" (constant / constant+trend — statsmodels-style codes; confirm
    # against the test implementation).
    adf_regression: RegressionType = Field(
        RegressionType.CONSTANT, description="ADF regression type"
    )
    kpss_regression: Literal["c", "ct"] = Field("c", description="KPSS regression type")
    pp_regression: RegressionType = Field(RegressionType.CONSTANT, description="PP regression type")
    # "auto" defers lag-order selection to the test implementation; an int caps it.
    max_lag: Literal["auto"] | PositiveInt = Field("auto", description="Max lag for tests")

    @model_validator(mode="after")
    def check_at_least_one_test(self) -> StationaritySettings:
        """Ensure at least one test is enabled.

        Raises
        ------
        ValueError
            If ``adf_enabled``, ``kpss_enabled`` and ``pp_enabled`` are all False.
        """
        if not (self.adf_enabled or self.kpss_enabled or self.pp_enabled):
            raise ValueError("At least one stationarity test must be enabled")
        return self
72
+
73
+
74
class ACFSettings(BaseConfig):
    """Settings for autocorrelation (ACF/PACF) analysis."""

    enabled: bool = Field(True, description="Run ACF/PACF analysis")
    # "auto" defers lag selection to the implementation; default is a fixed 40 lags.
    n_lags: Literal["auto"] | PositiveInt = Field(40, description="Number of lags")
    # Significance level for the confidence bands drawn around the ACF/PACF.
    alpha: Probability = Field(0.05, description="Significance level for bands")
    compute_pacf: bool = Field(True, description="Also compute PACF")
    # PACF estimator: Yule-Walker ("yw"), ordinary least squares, or MLE.
    pacf_method: Literal["yw", "ols", "mle"] = Field("yw", description="PACF method")
    # FFT-based ACF is faster on long series; results should match the direct method.
    use_fft: bool = Field(True, description="Use FFT (faster)")
83
+
84
+
85
class VolatilitySettings(BaseConfig):
    """Settings for volatility analysis (rolling vol + GARCH-effect detection)."""

    enabled: bool = Field(True, description="Run volatility analysis")
    # Rolling-window lengths (in observations). Normalized to ascending order
    # by the validator below; 21 ~ one trading month of daily data.
    window_sizes: list[PositiveInt] = Field(
        default_factory=lambda: [21], description="Rolling windows"
    )
    # Volatility clustering (ARCH/GARCH effects) detection.
    detect_clustering: bool = Field(True, description="Test for GARCH effects")
    cluster_method: VolatilityClusterMethod = Field(
        VolatilityClusterMethod.LJUNG_BOX, description="Detection method"
    )
    significance_level: Probability = Field(0.05, description="Significance level")
    compute_rolling_vol: bool = Field(True, description="Compute rolling volatility")

    @field_validator("window_sizes")
    @classmethod
    def check_window_sizes(cls, v: list[int]) -> list[int]:
        """Ensure window sizes are valid.

        Returns the windows sorted ascending so downstream code can rely on
        order; rejects empty lists and windows too short to estimate variance.

        Raises
        ------
        ValueError
            If the list is empty or any window is < 2.
        """
        if not v:
            raise ValueError("Must specify at least one window size")
        if any(w < 2 for w in v):
            raise ValueError("Window sizes must be >= 2")
        return sorted(v)
108
+
109
+
110
class DistributionSettings(BaseConfig):
    """Settings for distribution analysis (normality, moments, outliers)."""

    enabled: bool = Field(True, description="Run distribution analysis")
    test_normality: bool = Field(True, description="Test for normality")
    # Which normality tests to run when test_normality is True.
    normality_tests: list[NormalityTest] = Field(
        default_factory=lambda: [NormalityTest.JARQUE_BERA], description="Normality tests"
    )
    compute_moments: bool = Field(True, description="Compute skew/kurtosis")
    # Outlier detection is opt-in (off by default).
    detect_outliers: bool = Field(False, description="Detect outliers")
    outlier_method: OutlierMethod = Field(OutlierMethod.ZSCORE, description="Outlier method")
    # Cutoff for outlier flagging; described as a z-score threshold for the
    # default ZSCORE method — presumably reinterpreted per method, confirm
    # against the analysis implementation.
    outlier_threshold: PositiveFloat = Field(3.0, description="Z-score threshold")
122
+
123
+
124
class CorrelationSettings(BaseConfig):
    """Settings for cross-feature correlation analysis."""

    enabled: bool = Field(True, description="Run correlation analysis")
    # One or more correlation estimators; validated non-empty below.
    methods: list[CorrelationMethod] = Field(
        default_factory=lambda: [CorrelationMethod.PEARSON], description="Correlation methods"
    )
    compute_pairwise: bool = Field(True, description="Compute pairwise correlations")
    # Minimum overlapping observations required per pairwise estimate.
    min_periods: PositiveInt = Field(30, description="Minimum observations")
    # Lagged cross-correlations are opt-in; max_lag bounds the lags scanned
    # (presumably only consulted when lag_correlations is True).
    lag_correlations: bool = Field(False, description="Compute lagged correlations")
    max_lag: PositiveInt = Field(10, description="Max lag")

    @field_validator("methods")
    @classmethod
    def check_methods(cls, v: list[CorrelationMethod]) -> list[CorrelationMethod]:
        """Ensure at least one method specified.

        Raises
        ------
        ValueError
            If ``methods`` is an empty list.
        """
        if not v:
            raise ValueError("Must specify at least one correlation method")
        return v
143
+
144
+
145
class PCASettings(BaseConfig):
    """Settings for PCA analysis (opt-in)."""

    enabled: bool = Field(False, description="Run PCA (opt-in)")
    # Either an explicit component count (int), a variance fraction
    # (Probability), or "auto" — in which case variance_threshold below
    # drives component selection (see validator).
    n_components: PositiveInt | Probability | Literal["auto"] = Field(
        "auto", description="Components"
    )
    variance_threshold: Probability = Field(0.95, description="Variance to explain")
    standardize: bool = Field(True, description="Standardize features")
    # None means no rotation is applied to the loadings.
    rotation: Literal["varimax", "quartimax"] | None = Field(None, description="Rotation")

    @model_validator(mode="after")
    def check_n_components_config(self) -> PCASettings:
        """Validate n_components configuration.

        Skipped entirely when PCA is disabled. When ``n_components='auto'``,
        the variance threshold must be strictly inside (0, 1) — the endpoints
        allowed by the Probability type would make auto-selection degenerate.

        Raises
        ------
        ValueError
            If enabled with ``n_components='auto'`` and ``variance_threshold``
            not in the open interval (0, 1).
        """
        if not self.enabled:
            return self
        if self.n_components == "auto" and not (0 < self.variance_threshold < 1):
            raise ValueError("variance_threshold must be in (0, 1) when n_components='auto'")
        return self
164
+
165
+
166
class ClusteringSettings(BaseConfig):
    """Settings for feature clustering (opt-in).

    Not every field applies to every algorithm: ``linkage`` and
    ``distance_metric`` presumably apply to hierarchical clustering, while
    ``eps`` and ``min_cluster_size`` are DBSCAN-style parameters — confirm
    against the clustering implementation.
    """

    enabled: bool = Field(False, description="Run clustering (opt-in)")
    method: ClusteringMethod = Field(ClusteringMethod.HIERARCHICAL, description="Algorithm")
    # "auto" defers cluster-count selection to the implementation.
    n_clusters: PositiveInt | Literal["auto"] = Field("auto", description="Number of clusters")
    linkage: LinkageMethod = Field(LinkageMethod.WARD, description="Linkage method")
    distance_metric: DistanceMetric = Field(DistanceMetric.EUCLIDEAN, description="Distance metric")
    min_cluster_size: PositiveInt = Field(5, description="Min cluster size")
    eps: PositiveFloat = Field(0.5, description="DBSCAN epsilon")
176
+
177
+
178
class RedundancySettings(BaseConfig):
    """Settings for redundancy detection among features."""

    enabled: bool = Field(True, description="Run redundancy detection")
    # Pairwise-correlation level above which features are treated as redundant.
    correlation_threshold: Probability = Field(0.95, description="Correlation threshold")
    # Variance inflation factor (multicollinearity) check is opt-in.
    compute_vif: bool = Field(False, description="Compute VIF")
    vif_threshold: PositiveFloat = Field(10.0, description="VIF threshold")
    # Which member of a redundant group to keep: positional ("first"/"last")
    # or the one with the best IC ("highest_ic") — semantics defined by the
    # redundancy implementation.
    keep_strategy: Literal["first", "last", "highest_ic"] = Field(
        "highest_ic", description="Keep strategy"
    )
188
+
189
+
190
class ICSettings(BaseConfig):
    """Settings for Information Coefficient analysis."""

    enabled: bool = Field(True, description="Run IC analysis")
    method: CorrelationMethod = Field(CorrelationMethod.PEARSON, description="Correlation method")
    # Forward lags at which to measure feature/outcome correlation.
    # Normalized to ascending order by the validator below.
    lag_structure: list[NonNegativeInt] = Field(
        default_factory=lambda: [0, 1, 5], description="Lags to analyze"
    )
    # Newey-West HAC-adjusted standard errors; max_lag_hac bounds the HAC
    # bandwidth ("auto" defers to the implementation).
    hac_adjustment: bool = Field(False, description="Newey-West HAC")
    max_lag_hac: PositiveInt | Literal["auto"] = Field("auto", description="Max HAC lag")
    compute_t_stats: bool = Field(True, description="Compute t-stats")
    compute_decay: bool = Field(False, description="Analyze IC decay")

    @field_validator("lag_structure")
    @classmethod
    def check_lag_structure(cls, v: list[int]) -> list[int]:
        """Ensure lag structure is valid.

        Rejects empty lists and returns lags sorted ascending. The
        non-negativity check is defensive: the ``NonNegativeInt`` element type
        already enforces it at the pydantic level.

        Raises
        ------
        ValueError
            If the list is empty or contains a negative lag.
        """
        if not v:
            raise ValueError("Must specify at least one lag")
        if any(lag < 0 for lag in v):
            raise ValueError("Lags must be non-negative")
        return sorted(v)
212
+
213
+
214
class BinaryClassificationSettings(BaseConfig):
    """Settings for binary classification metrics (opt-in)."""

    enabled: bool = Field(False, description="Run binary classification (opt-in)")
    # Signal cutoffs at which to binarize predictions before scoring.
    thresholds: list[float] = Field(default_factory=lambda: [0.0], description="Thresholds")
    metrics: list[Literal["precision", "recall", "f1", "lift", "coverage"]] = Field(
        default_factory=lambda: ["precision", "recall", "f1"],  # type: ignore[arg-type]
        description="Metrics",
    )
    # Label treated as the positive class; int or str to match the label dtype.
    positive_class: int | str = Field(1, description="Positive class label")
    compute_confusion_matrix: bool = Field(True, description="Compute confusion matrix")
    compute_roc_curve: bool = Field(False, description="Compute ROC curve")
226
+
227
+
228
class ThresholdAnalysisSettings(BaseConfig):
    """Settings for threshold optimization (opt-in).

    Sweeps ``n_points`` candidate thresholds over ``sweep_range`` and picks
    the one optimizing ``optimization_target``, optionally subject to a
    single constraint (``constraint_metric`` ``constraint_type``
    ``constraint_value``).
    """

    enabled: bool = Field(False, description="Run threshold analysis (opt-in)")
    # (min, max) of the threshold sweep; ordering enforced by the validator
    # below, but only when the analysis is enabled.
    sweep_range: tuple[float, float] = Field((-2.0, 2.0), description="Threshold range")
    n_points: PositiveInt = Field(50, description="Sweep points")
    optimization_target: ThresholdOptimizationTarget = Field(
        ThresholdOptimizationTarget.SHARPE, description="Optimization target"
    )
    # Optional constraint: metric and value must be set together (or both None).
    constraint_metric: str | None = Field(None, description="Constraint metric")
    constraint_value: float | None = Field(None, description="Constraint value")
    constraint_type: Literal[">=", "<=", "=="] = Field(">=", description="Constraint type")

    @model_validator(mode="after")
    def validate_sweep_range(self) -> ThresholdAnalysisSettings:
        """Validate sweep range (min < max), only when enabled."""
        if self.enabled:
            validate_min_max_range(self.sweep_range[0], self.sweep_range[1], "sweep_range")
        return self

    @model_validator(mode="after")
    def validate_constraint(self) -> ThresholdAnalysisSettings:
        """Validate constraint configuration.

        Unlike the sweep-range check, this runs even when disabled, so a
        half-specified constraint is rejected up front.

        Raises
        ------
        ValueError
            If exactly one of constraint_metric / constraint_value is set.
        """
        has_metric = self.constraint_metric is not None
        has_value = self.constraint_value is not None
        if has_metric != has_value:
            raise ValueError(
                "Both constraint_metric and constraint_value must be set (or both None)"
            )
        return self
258
+
259
+
260
class MLDiagnosticsSettings(BaseConfig):
    """Settings for ML diagnostics (importance, SHAP, drift)."""

    enabled: bool = Field(True, description="Run ML diagnostics")
    feature_importance: bool = Field(True, description="Compute importance")
    importance_method: Literal["tree", "permutation"] = Field(
        "tree", description="Importance method"
    )
    # SHAP is opt-in due to cost; None sample size presumably means use all
    # rows — confirm against the SHAP implementation.
    shap_analysis: bool = Field(False, description="Compute SHAP (expensive)")
    shap_sample_size: PositiveInt | None = Field(None, description="SHAP subsample size")
    # Feature-drift detection; window is in observations (63 ~ one quarter
    # of daily data).
    drift_detection: bool = Field(False, description="Detect drift")
    drift_method: DriftDetectionMethod = Field(
        DriftDetectionMethod.KOLMOGOROV_SMIRNOV, description="Drift method"
    )
    drift_window: PositiveInt = Field(63, description="Drift window")
275
+
276
+
277
+ # =============================================================================
278
+ # Consolidated Config
279
+ # =============================================================================
280
+
281
+
282
class DiagnosticConfig(BaseConfig):
    """Consolidated configuration for feature analysis (single-level nesting).

    Aggregates all per-analysis settings classes defined in this module into
    one flat config, so callers access settings directly:
    - config.stationarity.enabled (not config.module_a.stationarity.enabled)

    Three alternate constructors provide presets:
    - :meth:`for_quick_analysis` — fast exploratory defaults
    - :meth:`for_research` — comprehensive (all analyses, multiple methods)
    - :meth:`for_production` — minimal, drift-focused monitoring

    Examples
    --------
    >>> config = DiagnosticConfig(
    ...     stationarity=StationaritySettings(significance_level=0.01),
    ...     ic=ICSettings(lag_structure=[0, 1, 5, 10, 21]),
    ... )
    >>> config.to_yaml("diagnostic_config.yaml")
    """

    # Feature Diagnostics (Module A) — per-feature time-series properties.
    stationarity: StationaritySettings = Field(
        default_factory=StationaritySettings, description="Stationarity testing"
    )
    acf: ACFSettings = Field(default_factory=ACFSettings, description="ACF/PACF analysis")
    volatility: VolatilitySettings = Field(
        default_factory=VolatilitySettings, description="Volatility analysis"
    )
    distribution: DistributionSettings = Field(
        default_factory=DistributionSettings, description="Distribution analysis"
    )

    # Cross-Feature Analysis (Module B) — relationships between features.
    correlation: CorrelationSettings = Field(
        default_factory=CorrelationSettings, description="Correlation analysis"
    )
    pca: PCASettings = Field(default_factory=PCASettings, description="PCA analysis")
    clustering: ClusteringSettings = Field(
        default_factory=ClusteringSettings, description="Feature clustering"
    )
    redundancy: RedundancySettings = Field(
        default_factory=RedundancySettings, description="Redundancy detection"
    )

    # Feature-Outcome (Module C) — predictive relationship to the target.
    ic: ICSettings = Field(default_factory=ICSettings, description="IC analysis")
    binary_classification: BinaryClassificationSettings = Field(
        default_factory=BinaryClassificationSettings, description="Binary classification"
    )
    threshold_analysis: ThresholdAnalysisSettings = Field(
        default_factory=ThresholdAnalysisSettings, description="Threshold optimization"
    )
    ml_diagnostics: MLDiagnosticsSettings = Field(
        default_factory=MLDiagnosticsSettings, description="ML diagnostics"
    )

    # Execution settings — how the analysis runs, not what it computes.
    export_recommendations: bool = Field(True, description="Export recommendations")
    export_to_qfeatures: bool = Field(False, description="Export in qfeatures format")
    return_dataframes: bool = Field(True, description="Return as DataFrames")
    # -1 follows the joblib/sklearn convention of "use all cores" — confirm
    # against the executor implementation.
    n_jobs: int = Field(-1, ge=-1, description="Parallel jobs")
    cache_enabled: bool = Field(True, description="Enable caching")
    cache_dir: Path = Field(
        default_factory=lambda: Path.home() / ".cache" / "ml4t-diagnostic" / "features",
        description="Cache directory",
    )
    verbose: bool = Field(False, description="Verbose output")

    @classmethod
    def for_quick_analysis(cls) -> DiagnosticConfig:
        """Preset for quick exploratory analysis.

        Keeps the default analyses but disables the expensive extras:
        PP test, GARCH-effect detection, outliers, lagged correlations,
        PCA, clustering, HAC/decay IC options, SHAP, and drift.
        """
        return cls(
            stationarity=StationaritySettings(pp_enabled=False),
            volatility=VolatilitySettings(detect_clustering=False),
            distribution=DistributionSettings(detect_outliers=False),
            correlation=CorrelationSettings(lag_correlations=False),
            pca=PCASettings(enabled=False),
            clustering=ClusteringSettings(enabled=False),
            ic=ICSettings(hac_adjustment=False, compute_decay=False),
            ml_diagnostics=MLDiagnosticsSettings(shap_analysis=False, drift_detection=False),
        )

    @classmethod
    def for_research(cls) -> DiagnosticConfig:
        """Preset for academic research (comprehensive).

        Turns on every analysis, uses multiple window sizes, all three
        correlation methods and three normality tests, an extended IC lag
        structure with HAC and decay, plus SHAP and drift detection.
        """
        return cls(
            stationarity=StationaritySettings(pp_enabled=True),
            volatility=VolatilitySettings(window_sizes=[10, 21, 63]),
            distribution=DistributionSettings(
                detect_outliers=True,
                normality_tests=[
                    NormalityTest.JARQUE_BERA,
                    NormalityTest.SHAPIRO,
                    NormalityTest.ANDERSON,
                ],
            ),
            correlation=CorrelationSettings(
                methods=[
                    CorrelationMethod.PEARSON,
                    CorrelationMethod.SPEARMAN,
                    CorrelationMethod.KENDALL,
                ],
                lag_correlations=True,
            ),
            pca=PCASettings(enabled=True),
            clustering=ClusteringSettings(enabled=True),
            ic=ICSettings(lag_structure=[0, 1, 5, 10, 21], hac_adjustment=True, compute_decay=True),
            binary_classification=BinaryClassificationSettings(enabled=True),
            threshold_analysis=ThresholdAnalysisSettings(enabled=True),
            ml_diagnostics=MLDiagnosticsSettings(shap_analysis=True, drift_detection=True),
        )

    @classmethod
    def for_production(cls) -> DiagnosticConfig:
        """Preset for production monitoring (fast, focused on drift).

        Disables most research-oriented analyses (ACF, volatility, normality,
        PCA, clustering) while keeping importance and drift detection with a
        shorter window (21 observations instead of the default 63).
        """
        return cls(
            stationarity=StationaritySettings(pp_enabled=False),
            acf=ACFSettings(enabled=False),
            volatility=VolatilitySettings(enabled=False),
            distribution=DistributionSettings(test_normality=False, compute_moments=True),
            correlation=CorrelationSettings(lag_correlations=False),
            pca=PCASettings(enabled=False),
            clustering=ClusteringSettings(enabled=False),
            ic=ICSettings(compute_decay=False),
            ml_diagnostics=MLDiagnosticsSettings(
                feature_importance=True, drift_detection=True, drift_window=21
            ),
        )
@@ -0,0 +1,55 @@
1
+ """Configuration for multi-signal analysis.
2
+
3
+ Provides configuration for analyzing and comparing multiple trading signals.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ from typing import Literal
9
+
10
+ from pydantic import Field, field_validator
11
+
12
+ from ml4t.diagnostic.config.base import BaseConfig
13
+ from ml4t.diagnostic.config.signal_config import SignalConfig
14
+
15
+
16
class MultiSignalAnalysisConfig(BaseConfig):
    """Configuration for multi-signal analysis.

    Controls behavior for analyzing and comparing multiple trading signals,
    including FDR/FWER corrections and parallelization settings.
    """

    signal_config: SignalConfig = Field(
        default_factory=SignalConfig,
        description="Configuration applied to all individual signal analyses",
    )
    # Multiple-testing corrections: significance levels for the
    # false-discovery-rate and family-wise-error-rate procedures.
    fdr_alpha: float = Field(default=0.05, ge=0.001, le=0.5)
    fwer_alpha: float = Field(default=0.05, ge=0.001, le=0.5)
    # Screening thresholds applied to individual signals.
    min_ic_threshold: float = Field(default=0.0, ge=-1.0, le=1.0)
    min_observations: int = Field(default=100, ge=10)
    # Parallel execution; backend names match joblib's — presumably this
    # feeds joblib.Parallel, with n_jobs=-1 meaning "all cores" (confirm
    # against the analysis runner).
    n_jobs: int = Field(default=-1, ge=-1)
    backend: Literal["loky", "threading", "multiprocessing"] = Field(default="loky")
    # Result caching; cache_ttl is in seconds (None presumably disables
    # expiry — TODO confirm against the cache implementation).
    cache_enabled: bool = Field(default=True)
    cache_max_items: int = Field(default=200, ge=10, le=10000)
    cache_ttl: int | None = Field(default=3600, ge=60)
    # Upper bounds on how many signals the summary/comparison/heatmap
    # outputs will include.
    max_signals_summary: int = Field(default=200, ge=10, le=1000)
    max_signals_comparison: int = Field(default=20, ge=2, le=50)
    max_signals_heatmap: int = Field(default=100, ge=10, le=500)
    # Defaults for signal selection.
    default_selection_metric: str = Field(default="ic_ir")
    default_correlation_threshold: float = Field(default=0.7, ge=0.0, le=1.0)

    @field_validator("default_selection_metric")
    @classmethod
    def validate_selection_metric(cls, v: str) -> str:
        """Validate selection metric is supported.

        Raises
        ------
        ValueError
            If ``v`` is not one of the supported ranking metrics.
        """
        valid_metrics = {
            "ic_mean",
            "ic_ir",
            "ic_t_stat",
            "turnover_adj_ic",
            "quantile_spread",
        }
        if v not in valid_metrics:
            # Sort the options so the error message is deterministic: a raw
            # set repr varies between interpreter runs (hash randomization).
            raise ValueError(
                f"Invalid selection metric '{v}'. Valid options: {sorted(valid_metrics)}"
            )
        return v
@@ -0,0 +1,215 @@
1
+ """Portfolio Evaluation Configuration.
2
+
3
+ This module defines configuration for portfolio performance evaluation:
4
+ - Risk/return metrics (Sharpe, Sortino, Calmar, VaR, CVaR)
5
+ - Bayesian comparison (probabilistic strategy comparison)
6
+ - Time aggregation (daily, weekly, monthly, etc.)
7
+ - Drawdown analysis (underwater curves, recovery times)
8
+
9
+ Consolidated Config:
10
+ - PortfolioConfig: Single config with all portfolio analysis settings
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ from pathlib import Path
16
+
17
+ from pydantic import Field, field_validator, model_validator
18
+
19
+ from ml4t.diagnostic.config.base import BaseConfig
20
+ from ml4t.diagnostic.config.validation import (
21
+ BayesianPriorDistribution,
22
+ NonNegativeFloat,
23
+ PortfolioMetric,
24
+ PositiveInt,
25
+ Probability,
26
+ TimeFrequency,
27
+ )
28
+
29
+ # =============================================================================
30
+ # Settings Classes (Single-Level Nesting)
31
+ # =============================================================================
32
+
33
+
34
class MetricsSettings(BaseConfig):
    """Settings for risk/return metrics.

    Selects which portfolio metrics to compute and the scalar inputs
    (risk-free rate, confidence level, annualization factor) they use.
    """

    metrics: list[PortfolioMetric] = Field(
        default_factory=lambda: [
            PortfolioMetric.SHARPE,
            PortfolioMetric.SORTINO,
            PortfolioMetric.CALMAR,
            PortfolioMetric.MAX_DRAWDOWN,
        ],
        description="Metrics to compute",
    )
    risk_free_rate: NonNegativeFloat = Field(
        default=0.0, description="Annualized risk-free rate"
    )
    confidence_level: Probability = Field(
        default=0.95, description="Confidence for VaR/CVaR"
    )
    periods_per_year: PositiveInt = Field(
        default=252, description="Trading periods per year"
    )
    downside_target: float = Field(default=0.0, description="Target for Sortino")
    omega_threshold: float = Field(default=0.0, description="Omega threshold")

    @field_validator("metrics")
    @classmethod
    def check_metrics(cls, selected: list[PortfolioMetric]) -> list[PortfolioMetric]:
        """Reject an empty metric list — at least one metric is required."""
        if selected:
            return selected
        raise ValueError("Must specify at least one metric")
59
+
60
+
61
class BayesianSettings(BaseConfig):
    """Settings for Bayesian strategy comparison.

    Two post-init validators enforce internal consistency: a benchmark
    column must be named whenever benchmark comparison is on, and the
    prior-parameter dict must exactly match the chosen distribution.
    """

    enabled: bool = Field(False, description="Run Bayesian comparison")
    prior_distribution: BayesianPriorDistribution = Field(BayesianPriorDistribution.NORMAL)
    # Keys must exactly match the chosen distribution's required parameter
    # names (enforced by validate_prior_params below).
    prior_params: dict[str, float] = Field(default_factory=lambda: {"mean": 0.0, "std": 1.0})
    n_samples: PositiveInt = Field(10000, description="MCMC samples")
    credible_interval: Probability = Field(0.95)
    # When compare_to_benchmark is True, benchmark_column must also be set.
    compare_to_benchmark: bool = Field(False)
    benchmark_column: str | None = Field(None)

    @model_validator(mode="after")
    def validate_benchmark(self) -> BayesianSettings:
        """Require a benchmark column whenever benchmark comparison is enabled."""
        # Empty string is also rejected (falsy), not just None.
        if self.compare_to_benchmark and not self.benchmark_column:
            raise ValueError("benchmark_column required when compare_to_benchmark=True")
        return self

    @model_validator(mode="after")
    def validate_prior_params(self) -> BayesianSettings:
        """Validate prior parameters match distribution.

        The provided keys must equal the required set exactly — missing
        AND extra keys both raise ValueError.
        """
        # Required parameter names per supported prior distribution.
        # NOTE(review): a distribution missing from this table would raise
        # KeyError below, so this map must cover every enum member.
        required_params = {
            BayesianPriorDistribution.NORMAL: {"mean", "std"},
            BayesianPriorDistribution.STUDENT_T: {"df", "loc", "scale"},
            BayesianPriorDistribution.UNIFORM: {"low", "high"},
        }
        required = required_params[self.prior_distribution]
        provided = set(self.prior_params.keys())
        if required != provided:
            raise ValueError(f"Prior {self.prior_distribution} requires {required}, got {provided}")
        return self
92
+
93
+
94
class AggregationSettings(BaseConfig):
    """Settings for time aggregation analysis.

    Controls which return frequencies are analyzed and whether rolling
    statistics are computed, and over which window lengths.
    """

    frequencies: list[TimeFrequency] = Field(
        default_factory=lambda: [TimeFrequency.DAILY]
    )
    compute_rolling: bool = Field(default=False)
    rolling_windows: list[PositiveInt] = Field(
        default_factory=lambda: [21, 63, 252]
    )
    min_periods: PositiveInt | None = Field(default=None)
    align_to_calendar: bool = Field(default=True)

    @field_validator("frequencies")
    @classmethod
    def check_frequencies(cls, freqs: list[TimeFrequency]) -> list[TimeFrequency]:
        """Reject an empty frequency list — at least one is required."""
        if freqs:
            return freqs
        raise ValueError("Must specify at least one frequency")

    @field_validator("rolling_windows")
    @classmethod
    def check_rolling_windows(cls, windows: list[int]) -> list[int]:
        """Normalize rolling windows to ascending order for consistency."""
        return sorted(windows)
116
+
117
+
118
class DrawdownSettings(BaseConfig):
    """Settings for drawdown analysis."""

    # Master switch for the whole drawdown section.
    enabled: bool = Field(default=True)
    compute_underwater_curve: bool = Field(default=True)
    # How many of the largest drawdowns to report.
    top_n_drawdowns: PositiveInt = Field(default=5)
    compute_recovery_time: bool = Field(default=True)
    # Presumably the fraction of the prior peak that counts as "recovered"
    # (1.0 = full recovery) — confirm against the analysis code.
    recovery_threshold: Probability = Field(default=1.0)
126
+
127
+
128
+ # =============================================================================
129
+ # Consolidated Config
130
+ # =============================================================================
131
+
132
+
133
class PortfolioConfig(BaseConfig):
    """Consolidated configuration for portfolio evaluation.

    Bundles the four analysis sections (risk/return metrics, Bayesian
    comparison, time aggregation, drawdown analysis) together with
    execution options (parallelism, caching, output format).

    Examples
    --------
    >>> config = PortfolioConfig(
    ...     metrics=MetricsSettings(risk_free_rate=0.02),
    ...     bayesian=BayesianSettings(enabled=True),
    ... )
    >>> config.to_yaml("portfolio_config.yaml")
    """

    # -- analysis sections --------------------------------------------------
    metrics: MetricsSettings = Field(
        default_factory=MetricsSettings, description="Metrics settings"
    )
    bayesian: BayesianSettings = Field(
        default_factory=BayesianSettings, description="Bayesian comparison"
    )
    aggregation: AggregationSettings = Field(
        default_factory=AggregationSettings, description="Time aggregation"
    )
    drawdown: DrawdownSettings = Field(
        default_factory=DrawdownSettings, description="Drawdown analysis"
    )

    # -- execution options --------------------------------------------------
    return_dataframes: bool = Field(default=True, description="Return as DataFrames")
    n_jobs: int = Field(default=-1, ge=-1, description="Parallel jobs")
    cache_enabled: bool = Field(default=True)
    cache_dir: Path = Field(
        default_factory=lambda: Path.home() / ".cache" / "ml4t-diagnostic" / "portfolio"
    )
    verbose: bool = Field(default=False)

    @classmethod
    def for_quick_analysis(cls) -> PortfolioConfig:
        """Preset for quick exploratory analysis."""
        # Only the two essential metrics; everything expensive is off.
        essential = [PortfolioMetric.SHARPE, PortfolioMetric.MAX_DRAWDOWN]
        return cls(
            metrics=MetricsSettings(metrics=essential),
            bayesian=BayesianSettings(enabled=False),
            aggregation=AggregationSettings(compute_rolling=False),
            drawdown=DrawdownSettings(compute_recovery_time=False),
        )

    @classmethod
    def for_research(cls) -> PortfolioConfig:
        """Preset for academic research."""
        # Full metric suite, multi-frequency aggregation, and a larger
        # Bayesian sample count for tighter posteriors.
        full_metric_set = [
            PortfolioMetric.SHARPE,
            PortfolioMetric.SORTINO,
            PortfolioMetric.CALMAR,
            PortfolioMetric.MAX_DRAWDOWN,
            PortfolioMetric.VAR,
            PortfolioMetric.CVAR,
            PortfolioMetric.OMEGA,
        ]
        multi_frequency = AggregationSettings(
            frequencies=[TimeFrequency.DAILY, TimeFrequency.WEEKLY, TimeFrequency.MONTHLY],
            compute_rolling=True,
            rolling_windows=[21, 63, 126, 252],
        )
        return cls(
            metrics=MetricsSettings(metrics=full_metric_set),
            bayesian=BayesianSettings(enabled=True, n_samples=50000),
            aggregation=multi_frequency,
            drawdown=DrawdownSettings(compute_underwater_curve=True, top_n_drawdowns=10),
        )

    @classmethod
    def for_production(cls) -> PortfolioConfig:
        """Preset for production monitoring."""
        # Lean metric set and short rolling windows; no Bayesian run and
        # no recovery-time computation.
        monitoring_metrics = [
            PortfolioMetric.SHARPE,
            PortfolioMetric.MAX_DRAWDOWN,
            PortfolioMetric.VAR,
        ]
        daily_rolling = AggregationSettings(
            frequencies=[TimeFrequency.DAILY],
            compute_rolling=True,
            rolling_windows=[21, 63],
        )
        return cls(
            metrics=MetricsSettings(metrics=monitoring_metrics),
            bayesian=BayesianSettings(enabled=False),
            aggregation=daily_rolling,
            drawdown=DrawdownSettings(compute_recovery_time=False),
        )