ml4t_diagnostic-0.1.0a1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ml4t/diagnostic/AGENT.md +25 -0
- ml4t/diagnostic/__init__.py +166 -0
- ml4t/diagnostic/backends/__init__.py +10 -0
- ml4t/diagnostic/backends/adapter.py +192 -0
- ml4t/diagnostic/backends/polars_backend.py +899 -0
- ml4t/diagnostic/caching/__init__.py +40 -0
- ml4t/diagnostic/caching/cache.py +331 -0
- ml4t/diagnostic/caching/decorators.py +131 -0
- ml4t/diagnostic/caching/smart_cache.py +339 -0
- ml4t/diagnostic/config/AGENT.md +24 -0
- ml4t/diagnostic/config/README.md +267 -0
- ml4t/diagnostic/config/__init__.py +219 -0
- ml4t/diagnostic/config/barrier_config.py +277 -0
- ml4t/diagnostic/config/base.py +301 -0
- ml4t/diagnostic/config/event_config.py +148 -0
- ml4t/diagnostic/config/feature_config.py +404 -0
- ml4t/diagnostic/config/multi_signal_config.py +55 -0
- ml4t/diagnostic/config/portfolio_config.py +215 -0
- ml4t/diagnostic/config/report_config.py +391 -0
- ml4t/diagnostic/config/sharpe_config.py +202 -0
- ml4t/diagnostic/config/signal_config.py +206 -0
- ml4t/diagnostic/config/trade_analysis_config.py +310 -0
- ml4t/diagnostic/config/validation.py +279 -0
- ml4t/diagnostic/core/__init__.py +29 -0
- ml4t/diagnostic/core/numba_utils.py +315 -0
- ml4t/diagnostic/core/purging.py +372 -0
- ml4t/diagnostic/core/sampling.py +471 -0
- ml4t/diagnostic/errors/__init__.py +205 -0
- ml4t/diagnostic/evaluation/AGENT.md +26 -0
- ml4t/diagnostic/evaluation/__init__.py +437 -0
- ml4t/diagnostic/evaluation/autocorrelation.py +531 -0
- ml4t/diagnostic/evaluation/barrier_analysis.py +1050 -0
- ml4t/diagnostic/evaluation/binary_metrics.py +910 -0
- ml4t/diagnostic/evaluation/dashboard.py +715 -0
- ml4t/diagnostic/evaluation/diagnostic_plots.py +1037 -0
- ml4t/diagnostic/evaluation/distribution/__init__.py +499 -0
- ml4t/diagnostic/evaluation/distribution/moments.py +299 -0
- ml4t/diagnostic/evaluation/distribution/tails.py +777 -0
- ml4t/diagnostic/evaluation/distribution/tests.py +470 -0
- ml4t/diagnostic/evaluation/drift/__init__.py +139 -0
- ml4t/diagnostic/evaluation/drift/analysis.py +432 -0
- ml4t/diagnostic/evaluation/drift/domain_classifier.py +517 -0
- ml4t/diagnostic/evaluation/drift/population_stability_index.py +310 -0
- ml4t/diagnostic/evaluation/drift/wasserstein.py +388 -0
- ml4t/diagnostic/evaluation/event_analysis.py +647 -0
- ml4t/diagnostic/evaluation/excursion.py +390 -0
- ml4t/diagnostic/evaluation/feature_diagnostics.py +873 -0
- ml4t/diagnostic/evaluation/feature_outcome.py +666 -0
- ml4t/diagnostic/evaluation/framework.py +935 -0
- ml4t/diagnostic/evaluation/metric_registry.py +255 -0
- ml4t/diagnostic/evaluation/metrics/AGENT.md +23 -0
- ml4t/diagnostic/evaluation/metrics/__init__.py +133 -0
- ml4t/diagnostic/evaluation/metrics/basic.py +160 -0
- ml4t/diagnostic/evaluation/metrics/conditional_ic.py +469 -0
- ml4t/diagnostic/evaluation/metrics/feature_outcome.py +475 -0
- ml4t/diagnostic/evaluation/metrics/ic_statistics.py +446 -0
- ml4t/diagnostic/evaluation/metrics/importance_analysis.py +338 -0
- ml4t/diagnostic/evaluation/metrics/importance_classical.py +375 -0
- ml4t/diagnostic/evaluation/metrics/importance_mda.py +371 -0
- ml4t/diagnostic/evaluation/metrics/importance_shap.py +715 -0
- ml4t/diagnostic/evaluation/metrics/information_coefficient.py +527 -0
- ml4t/diagnostic/evaluation/metrics/interactions.py +772 -0
- ml4t/diagnostic/evaluation/metrics/monotonicity.py +226 -0
- ml4t/diagnostic/evaluation/metrics/risk_adjusted.py +324 -0
- ml4t/diagnostic/evaluation/multi_signal.py +550 -0
- ml4t/diagnostic/evaluation/portfolio_analysis/__init__.py +83 -0
- ml4t/diagnostic/evaluation/portfolio_analysis/analysis.py +734 -0
- ml4t/diagnostic/evaluation/portfolio_analysis/metrics.py +589 -0
- ml4t/diagnostic/evaluation/portfolio_analysis/results.py +334 -0
- ml4t/diagnostic/evaluation/report_generation.py +824 -0
- ml4t/diagnostic/evaluation/signal_selector.py +452 -0
- ml4t/diagnostic/evaluation/stat_registry.py +139 -0
- ml4t/diagnostic/evaluation/stationarity/__init__.py +97 -0
- ml4t/diagnostic/evaluation/stationarity/analysis.py +518 -0
- ml4t/diagnostic/evaluation/stationarity/augmented_dickey_fuller.py +296 -0
- ml4t/diagnostic/evaluation/stationarity/kpss_test.py +308 -0
- ml4t/diagnostic/evaluation/stationarity/phillips_perron.py +365 -0
- ml4t/diagnostic/evaluation/stats/AGENT.md +43 -0
- ml4t/diagnostic/evaluation/stats/__init__.py +191 -0
- ml4t/diagnostic/evaluation/stats/backtest_overfitting.py +219 -0
- ml4t/diagnostic/evaluation/stats/bootstrap.py +228 -0
- ml4t/diagnostic/evaluation/stats/deflated_sharpe_ratio.py +591 -0
- ml4t/diagnostic/evaluation/stats/false_discovery_rate.py +295 -0
- ml4t/diagnostic/evaluation/stats/hac_standard_errors.py +108 -0
- ml4t/diagnostic/evaluation/stats/minimum_track_record.py +408 -0
- ml4t/diagnostic/evaluation/stats/moments.py +164 -0
- ml4t/diagnostic/evaluation/stats/rademacher_adjustment.py +436 -0
- ml4t/diagnostic/evaluation/stats/reality_check.py +155 -0
- ml4t/diagnostic/evaluation/stats/sharpe_inference.py +219 -0
- ml4t/diagnostic/evaluation/themes.py +330 -0
- ml4t/diagnostic/evaluation/threshold_analysis.py +957 -0
- ml4t/diagnostic/evaluation/trade_analysis.py +1136 -0
- ml4t/diagnostic/evaluation/trade_dashboard/__init__.py +32 -0
- ml4t/diagnostic/evaluation/trade_dashboard/app.py +315 -0
- ml4t/diagnostic/evaluation/trade_dashboard/export/__init__.py +18 -0
- ml4t/diagnostic/evaluation/trade_dashboard/export/csv.py +82 -0
- ml4t/diagnostic/evaluation/trade_dashboard/export/html.py +276 -0
- ml4t/diagnostic/evaluation/trade_dashboard/io.py +166 -0
- ml4t/diagnostic/evaluation/trade_dashboard/normalize.py +304 -0
- ml4t/diagnostic/evaluation/trade_dashboard/stats.py +386 -0
- ml4t/diagnostic/evaluation/trade_dashboard/style.py +79 -0
- ml4t/diagnostic/evaluation/trade_dashboard/tabs/__init__.py +21 -0
- ml4t/diagnostic/evaluation/trade_dashboard/tabs/patterns.py +354 -0
- ml4t/diagnostic/evaluation/trade_dashboard/tabs/shap_analysis.py +280 -0
- ml4t/diagnostic/evaluation/trade_dashboard/tabs/stat_validation.py +186 -0
- ml4t/diagnostic/evaluation/trade_dashboard/tabs/worst_trades.py +236 -0
- ml4t/diagnostic/evaluation/trade_dashboard/types.py +129 -0
- ml4t/diagnostic/evaluation/trade_shap/__init__.py +102 -0
- ml4t/diagnostic/evaluation/trade_shap/alignment.py +188 -0
- ml4t/diagnostic/evaluation/trade_shap/characterize.py +413 -0
- ml4t/diagnostic/evaluation/trade_shap/cluster.py +302 -0
- ml4t/diagnostic/evaluation/trade_shap/explain.py +208 -0
- ml4t/diagnostic/evaluation/trade_shap/hypotheses/__init__.py +23 -0
- ml4t/diagnostic/evaluation/trade_shap/hypotheses/generator.py +290 -0
- ml4t/diagnostic/evaluation/trade_shap/hypotheses/matcher.py +251 -0
- ml4t/diagnostic/evaluation/trade_shap/hypotheses/templates.yaml +467 -0
- ml4t/diagnostic/evaluation/trade_shap/models.py +386 -0
- ml4t/diagnostic/evaluation/trade_shap/normalize.py +116 -0
- ml4t/diagnostic/evaluation/trade_shap/pipeline.py +263 -0
- ml4t/diagnostic/evaluation/trade_shap_dashboard.py +283 -0
- ml4t/diagnostic/evaluation/trade_shap_diagnostics.py +588 -0
- ml4t/diagnostic/evaluation/validated_cv.py +535 -0
- ml4t/diagnostic/evaluation/visualization.py +1050 -0
- ml4t/diagnostic/evaluation/volatility/__init__.py +45 -0
- ml4t/diagnostic/evaluation/volatility/analysis.py +351 -0
- ml4t/diagnostic/evaluation/volatility/arch.py +258 -0
- ml4t/diagnostic/evaluation/volatility/garch.py +460 -0
- ml4t/diagnostic/integration/__init__.py +48 -0
- ml4t/diagnostic/integration/backtest_contract.py +671 -0
- ml4t/diagnostic/integration/data_contract.py +316 -0
- ml4t/diagnostic/integration/engineer_contract.py +226 -0
- ml4t/diagnostic/logging/__init__.py +77 -0
- ml4t/diagnostic/logging/logger.py +245 -0
- ml4t/diagnostic/logging/performance.py +234 -0
- ml4t/diagnostic/logging/progress.py +234 -0
- ml4t/diagnostic/logging/wandb.py +412 -0
- ml4t/diagnostic/metrics/__init__.py +9 -0
- ml4t/diagnostic/metrics/percentiles.py +128 -0
- ml4t/diagnostic/py.typed +1 -0
- ml4t/diagnostic/reporting/__init__.py +43 -0
- ml4t/diagnostic/reporting/base.py +130 -0
- ml4t/diagnostic/reporting/html_renderer.py +275 -0
- ml4t/diagnostic/reporting/json_renderer.py +51 -0
- ml4t/diagnostic/reporting/markdown_renderer.py +117 -0
- ml4t/diagnostic/results/AGENT.md +24 -0
- ml4t/diagnostic/results/__init__.py +105 -0
- ml4t/diagnostic/results/barrier_results/__init__.py +36 -0
- ml4t/diagnostic/results/barrier_results/hit_rate.py +304 -0
- ml4t/diagnostic/results/barrier_results/precision_recall.py +266 -0
- ml4t/diagnostic/results/barrier_results/profit_factor.py +297 -0
- ml4t/diagnostic/results/barrier_results/tearsheet.py +397 -0
- ml4t/diagnostic/results/barrier_results/time_to_target.py +305 -0
- ml4t/diagnostic/results/barrier_results/validation.py +38 -0
- ml4t/diagnostic/results/base.py +177 -0
- ml4t/diagnostic/results/event_results.py +349 -0
- ml4t/diagnostic/results/feature_results.py +787 -0
- ml4t/diagnostic/results/multi_signal_results.py +431 -0
- ml4t/diagnostic/results/portfolio_results.py +281 -0
- ml4t/diagnostic/results/sharpe_results.py +448 -0
- ml4t/diagnostic/results/signal_results/__init__.py +74 -0
- ml4t/diagnostic/results/signal_results/ic.py +581 -0
- ml4t/diagnostic/results/signal_results/irtc.py +110 -0
- ml4t/diagnostic/results/signal_results/quantile.py +392 -0
- ml4t/diagnostic/results/signal_results/tearsheet.py +456 -0
- ml4t/diagnostic/results/signal_results/turnover.py +213 -0
- ml4t/diagnostic/results/signal_results/validation.py +147 -0
- ml4t/diagnostic/signal/AGENT.md +17 -0
- ml4t/diagnostic/signal/__init__.py +69 -0
- ml4t/diagnostic/signal/_report.py +152 -0
- ml4t/diagnostic/signal/_utils.py +261 -0
- ml4t/diagnostic/signal/core.py +275 -0
- ml4t/diagnostic/signal/quantile.py +148 -0
- ml4t/diagnostic/signal/result.py +214 -0
- ml4t/diagnostic/signal/signal_ic.py +129 -0
- ml4t/diagnostic/signal/turnover.py +182 -0
- ml4t/diagnostic/splitters/AGENT.md +19 -0
- ml4t/diagnostic/splitters/__init__.py +36 -0
- ml4t/diagnostic/splitters/base.py +501 -0
- ml4t/diagnostic/splitters/calendar.py +421 -0
- ml4t/diagnostic/splitters/calendar_config.py +91 -0
- ml4t/diagnostic/splitters/combinatorial.py +1064 -0
- ml4t/diagnostic/splitters/config.py +322 -0
- ml4t/diagnostic/splitters/cpcv/__init__.py +57 -0
- ml4t/diagnostic/splitters/cpcv/combinations.py +119 -0
- ml4t/diagnostic/splitters/cpcv/partitioning.py +263 -0
- ml4t/diagnostic/splitters/cpcv/purge_engine.py +379 -0
- ml4t/diagnostic/splitters/cpcv/windows.py +190 -0
- ml4t/diagnostic/splitters/group_isolation.py +329 -0
- ml4t/diagnostic/splitters/persistence.py +316 -0
- ml4t/diagnostic/splitters/utils.py +207 -0
- ml4t/diagnostic/splitters/walk_forward.py +757 -0
- ml4t/diagnostic/utils/__init__.py +42 -0
- ml4t/diagnostic/utils/config.py +542 -0
- ml4t/diagnostic/utils/dependencies.py +318 -0
- ml4t/diagnostic/utils/sessions.py +127 -0
- ml4t/diagnostic/validation/__init__.py +54 -0
- ml4t/diagnostic/validation/dataframe.py +274 -0
- ml4t/diagnostic/validation/returns.py +280 -0
- ml4t/diagnostic/validation/timeseries.py +299 -0
- ml4t/diagnostic/visualization/AGENT.md +19 -0
- ml4t/diagnostic/visualization/__init__.py +223 -0
- ml4t/diagnostic/visualization/backtest/__init__.py +98 -0
- ml4t/diagnostic/visualization/backtest/cost_attribution.py +762 -0
- ml4t/diagnostic/visualization/backtest/executive_summary.py +895 -0
- ml4t/diagnostic/visualization/backtest/interactive_controls.py +673 -0
- ml4t/diagnostic/visualization/backtest/statistical_validity.py +874 -0
- ml4t/diagnostic/visualization/backtest/tearsheet.py +565 -0
- ml4t/diagnostic/visualization/backtest/template_system.py +373 -0
- ml4t/diagnostic/visualization/backtest/trade_plots.py +1172 -0
- ml4t/diagnostic/visualization/barrier_plots.py +782 -0
- ml4t/diagnostic/visualization/core.py +1060 -0
- ml4t/diagnostic/visualization/dashboards/__init__.py +36 -0
- ml4t/diagnostic/visualization/dashboards/base.py +582 -0
- ml4t/diagnostic/visualization/dashboards/importance.py +801 -0
- ml4t/diagnostic/visualization/dashboards/interaction.py +263 -0
- ml4t/diagnostic/visualization/dashboards.py +43 -0
- ml4t/diagnostic/visualization/data_extraction/__init__.py +48 -0
- ml4t/diagnostic/visualization/data_extraction/importance.py +649 -0
- ml4t/diagnostic/visualization/data_extraction/interaction.py +504 -0
- ml4t/diagnostic/visualization/data_extraction/types.py +113 -0
- ml4t/diagnostic/visualization/data_extraction/validation.py +66 -0
- ml4t/diagnostic/visualization/feature_plots.py +888 -0
- ml4t/diagnostic/visualization/interaction_plots.py +618 -0
- ml4t/diagnostic/visualization/portfolio/__init__.py +41 -0
- ml4t/diagnostic/visualization/portfolio/dashboard.py +514 -0
- ml4t/diagnostic/visualization/portfolio/drawdown_plots.py +341 -0
- ml4t/diagnostic/visualization/portfolio/returns_plots.py +487 -0
- ml4t/diagnostic/visualization/portfolio/risk_plots.py +301 -0
- ml4t/diagnostic/visualization/report_generation.py +1343 -0
- ml4t/diagnostic/visualization/signal/__init__.py +103 -0
- ml4t/diagnostic/visualization/signal/dashboard.py +911 -0
- ml4t/diagnostic/visualization/signal/event_plots.py +514 -0
- ml4t/diagnostic/visualization/signal/ic_plots.py +635 -0
- ml4t/diagnostic/visualization/signal/multi_signal_dashboard.py +974 -0
- ml4t/diagnostic/visualization/signal/multi_signal_plots.py +603 -0
- ml4t/diagnostic/visualization/signal/quantile_plots.py +625 -0
- ml4t/diagnostic/visualization/signal/turnover_plots.py +400 -0
- ml4t/diagnostic/visualization/trade_shap/__init__.py +90 -0
- ml4t_diagnostic-0.1.0a1.dist-info/METADATA +1044 -0
- ml4t_diagnostic-0.1.0a1.dist-info/RECORD +242 -0
- ml4t_diagnostic-0.1.0a1.dist-info/WHEEL +4 -0
- ml4t_diagnostic-0.1.0a1.dist-info/licenses/LICENSE +21 -0
ml4t/diagnostic/utils/__init__.py
@@ -0,0 +1,42 @@
+"""Utility functions for ml4t-diagnostic.
+
+This module contains helper functions, configuration loaders, and other
+utilities used throughout the library.
+"""
+
+from ml4t.diagnostic.utils.config import (
+    ConfigError,
+    EvaluationConfig,
+    create_example_config,
+    load_config,
+)
+from ml4t.diagnostic.utils.dependencies import (
+    DEPS,
+    DependencyInfo,
+    OptionalDependencies,
+    check_dependency,
+    get_dependency_summary,
+    require_dependency,
+    warn_if_missing,
+)
+from ml4t.diagnostic.utils.sessions import (
+    assign_session_dates,
+    get_complete_sessions,
+)
+
+__all__: list[str] = [
+    "ConfigError",
+    "EvaluationConfig",
+    "create_example_config",
+    "load_config",
+    "assign_session_dates",
+    "get_complete_sessions",
+    # Dependency checking
+    "DEPS",
+    "DependencyInfo",
+    "OptionalDependencies",
+    "check_dependency",
+    "require_dependency",
+    "warn_if_missing",
+    "get_dependency_summary",
+]
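The __init__ above only re-exports names; the behaviour lives in the modules that follow. As a rough orientation, here is a sketch of how the re-exported configuration helpers are meant to be used (editor's illustration inferred from the source shown in this diff, not code shipped in the wheel; mlquant.evaluation.yaml is the default file name used by the package itself):

# Illustrative usage of the re-exported configuration helpers.
from ml4t.diagnostic.utils import ConfigError, create_example_config, load_config

# Write the bundled EXAMPLE_CONFIG template to disk, then load it back.
create_example_config("mlquant.evaluation.yaml")
try:
    cfg = load_config("mlquant.evaluation.yaml")
    # Dot-separated lookup into the validated Pydantic config.
    print(cfg.get("evaluation.tier"))   # 2 with the template defaults
    print(cfg.get("splitter.type"))     # "PurgedWalkForwardCV"
except ConfigError as exc:
    print(f"Invalid configuration: {exc}")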
ml4t/diagnostic/utils/config.py
@@ -0,0 +1,542 @@
+"""Configuration management for ml4t-diagnostic with Pydantic schema validation.
+
+This module provides YAML-based configuration loading and validation
+for evaluation workflows, allowing users to define complex evaluation
+pipelines in configuration files with comprehensive validation.
+"""
+
+import os
+from pathlib import Path
+from typing import Any, Literal, cast
+
+import yaml
+from pydantic import BaseModel, Field, field_validator, model_validator
+
+
+class ConfigError(Exception):
+    """Raised when configuration is invalid."""
+
+
+class SplitterConfig(BaseModel):
+    """Configuration schema for cross-validation splitters."""
+
+    type: Literal["PurgedWalkForwardCV", "CombinatorialPurgedCV"] = Field(
+        description="Type of cross-validation splitter",
+    )
+    params: dict[str, Any] = Field(
+        default_factory=dict,
+        description="Parameters for the splitter",
+    )
+
+    @field_validator("params")
+    @classmethod
+    def validate_splitter_params(cls, v: dict[str, Any], info) -> dict[str, Any]:
+        """Validate splitter-specific parameters."""
+        splitter_type = info.data.get("type")
+
+        if splitter_type == "PurgedWalkForwardCV":
+            # Validate walk-forward specific parameters
+            if "n_splits" in v and (v["n_splits"] < 2 or v["n_splits"] > 50):
+                raise ValueError("n_splits must be between 2 and 50")
+            if "test_size" in v and (v["test_size"] <= 0 or v["test_size"] >= 1):
+                raise ValueError("test_size must be between 0 and 1")
+
+        elif splitter_type == "CombinatorialPurgedCV":
+            # Validate combinatorial specific parameters
+            if "n_groups" in v and (v["n_groups"] < 2 or v["n_groups"] > 20):
+                raise ValueError("n_groups must be between 2 and 20")
+
+        return v
+
+
+class DataConfig(BaseModel):
+    """Configuration schema for data handling parameters."""
+
+    label_horizon: int = Field(
+        ge=0,
+        le=252,
+        default=20,
+        description="Forward-looking period of labels (in periods)",
+    )
+    embargo_pct: float = Field(
+        ge=0.0,
+        le=1.0,
+        default=0.01,
+        description="Embargo percentage to prevent leakage",
+    )
+    min_samples_per_fold: int = Field(
+        ge=10,
+        le=10000,
+        default=100,
+        description="Minimum number of samples required per fold",
+    )
+
+
+class VisualizationConfig(BaseModel):
+    """Configuration schema for visualization settings."""
+
+    theme: Literal["default", "dark", "light"] = Field(
+        default="default",
+        description="Visualization theme",
+    )
+    export_format: Literal["html", "png", "pdf", "svg"] = Field(
+        default="html",
+        description="Export format for visualizations",
+    )
+    include_dashboard: bool = Field(
+        default=True,
+        description="Whether to include interactive dashboard",
+    )
+
+
+class LoggingConfig(BaseModel):
+    """Configuration schema for logging settings."""
+
+    level: Literal["DEBUG", "INFO", "WARNING", "ERROR"] = Field(
+        default="INFO",
+        description="Logging level",
+    )
+    use_wandb: bool = Field(
+        default=False,
+        description="Whether to use Weights & Biases logging",
+    )
+    wandb_project: str | None = Field(default=None, description="W&B project name")
+    wandb_entity: str | None = Field(default=None, description="W&B entity name")
+
+    @model_validator(mode="after")
+    def validate_wandb_config(self):
+        """Validate W&B configuration consistency."""
+        if self.use_wandb and not self.wandb_project:
+            raise ValueError("wandb_project is required when use_wandb=True")
+        return self
+
+
+class EvaluatorConfig(BaseModel):
+    """Configuration schema for the main Evaluator class."""
+
+    tier: int = Field(
+        ge=1,
+        le=3,
+        default=2,
+        description="Validation tier level (1=rigorous, 2=standard, 3=fast)",
+    )
+    confidence_level: float = Field(
+        gt=0.0,
+        lt=1.0,
+        default=0.05,
+        description="Significance level for statistical tests",
+    )
+    bootstrap_samples: int = Field(
+        ge=100,
+        le=10000,
+        default=1000,
+        description="Number of bootstrap samples for confidence intervals",
+    )
+    random_state: int | None = Field(
+        ge=0,
+        le=2**31 - 1,
+        default=None,
+        description="Random seed for reproducible results",
+    )
+    n_jobs: int = Field(
+        ge=-1,
+        le=128,
+        default=1,
+        description="Number of parallel jobs (-1 for all cores)",
+    )
+
+
+class QEvalConfig(BaseModel):
+    """Complete configuration schema for ml4t-diagnostic evaluation workflows."""
+
+    evaluation: EvaluatorConfig = Field(
+        default_factory=EvaluatorConfig,
+        description="Main evaluator configuration",
+    )
+    splitter: SplitterConfig = Field(
+        description="Cross-validation splitter configuration",
+    )
+    metrics: list[Literal["ic", "sharpe", "sortino", "max_drawdown", "hit_rate"]] = Field(
+        default=["ic", "sharpe", "hit_rate"],
+        min_length=1,
+        max_length=10,
+        description="List of metrics to compute",
+    )
+    statistical_tests: dict[Literal["tier_1", "tier_2", "tier_3"], list[str]] = Field(
+        default={"tier_1": ["dsr", "fdr"], "tier_2": ["hac_ic"], "tier_3": []},
+        description="Statistical tests by tier",
+    )
+    data: DataConfig = Field(
+        default_factory=DataConfig,
+        description="Data handling configuration",
+    )
+    visualization: VisualizationConfig = Field(
+        default_factory=VisualizationConfig,
+        description="Visualization settings",
+    )
+    logging: LoggingConfig = Field(
+        default_factory=LoggingConfig,
+        description="Logging configuration",
+    )
+
+    @field_validator("metrics")
+    @classmethod
+    def validate_metrics_non_empty(cls, v: list[str]) -> list[str]:
+        """Ensure at least one metric is specified."""
+        if not v:
+            raise ValueError("At least one metric must be specified")
+        return v
+
+    @model_validator(mode="after")
+    def validate_tier_consistency(self):
+        """Validate configuration consistency across tiers."""
+        tier = self.evaluation.tier
+
+        # Tier 1 should use CombinatorialPurgedCV for maximum rigor
+        if tier == 1 and self.splitter.type != "CombinatorialPurgedCV":
+            raise ValueError(
+                "Tier 1 evaluation should use CombinatorialPurgedCV for maximum rigor",
+            )
+
+        # Tier 3 should have minimal statistical tests
+        if tier == 3 and len(self.statistical_tests.get("tier_3", [])) > 2:
+            raise ValueError(
+                "Tier 3 is designed for fast screening - limit statistical tests",
+            )
+
+        return self
+
+
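The schema classes above are easiest to read against a concrete instance. A minimal sketch follows before the manager class below (editor's illustration, not part of the packaged file; it assumes only the classes defined in this hunk and standard Pydantic v2 semantics):

# Build a config from defaults; only the splitter has no default and must be given.
from ml4t.diagnostic.utils.config import QEvalConfig, SplitterConfig

cfg = QEvalConfig(
    splitter=SplitterConfig(type="PurgedWalkForwardCV", params={"n_splits": 5, "test_size": 0.2}),
)
print(cfg.evaluation.tier)   # 2 (EvaluatorConfig default)
print(cfg.metrics)           # ['ic', 'sharpe', 'hit_rate']

# validate_tier_consistency rejects tier 1 without CombinatorialPurgedCV; the
# ValueError surfaces as a pydantic ValidationError at construction time.
try:
    QEvalConfig(evaluation={"tier": 1}, splitter=SplitterConfig(type="PurgedWalkForwardCV"))
except Exception as exc:
    print(type(exc).__name__)  # ValidationError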
+class EvaluationConfigManager:
+    """Enhanced configuration manager with Pydantic validation.
+
+    This class loads and validates YAML configuration files
+    for ml4t-diagnostic evaluation pipelines using Pydantic schemas.
+    """
+
+    def __init__(self, config_path: str | Path | None = None):
+        """Initialize configuration manager.
+
+        Parameters
+        ----------
+        config_path : str or Path, optional
+            Path to YAML configuration file. If None, uses defaults.
+        """
+        # Start with default configuration
+        default_config = self._create_default_config()
+
+        if config_path is not None:
+            # Load and merge user configuration
+            user_config = self._load_from_yaml(config_path)
+            self.config = self._merge_configs(default_config, user_config)
+        else:
+            self.config = default_config
+
+    def _create_default_config(self) -> QEvalConfig:
+        """Create default configuration with all required fields."""
+        return QEvalConfig(
+            splitter=SplitterConfig(
+                type="PurgedWalkForwardCV",
+                params={
+                    "n_splits": 5,
+                    "test_size": 0.2,
+                    "gap": 0,
+                    "expanding": True,
+                },
+            ),
+        )
+
+    def _load_from_yaml(self, config_path: str | Path) -> dict[str, Any]:
+        """Load configuration from YAML file with validation.
+
+        Parameters
+        ----------
+        config_path : str or Path
+            Path to YAML configuration file
+
+        Returns:
+        -------
+        dict
+            Raw configuration dictionary
+
+        Raises:
+        ------
+        ConfigError
+            If file cannot be loaded or contains invalid YAML
+        """
+        config_path = Path(config_path)
+
+        if not config_path.exists():
+            raise ConfigError(f"Configuration file not found: {config_path}")
+
+        try:
+            with open(config_path) as f:
+                user_config = yaml.safe_load(f)
+        except yaml.YAMLError as e:
+            raise ConfigError(f"Invalid YAML in {config_path}: {e}") from e
+
+        if user_config is None:
+            user_config = {}
+
+        return user_config
+
+    def _merge_configs(
+        self,
+        base_config: QEvalConfig,
+        user_config: dict[str, Any],
+    ) -> QEvalConfig:
+        """Merge user configuration with base configuration using Pydantic validation.
+
+        Parameters
+        ----------
+        base_config : QEvalConfig
+            Base configuration schema
+        user_config : dict
+            User configuration from YAML
+
+        Returns:
+        -------
+        QEvalConfig
+            Validated and merged configuration
+
+        Raises:
+        ------
+        ConfigError
+            If user configuration is invalid
+        """
+        try:
+            # Convert base config to dict for merging
+            base_dict = base_config.model_dump()
+
+            # Recursively merge dictionaries
+            merged_dict = self._deep_merge_dicts(base_dict, user_config)
+
+            # Validate merged configuration with Pydantic
+            return QEvalConfig.model_validate(merged_dict)
+
+        except Exception as e:
+            raise ConfigError(f"Configuration validation failed: {e}") from e
+
+    def _deep_merge_dicts(
+        self,
+        base: dict[str, Any],
+        override: dict[str, Any],
+    ) -> dict[str, Any]:
+        """Recursively merge two dictionaries."""
+        merged = base.copy()
+
+        for key, value in override.items():
+            if key in merged and isinstance(merged[key], dict) and isinstance(value, dict):
+                merged[key] = self._deep_merge_dicts(merged[key], value)
+            else:
+                merged[key] = value
+
+        return merged
+
+    def get(self, key: str, default: Any = None) -> Any:
+        """Get configuration value by dot-separated key path.
+
+        Parameters
+        ----------
+        key : str
+            Dot-separated key path (e.g., 'evaluation.tier')
+        default : Any, optional
+            Default value if key not found
+
+        Returns:
+        -------
+        Any
+            Configuration value
+        """
+        keys = key.split(".")
+        value = self.config.model_dump()
+
+        try:
+            for k in keys:
+                value = value[k]
+            return value
+        except (KeyError, TypeError):
+            return default
+
+    def validate(self) -> None:
+        """Validate the current configuration.
+
+        This method is automatically called during initialization,
+        but can be used to re-validate after manual modifications.
+
+        Raises:
+        ------
+        ConfigError
+            If configuration is invalid
+        """
+        try:
+            # Pydantic validation happens automatically during model creation
+            # This method is kept for API compatibility
+            self.config.model_validate(self.config.model_dump())
+        except Exception as e:
+            raise ConfigError(f"Configuration validation failed: {e}") from e
+
+    def save_to_yaml(self, config_path: str | Path) -> None:
+        """Save current configuration to YAML file.
+
+        Parameters
+        ----------
+        config_path : str or Path
+            Path where to save the configuration
+        """
+        config_path = Path(config_path)
+
+        try:
+            with open(config_path, "w") as f:
+                # Convert Pydantic model to dict and save as YAML
+                config_dict = self.config.model_dump(exclude_none=True)
+                yaml.dump(config_dict, f, default_flow_style=False, sort_keys=False)
+        except OSError as e:
+            raise ConfigError(f"Cannot write to {config_path}: {e}") from e
+
+    def create_evaluator(self) -> Any:
+        """Create Evaluator instance from configuration.
+
+        Returns:
+        -------
+        Evaluator
+            Configured evaluator instance
+        """
+        from ml4t.diagnostic.evaluation.framework import Evaluator
+        from ml4t.diagnostic.splitters import CombinatorialPurgedCV, PurgedWalkForwardCV
+
+        # Create splitter
+        splitter_type = self.config.splitter.type
+        splitter_params = self.config.splitter.params.copy()
+
+        # Add data-specific parameters
+        if "label_horizon" not in splitter_params:
+            splitter_params["label_horizon"] = self.config.data.label_horizon
+        if "embargo_pct" not in splitter_params:
+            splitter_params["embargo_pct"] = self.config.data.embargo_pct
+
+        if splitter_type == "PurgedWalkForwardCV":
+            splitter = PurgedWalkForwardCV(**splitter_params)
+        else:  # CombinatorialPurgedCV
+            splitter = CombinatorialPurgedCV(**splitter_params)
+
+        # Get tier-specific configuration
+        tier = self.config.evaluation.tier
+        tier_key = cast(Literal["tier_1", "tier_2", "tier_3"], f"tier_{tier}")
+        statistical_tests = self.config.statistical_tests[tier_key]
+
+        # Create evaluator
+        evaluator = Evaluator(
+            splitter=splitter,
+            metrics=list(self.config.metrics) if self.config.metrics else None,
+            statistical_tests=statistical_tests,
+            tier=tier,
+            confidence_level=self.config.evaluation.confidence_level,
+            bootstrap_samples=self.config.evaluation.bootstrap_samples,
+            random_state=self.config.evaluation.random_state,
+            n_jobs=self.config.evaluation.n_jobs,
+        )
+
+        return evaluator
+
+    def __repr__(self) -> str:
+        """String representation of the configuration."""
+        return f"EvaluationConfigManager(tier={self.config.evaluation.tier}, metrics={self.config.metrics})"
+
+
+# Backward compatibility alias
+EvaluationConfig = EvaluationConfigManager
+
+
+def load_config(
+    config_path: str | Path | None = None,
+) -> EvaluationConfigManager:
+    """Load configuration from file or environment.
+
+    Parameters
+    ----------
+    config_path : str or Path, optional
+        Path to configuration file. If None, checks the QEVAL_CONFIG
+        environment variable, then looks for mlquant.evaluation.yaml in the
+        current directory.
+
+    Returns:
+    -------
+    EvaluationConfigManager
+        Loaded configuration
+    """
+    if config_path is None:
+        # Check environment variable
+        config_path = os.environ.get("QEVAL_CONFIG")
+
+    if config_path is None:
+        # Check current directory
+        default_path = Path("mlquant.evaluation.yaml")
+        if default_path.exists():
+            config_path = default_path
+
+    return EvaluationConfigManager(config_path)
+
+
+# Example configuration template
+EXAMPLE_CONFIG = """# ml4t-diagnostic Configuration File
+# =======================
+
+evaluation:
+  tier: 2                    # Validation tier (1, 2, or 3)
+  confidence_level: 0.05     # Significance level for tests
+  bootstrap_samples: 1000    # Number of bootstrap samples
+  random_state: 42           # Random seed for reproducibility
+  n_jobs: 1                  # Number of parallel jobs
+
+splitter:
+  type: PurgedWalkForwardCV  # or CombinatorialPurgedCV
+  params:
+    n_splits: 5
+    test_size: 0.2
+    gap: 0
+    expanding: true
+
+metrics:
+  - ic
+  - sharpe
+  - hit_rate
+  - max_drawdown
+
+statistical_tests:
+  tier_1:
+    - dsr
+    - fdr
+  tier_2:
+    - hac_ic
+  tier_3: []
+
+data:
+  label_horizon: 20          # Forward-looking period for labels
+  embargo_pct: 0.01          # Embargo as percentage of data
+  min_samples_per_fold: 100  # Minimum samples per CV fold
+
+visualization:
+  theme: default             # Visualization theme
+  export_format: html        # Output format (html, png, svg)
+  include_dashboard: true    # Generate full dashboard
+
+logging:
+  level: INFO
+  use_wandb: false
+  wandb_project: null
+  wandb_entity: null
+"""
+
+
+def create_example_config(output_path: str | Path = "mlquant.evaluation.yaml") -> None:
+    """Create an example configuration file.
+
+    Parameters
+    ----------
+    output_path : str or Path
+        Path for example configuration file
+    """
+    with open(output_path, "w") as f:
+        f.write(EXAMPLE_CONFIG)
+    print(f"Example configuration created at: {output_path}")
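Taken together, the manager supports a small end-to-end workflow. A sketch of the intended round trip (editor's illustration inferred from the source above rather than from the package's own documentation; resolved_config.yaml is an arbitrary output name, and create_evaluator additionally depends on the Evaluator and splitter modules elsewhere in this wheel):

# Resolve a config from QEVAL_CONFIG, ./mlquant.evaluation.yaml, or built-in defaults.
from ml4t.diagnostic.utils.config import load_config

cfg = load_config()
print(cfg)                                  # EvaluationConfigManager(tier=2, metrics=['ic', 'sharpe', 'hit_rate'])
print(cfg.get("splitter.params.n_splits"))  # 5 with the built-in defaults

# Persist the merged, validated configuration and build the evaluator it describes.
cfg.save_to_yaml("resolved_config.yaml")
evaluator = cfg.create_evaluator()          # imports Evaluator and the configured splitter lazily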