aponyx-0.1.18-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. aponyx/__init__.py +14 -0
  2. aponyx/backtest/__init__.py +31 -0
  3. aponyx/backtest/adapters.py +77 -0
  4. aponyx/backtest/config.py +84 -0
  5. aponyx/backtest/engine.py +560 -0
  6. aponyx/backtest/protocols.py +101 -0
  7. aponyx/backtest/registry.py +334 -0
  8. aponyx/backtest/strategy_catalog.json +50 -0
  9. aponyx/cli/__init__.py +5 -0
  10. aponyx/cli/commands/__init__.py +8 -0
  11. aponyx/cli/commands/clean.py +349 -0
  12. aponyx/cli/commands/list.py +302 -0
  13. aponyx/cli/commands/report.py +167 -0
  14. aponyx/cli/commands/run.py +377 -0
  15. aponyx/cli/main.py +125 -0
  16. aponyx/config/__init__.py +82 -0
  17. aponyx/data/__init__.py +99 -0
  18. aponyx/data/bloomberg_config.py +306 -0
  19. aponyx/data/bloomberg_instruments.json +26 -0
  20. aponyx/data/bloomberg_securities.json +42 -0
  21. aponyx/data/cache.py +294 -0
  22. aponyx/data/fetch.py +659 -0
  23. aponyx/data/fetch_registry.py +135 -0
  24. aponyx/data/loaders.py +205 -0
  25. aponyx/data/providers/__init__.py +13 -0
  26. aponyx/data/providers/bloomberg.py +383 -0
  27. aponyx/data/providers/file.py +111 -0
  28. aponyx/data/registry.py +500 -0
  29. aponyx/data/requirements.py +96 -0
  30. aponyx/data/sample_data.py +415 -0
  31. aponyx/data/schemas.py +60 -0
  32. aponyx/data/sources.py +171 -0
  33. aponyx/data/synthetic_params.json +46 -0
  34. aponyx/data/transforms.py +336 -0
  35. aponyx/data/validation.py +308 -0
  36. aponyx/docs/__init__.py +24 -0
  37. aponyx/docs/adding_data_providers.md +682 -0
  38. aponyx/docs/cdx_knowledge_base.md +455 -0
  39. aponyx/docs/cdx_overlay_strategy.md +135 -0
  40. aponyx/docs/cli_guide.md +607 -0
  41. aponyx/docs/governance_design.md +551 -0
  42. aponyx/docs/logging_design.md +251 -0
  43. aponyx/docs/performance_evaluation_design.md +265 -0
  44. aponyx/docs/python_guidelines.md +786 -0
  45. aponyx/docs/signal_registry_usage.md +369 -0
  46. aponyx/docs/signal_suitability_design.md +558 -0
  47. aponyx/docs/visualization_design.md +277 -0
  48. aponyx/evaluation/__init__.py +11 -0
  49. aponyx/evaluation/performance/__init__.py +24 -0
  50. aponyx/evaluation/performance/adapters.py +109 -0
  51. aponyx/evaluation/performance/analyzer.py +384 -0
  52. aponyx/evaluation/performance/config.py +320 -0
  53. aponyx/evaluation/performance/decomposition.py +304 -0
  54. aponyx/evaluation/performance/metrics.py +761 -0
  55. aponyx/evaluation/performance/registry.py +327 -0
  56. aponyx/evaluation/performance/report.py +541 -0
  57. aponyx/evaluation/suitability/__init__.py +67 -0
  58. aponyx/evaluation/suitability/config.py +143 -0
  59. aponyx/evaluation/suitability/evaluator.py +389 -0
  60. aponyx/evaluation/suitability/registry.py +328 -0
  61. aponyx/evaluation/suitability/report.py +398 -0
  62. aponyx/evaluation/suitability/scoring.py +367 -0
  63. aponyx/evaluation/suitability/tests.py +303 -0
  64. aponyx/examples/01_generate_synthetic_data.py +53 -0
  65. aponyx/examples/02_fetch_data_file.py +82 -0
  66. aponyx/examples/03_fetch_data_bloomberg.py +104 -0
  67. aponyx/examples/04_compute_signal.py +164 -0
  68. aponyx/examples/05_evaluate_suitability.py +224 -0
  69. aponyx/examples/06_run_backtest.py +242 -0
  70. aponyx/examples/07_analyze_performance.py +214 -0
  71. aponyx/examples/08_visualize_results.py +272 -0
  72. aponyx/main.py +7 -0
  73. aponyx/models/__init__.py +45 -0
  74. aponyx/models/config.py +83 -0
  75. aponyx/models/indicator_transformation.json +52 -0
  76. aponyx/models/indicators.py +292 -0
  77. aponyx/models/metadata.py +447 -0
  78. aponyx/models/orchestrator.py +213 -0
  79. aponyx/models/registry.py +860 -0
  80. aponyx/models/score_transformation.json +42 -0
  81. aponyx/models/signal_catalog.json +29 -0
  82. aponyx/models/signal_composer.py +513 -0
  83. aponyx/models/signal_transformation.json +29 -0
  84. aponyx/persistence/__init__.py +16 -0
  85. aponyx/persistence/json_io.py +132 -0
  86. aponyx/persistence/parquet_io.py +378 -0
  87. aponyx/py.typed +0 -0
  88. aponyx/reporting/__init__.py +10 -0
  89. aponyx/reporting/generator.py +517 -0
  90. aponyx/visualization/__init__.py +20 -0
  91. aponyx/visualization/app.py +37 -0
  92. aponyx/visualization/plots.py +309 -0
  93. aponyx/visualization/visualizer.py +242 -0
  94. aponyx/workflows/__init__.py +18 -0
  95. aponyx/workflows/concrete_steps.py +720 -0
  96. aponyx/workflows/config.py +122 -0
  97. aponyx/workflows/engine.py +279 -0
  98. aponyx/workflows/registry.py +116 -0
  99. aponyx/workflows/steps.py +180 -0
  100. aponyx-0.1.18.dist-info/METADATA +552 -0
  101. aponyx-0.1.18.dist-info/RECORD +104 -0
  102. aponyx-0.1.18.dist-info/WHEEL +4 -0
  103. aponyx-0.1.18.dist-info/entry_points.txt +2 -0
  104. aponyx-0.1.18.dist-info/licenses/LICENSE +21 -0
aponyx/examples/02_fetch_data_file.py
@@ -0,0 +1,82 @@
+ """
+ Load all market data instruments from file sources.
+
+ Prerequisites
+ -------------
+ Raw data files must exist in data/raw/synthetic/ with registry.json:
+ - cdx_ig_5y_{hash}.parquet (CDX IG 5Y spreads)
+ - cdx_ig_10y_{hash}.parquet (CDX IG 10Y spreads)
+ - cdx_hy_5y_{hash}.parquet (CDX HY 5Y spreads)
+ - itrx_xover_5y_{hash}.parquet (iTraxx Crossover 5Y spreads)
+ - itrx_eur_5y_{hash}.parquet (iTraxx Europe 5Y spreads)
+ - vix_{hash}.parquet (VIX volatility index)
+ - hyg_{hash}.parquet (HYG high yield ETF)
+ - lqd_{hash}.parquet (LQD investment grade ETF)
+ - registry.json (security-to-file mapping)
+
+ Run scripts/generate_synthetic.py first if files don't exist.
+
+ Outputs
+ -------
+ Validated DataFrames for each instrument:
+ - CDX instruments: spread column with DatetimeIndex
+ - VIX: level column with DatetimeIndex
+ - ETF instruments: close column with DatetimeIndex
+
+ All data validated against schema expectations.
+
+ Examples
+ --------
+ Run from project root:
+     python -m aponyx.examples.02_fetch_data_file
+
+ Expected output: Eight validated DataFrames with ~1260 rows each.
+ """
+
+ import pandas as pd
+
+ from aponyx.config import RAW_DIR
+ from aponyx.data import fetch_cdx, fetch_vix, fetch_etf, FileSource
+ from aponyx.data.bloomberg_config import list_securities
+
+
+ def main() -> dict[str, pd.DataFrame]:
+     """
+     Load and validate all market data from file sources.
+
+     Loads all instruments defined in bloomberg_securities.json from
+     the synthetic data directory. Uses FileSource with registry-based
+     lookup for security-to-file mapping.
+
+     Returns
+     -------
+     dict[str, pd.DataFrame]
+         Dictionary mapping security IDs to validated DataFrames.
+     """
+     synthetic_dir = RAW_DIR / "synthetic"
+
+     # Initialize FileSource with registry (auto-loads registry.json)
+     source = FileSource(synthetic_dir)
+
+     data = {}
+
+     # Load CDX instruments
+     cdx_securities = list_securities(instrument_type="cdx")
+     for security in cdx_securities:
+         df = fetch_cdx(source, security=security)
+         data[security] = df
+
+     # Load VIX
+     data["vix"] = fetch_vix(source, security="vix")
+
+     # Load ETF instruments
+     etf_securities = list_securities(instrument_type="etf")
+     for security in etf_securities:
+         df = fetch_etf(source, security=security)
+         data[security] = df
+
+     return data
+
+
+ if __name__ == "__main__":
+     main()
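For orientation, the loading pattern above can also be exercised for a single security; a minimal sketch reusing the same FileSource and fetch_cdx calls shown in this file (it assumes the synthetic files and registry.json already exist):

    from aponyx.config import RAW_DIR
    from aponyx.data import FileSource, fetch_cdx

    # FileSource resolves security IDs to parquet files via registry.json
    source = FileSource(RAW_DIR / "synthetic")
    df = fetch_cdx(source, security="cdx_ig_5y")

    print(len(df))                          # ~1260 rows (~5 years of business days)
    print(df.index.min(), df.index.max())   # DatetimeIndex range
    print(df["spread"].tail())              # validated spread column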
aponyx/examples/03_fetch_data_bloomberg.py
@@ -0,0 +1,104 @@
+ """
+ Load all market data instruments from Bloomberg Terminal.
+
+ Prerequisites
+ -------------
+ Active Bloomberg Terminal session required.
+ Bloomberg securities configured in data/bloomberg_securities.json:
+ - CDX instruments (IG 5Y, IG 10Y, HY 5Y, iTraxx Europe 5Y, iTraxx Crossover 5Y)
+ - VIX volatility index
+ - Credit ETFs (HYG, LQD)
+
+ Note: Bloomberg data is automatically saved to data/raw/bloomberg/ with
+ hash-based naming for permanent storage.
+
+ Outputs
+ -------
+ Validated DataFrames for each instrument:
+ - CDX instruments: spread column with DatetimeIndex
+ - VIX: close column with DatetimeIndex
+ - ETF instruments: close column with DatetimeIndex
+
+ Data saved to:
+ - Raw storage: data/raw/bloomberg/{security}_{hash}.parquet (permanent)
+ - Cache: data/cache/bloomberg_{instrument}_{hash}.parquet (temporary)
+
+ Examples
+ --------
+ Run from project root:
+     python -m aponyx.examples.03_fetch_data_bloomberg
+
+ Expected output: Eight validated DataFrames with historical data.
+ Date range depends on Bloomberg data availability (typically 5+ years).
+ """
+
+ from datetime import datetime, timedelta
+
+ import pandas as pd
+
+ from aponyx.data import fetch_cdx, fetch_vix, fetch_etf, BloombergSource
+ from aponyx.data.bloomberg_config import list_securities
+
+
+ def main() -> dict[str, pd.DataFrame]:
+     """
+     Load and validate all market data from Bloomberg Terminal.
+
+     Fetches all securities defined in bloomberg_securities.json.
+     Uses fetch interface with BloombergSource for automatic validation,
+     caching, and raw storage.
+
+     Returns
+     -------
+     dict[str, pd.DataFrame]
+         Dictionary mapping security IDs to validated DataFrames.
+
+     Notes
+     -----
+     Data is automatically saved to raw/bloomberg/ for permanent storage.
+     Subsequent calls use cache unless data is stale (see CACHE_TTL_DAYS config).
+     """
+     from aponyx.data.bloomberg_config import get_security_spec
+
+     end_date = datetime.now().strftime("%Y-%m-%d")
+     start_date = (datetime.now() - timedelta(days=5 * 365)).strftime("%Y-%m-%d")
+
+     source = BloombergSource()
+     data = {}
+
+     # Load all securities from catalog
+     all_securities = list_securities()
+     for security_id in all_securities:
+         spec = get_security_spec(security_id)
+         instrument_type = spec.instrument_type
+
+         if instrument_type == "vix":
+             df = fetch_vix(
+                 source,
+                 start_date=start_date,
+                 end_date=end_date,
+             )
+         elif instrument_type == "etf":
+             df = fetch_etf(
+                 source,
+                 security=security_id,
+                 start_date=start_date,
+                 end_date=end_date,
+             )
+         elif instrument_type == "cdx":
+             df = fetch_cdx(
+                 source,
+                 security=security_id,
+                 start_date=start_date,
+                 end_date=end_date,
+             )
+         else:
+             raise ValueError(f"Unknown instrument type: {instrument_type}")
+
+         data[security_id] = df
+
+     return data
+
+
+ if __name__ == "__main__":
+     main()
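The same fetch interface can be pointed at a subset of the catalog; a brief sketch, assuming a live Terminal session, that pulls only the CDX securities for the past year using the calls shown above:

    from datetime import datetime, timedelta

    from aponyx.data import BloombergSource, fetch_cdx
    from aponyx.data.bloomberg_config import list_securities

    end_date = datetime.now().strftime("%Y-%m-%d")
    start_date = (datetime.now() - timedelta(days=365)).strftime("%Y-%m-%d")

    source = BloombergSource()
    # Restrict the catalog lookup to CDX instruments only
    cdx_data = {
        security: fetch_cdx(
            source, security=security, start_date=start_date, end_date=end_date
        )
        for security in list_securities(instrument_type="cdx")
    }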
aponyx/examples/04_compute_signal.py
@@ -0,0 +1,164 @@
+ """
+ Compute all enabled signals from catalog using market data.
+
+ Prerequisites
+ -------------
+ Data fetched from previous step (02_fetch_data_file.py or 03_fetch_data_bloomberg.py):
+ - Cached data in data/cache/{provider}/ for required instruments
+ - Data registry populated with dataset entries
+
+ Workflow
+ --------
+ 1. Determine required data keys from ALL enabled signals
+ 2. Load all required market data once from registry
+ 3. Compute all enabled signals via four-stage transformation pipeline
+ 4. Individual signals then used separately for evaluation/backtesting
+
+ Four-Stage Transformation Pipeline
+ ----------------------------------
+ Security → Indicator → Score → Signal → Position
+
+ 1. Indicator Transformation: Compute economic metric (e.g., spread difference in bps)
+ 2. Score Transformation: Normalize indicator (e.g., z-score)
+ 3. Signal Transformation: Apply trading rules (floor, cap, neutral_range)
+ 4. Position Calculation: Backtest layer (out of scope for this script)
+
+ Outputs
+ -------
+ Dict of computed signals (one pd.Series per enabled signal).
+ Saved to data/workflows/signals/{signal_name}.parquet for next steps.
+
+ Examples
+ --------
+ Run from project root:
+     python -m aponyx.examples.04_compute_signal
+
+ Returns dict with signal names as keys and pd.Series as values.
+ Expected: 3 signals (cdx_etf_basis, cdx_vix_gap, spread_momentum).
+ """
+
+ import pandas as pd
+
+ from aponyx.config import (
+     REGISTRY_PATH,
+     DATA_DIR,
+     SIGNAL_CATALOG_PATH,
+     DATA_WORKFLOWS_DIR,
+     INDICATOR_TRANSFORMATION_PATH,
+ )
+ from aponyx.data import DataRegistry
+ from aponyx.models import SignalRegistry, compute_registered_signals
+ from aponyx.models.registry import (
+     IndicatorTransformationRegistry,
+ )
+ from aponyx.persistence import save_parquet
+
+
+ def main() -> dict[str, pd.Series]:
+     """
+     Execute batch signal computation workflow.
+
+     Loads all required market data from registry, then computes
+     all enabled signals via the four-stage transformation pipeline.
+
+     Returns
+     -------
+     dict[str, pd.Series]
+         Mapping from signal name to computed signal series.
+     """
+     market_data = load_all_required_data()
+     signals = compute_all_signals(market_data)
+     save_all_signals(signals)
+     return signals
+
+
+ def load_all_required_data() -> dict[str, pd.DataFrame]:
+     """
+     Load all market data required by enabled signals.
+
+     Uses default_securities from each indicator's metadata to determine
+     which specific securities to load for each instrument type.
+
+     Returns
+     -------
+     dict[str, pd.DataFrame]
+         Market data mapping with all required instruments.
+         Keys are generic identifiers (e.g., "cdx", "etf", "vix").
+
+     Notes
+     -----
+     Collects data requirements from indicator_transformation.json
+     based on which indicators are referenced by enabled signals.
+     """
+     data_registry = DataRegistry(REGISTRY_PATH, DATA_DIR)
+     signal_registry = SignalRegistry(SIGNAL_CATALOG_PATH)
+     indicator_registry = IndicatorTransformationRegistry(INDICATOR_TRANSFORMATION_PATH)
+
+     # Build mapping from instrument type to security ID
+     # by collecting default_securities from indicators used by enabled signals
+     instrument_to_security: dict[str, str] = {}
+     for signal_name, signal_meta in signal_registry.get_enabled().items():
+         indicator_meta = indicator_registry.get_metadata(
+             signal_meta.indicator_transformation
+         )
+         for inst_type, security_id in indicator_meta.default_securities.items():
+             instrument_to_security[inst_type] = security_id
+
+     # Load data for each instrument type using the mapped security
+     market_data: dict[str, pd.DataFrame] = {}
+     for inst_type, security_id in sorted(instrument_to_security.items()):
+         df = data_registry.load_dataset_by_security(security_id)
+         market_data[inst_type] = df
+
+     return market_data
+
+
+ def compute_all_signals(
+     market_data: dict[str, pd.DataFrame],
+ ) -> dict[str, pd.Series]:
+     """
+     Compute all enabled signals using four-stage transformation pipeline.
+
+     Parameters
+     ----------
+     market_data : dict[str, pd.DataFrame]
+         Complete market data with all required instruments.
+
+     Returns
+     -------
+     dict[str, pd.Series]
+         Mapping from signal name to computed signal series.
+
+     Notes
+     -----
+     Orchestrator computes ALL enabled signals in one pass via compose_signal().
+     Individual signals are then selected for evaluation/backtesting.
+     """
+     signal_registry = SignalRegistry(SIGNAL_CATALOG_PATH)
+     return compute_registered_signals(signal_registry, market_data)
+
+
+ def save_all_signals(signals: dict[str, pd.Series]) -> None:
+     """
+     Save computed signals to workflows directory.
+
+     Parameters
+     ----------
+     signals : dict[str, pd.Series]
+         Mapping from signal name to computed signal series.
+
+     Notes
+     -----
+     Saves each signal as data/workflows/signals/{signal_name}.parquet.
+     """
+     signals_dir = DATA_WORKFLOWS_DIR / "signals"
+     signals_dir.mkdir(parents=True, exist_ok=True)
+
+     for signal_name, signal_series in signals.items():
+         signal_path = signals_dir / f"{signal_name}.parquet"
+         signal_df = signal_series.to_frame(name="value")
+         save_parquet(signal_df, signal_path)
+
+
+ if __name__ == "__main__":
+     main()
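To make the four-stage pipeline from the module docstring concrete, a toy pandas sketch of the indicator → score → signal stages (conceptual only; the 252-day window, ±2.0 cap, and 0.25 neutral range are illustrative values, not the catalog's configured transformations):

    import pandas as pd

    def toy_signal(cdx_spread: pd.Series, etf_implied_spread: pd.Series) -> pd.Series:
        # Stage 1 - indicator: an economic metric, e.g. a spread difference in bps
        indicator = cdx_spread - etf_implied_spread

        # Stage 2 - score: normalize the indicator, e.g. a rolling z-score
        window = 252  # illustrative lookback
        score = (indicator - indicator.rolling(window).mean()) / indicator.rolling(window).std()

        # Stage 3 - signal: apply trading rules (floor, cap, neutral range)
        signal = score.clip(lower=-2.0, upper=2.0)        # floor / cap
        signal = signal.where(signal.abs() > 0.25, 0.0)   # neutral range

        # Stage 4 - position calculation belongs to the backtest layer, not this script
        return signal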
aponyx/examples/05_evaluate_suitability.py
@@ -0,0 +1,224 @@
+ """
+ Evaluate signal-product suitability before backtesting.
+
+ Prerequisites
+ -------------
+ Signals saved from previous step (04_compute_signal.py):
+ - Signal files exist in data/workflows/signals/{signal_name}.parquet
+ - CDX spread data available from registry
+
+ Outputs
+ -------
+ SuitabilityResult with decision and component scores:
+ - Decision: PASS, HOLD, or FAIL
+ - Component scores: data_health, predictive, economic, stability
+ - Suitability report saved to data/workflows/reports/{signal_name}_{product}.md
+ - Evaluation registered in suitability_registry.json
+
+ Examples
+ --------
+ Run from project root:
+     python -m aponyx.examples.05_evaluate_suitability
+
+ Expected output: SuitabilityResult with PASS/HOLD/FAIL decision.
+ Report saved to data/workflows/reports/spread_momentum_cdx_ig_5y.md.
+ """
+
+ import pandas as pd
+
+ from aponyx.config import (
+     REGISTRY_PATH,
+     DATA_DIR,
+     DATA_WORKFLOWS_DIR,
+     SUITABILITY_REGISTRY_PATH,
+ )
+ from aponyx.data.registry import DataRegistry
+ from aponyx.evaluation.suitability import (
+     SuitabilityConfig,
+     SuitabilityResult,
+     evaluate_signal_suitability,
+     compute_forward_returns,
+     generate_suitability_report,
+     save_report,
+     SuitabilityRegistry,
+ )
+ from aponyx.persistence import load_parquet
+
+
+ def main() -> SuitabilityResult:
+     """
+     Execute suitability evaluation workflow.
+
+     Evaluates one signal against its target product using
+     4-component scoring framework.
+
+     Returns
+     -------
+     SuitabilityResult
+         Evaluation result with decision and component scores.
+     """
+     signal_name, product = define_evaluation_pair()
+     signal, target_change = prepare_evaluation_data(signal_name, product)
+     config = define_evaluation_config()
+     result = evaluate_suitability(signal, target_change, config)
+     save_and_register_evaluation(result, signal_name, product)
+     return result
+
+
+ def define_evaluation_pair() -> tuple[str, str]:
+     """
+     Define signal-product pair for evaluation.
+
+     Returns
+     -------
+     tuple[str, str]
+         Signal name and product identifier.
+
+     Notes
+     -----
+     Choose one signal from catalog for demonstration.
+     In practice, evaluate all enabled signals separately.
+     """
+     signal_name = "spread_momentum"
+     product = "cdx_ig_5y"
+     return signal_name, product
+
+
+ def prepare_evaluation_data(
+     signal_name: str,
+     product: str,
+ ) -> tuple[pd.Series, pd.Series]:
+     """
+     Load signal and compute target returns for evaluation.
+
+     Parameters
+     ----------
+     signal_name : str
+         Name of signal to load from the workflows signals directory.
+     product : str
+         Product identifier for target returns.
+
+     Returns
+     -------
+     tuple[pd.Series, pd.Series]
+         Signal series and target change series (aligned).
+
+     Notes
+     -----
+     Loads signal saved by previous step (04_compute_signal.py).
+     Target is forward spread change (positive = widening).
+     """
+     signal = load_signal(signal_name)
+     spread_df = load_spread_data(product)
+
+     # Compute forward returns for 1-day ahead (default evaluation horizon)
+     forward_returns = compute_forward_returns(spread_df["spread"], lags=[1])
+     target_change = forward_returns[1]
+
+     return signal, target_change
+
+
+ def load_signal(signal_name: str) -> pd.Series:
+     """
+     Load signal from workflows directory.
+
+     Parameters
+     ----------
+     signal_name : str
+         Name of signal file (without .parquet extension).
+
+     Returns
+     -------
+     pd.Series
+         Signal series with DatetimeIndex.
+     """
+     signal_path = DATA_WORKFLOWS_DIR / "signals" / f"{signal_name}.parquet"
+     signal_df = load_parquet(signal_path)
+     return signal_df["value"]
+
+
+ def load_spread_data(product: str) -> pd.DataFrame:
+     """
+     Load spread data for target product.
+
+     Parameters
+     ----------
+     product : str
+         Product identifier (e.g., "cdx_ig_5y").
+
+     Returns
+     -------
+     pd.DataFrame
+         Spread data with DatetimeIndex.
+
+     Notes
+     -----
+     Uses DataRegistry.load_dataset_by_security() for efficient lookup.
+     """
+     data_registry = DataRegistry(REGISTRY_PATH, DATA_DIR)
+     return data_registry.load_dataset_by_security(product)
+
+
+ def define_evaluation_config() -> SuitabilityConfig:
+     """
+     Define suitability evaluation configuration.
+
+     Returns
+     -------
+     SuitabilityConfig
+         Configuration with test parameters and thresholds.
+     """
+     return SuitabilityConfig()
+
+
+ def evaluate_suitability(
+     signal: pd.Series,
+     target_change: pd.Series,
+     config: SuitabilityConfig,
+ ) -> SuitabilityResult:
+     """
+     Run suitability evaluation with 4-component scoring.
+
+     Parameters
+     ----------
+     signal : pd.Series
+         Signal to evaluate.
+     target_change : pd.Series
+         Forward target returns.
+     config : SuitabilityConfig
+         Evaluation configuration.
+
+     Returns
+     -------
+     SuitabilityResult
+         Evaluation result with decision and component scores.
+     """
+     return evaluate_signal_suitability(signal, target_change, config)
+
+
+ def save_and_register_evaluation(
+     result: SuitabilityResult,
+     signal_name: str,
+     product: str,
+ ) -> None:
+     """
+     Save markdown report and register evaluation.
+
+     Parameters
+     ----------
+     result : SuitabilityResult
+         Evaluation result.
+     signal_name : str
+         Name of evaluated signal.
+     product : str
+         Product identifier.
+     """
+     report = generate_suitability_report(result, signal_name, product)
+     save_report(report, signal_name, product, DATA_WORKFLOWS_DIR / "reports")
+
+     registry = SuitabilityRegistry(SUITABILITY_REGISTRY_PATH)
+     registry.register_evaluation(result, signal_name, product)
+
+
+ if __name__ == "__main__":
+     main()
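Swapping the pair returned by define_evaluation_pair() is all that is needed to evaluate another combination; a short sketch reusing the helpers imported above (the cdx_vix_gap / cdx_hy_5y pairing is illustrative, though both identifiers appear in the catalogs shown earlier):

    from aponyx.config import DATA_DIR, DATA_WORKFLOWS_DIR, REGISTRY_PATH
    from aponyx.data.registry import DataRegistry
    from aponyx.evaluation.suitability import (
        SuitabilityConfig,
        compute_forward_returns,
        evaluate_signal_suitability,
    )
    from aponyx.persistence import load_parquet

    # Signal saved by 04_compute_signal.py; spreads come from the data registry
    signal = load_parquet(DATA_WORKFLOWS_DIR / "signals" / "cdx_vix_gap.parquet")["value"]
    spreads = DataRegistry(REGISTRY_PATH, DATA_DIR).load_dataset_by_security("cdx_hy_5y")

    # 1-day forward spread change as the target, then the 4-component evaluation
    target_change = compute_forward_returns(spreads["spread"], lags=[1])[1]
    result = evaluate_signal_suitability(signal, target_change, SuitabilityConfig())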