aponyx-0.1.18-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aponyx/__init__.py +14 -0
- aponyx/backtest/__init__.py +31 -0
- aponyx/backtest/adapters.py +77 -0
- aponyx/backtest/config.py +84 -0
- aponyx/backtest/engine.py +560 -0
- aponyx/backtest/protocols.py +101 -0
- aponyx/backtest/registry.py +334 -0
- aponyx/backtest/strategy_catalog.json +50 -0
- aponyx/cli/__init__.py +5 -0
- aponyx/cli/commands/__init__.py +8 -0
- aponyx/cli/commands/clean.py +349 -0
- aponyx/cli/commands/list.py +302 -0
- aponyx/cli/commands/report.py +167 -0
- aponyx/cli/commands/run.py +377 -0
- aponyx/cli/main.py +125 -0
- aponyx/config/__init__.py +82 -0
- aponyx/data/__init__.py +99 -0
- aponyx/data/bloomberg_config.py +306 -0
- aponyx/data/bloomberg_instruments.json +26 -0
- aponyx/data/bloomberg_securities.json +42 -0
- aponyx/data/cache.py +294 -0
- aponyx/data/fetch.py +659 -0
- aponyx/data/fetch_registry.py +135 -0
- aponyx/data/loaders.py +205 -0
- aponyx/data/providers/__init__.py +13 -0
- aponyx/data/providers/bloomberg.py +383 -0
- aponyx/data/providers/file.py +111 -0
- aponyx/data/registry.py +500 -0
- aponyx/data/requirements.py +96 -0
- aponyx/data/sample_data.py +415 -0
- aponyx/data/schemas.py +60 -0
- aponyx/data/sources.py +171 -0
- aponyx/data/synthetic_params.json +46 -0
- aponyx/data/transforms.py +336 -0
- aponyx/data/validation.py +308 -0
- aponyx/docs/__init__.py +24 -0
- aponyx/docs/adding_data_providers.md +682 -0
- aponyx/docs/cdx_knowledge_base.md +455 -0
- aponyx/docs/cdx_overlay_strategy.md +135 -0
- aponyx/docs/cli_guide.md +607 -0
- aponyx/docs/governance_design.md +551 -0
- aponyx/docs/logging_design.md +251 -0
- aponyx/docs/performance_evaluation_design.md +265 -0
- aponyx/docs/python_guidelines.md +786 -0
- aponyx/docs/signal_registry_usage.md +369 -0
- aponyx/docs/signal_suitability_design.md +558 -0
- aponyx/docs/visualization_design.md +277 -0
- aponyx/evaluation/__init__.py +11 -0
- aponyx/evaluation/performance/__init__.py +24 -0
- aponyx/evaluation/performance/adapters.py +109 -0
- aponyx/evaluation/performance/analyzer.py +384 -0
- aponyx/evaluation/performance/config.py +320 -0
- aponyx/evaluation/performance/decomposition.py +304 -0
- aponyx/evaluation/performance/metrics.py +761 -0
- aponyx/evaluation/performance/registry.py +327 -0
- aponyx/evaluation/performance/report.py +541 -0
- aponyx/evaluation/suitability/__init__.py +67 -0
- aponyx/evaluation/suitability/config.py +143 -0
- aponyx/evaluation/suitability/evaluator.py +389 -0
- aponyx/evaluation/suitability/registry.py +328 -0
- aponyx/evaluation/suitability/report.py +398 -0
- aponyx/evaluation/suitability/scoring.py +367 -0
- aponyx/evaluation/suitability/tests.py +303 -0
- aponyx/examples/01_generate_synthetic_data.py +53 -0
- aponyx/examples/02_fetch_data_file.py +82 -0
- aponyx/examples/03_fetch_data_bloomberg.py +104 -0
- aponyx/examples/04_compute_signal.py +164 -0
- aponyx/examples/05_evaluate_suitability.py +224 -0
- aponyx/examples/06_run_backtest.py +242 -0
- aponyx/examples/07_analyze_performance.py +214 -0
- aponyx/examples/08_visualize_results.py +272 -0
- aponyx/main.py +7 -0
- aponyx/models/__init__.py +45 -0
- aponyx/models/config.py +83 -0
- aponyx/models/indicator_transformation.json +52 -0
- aponyx/models/indicators.py +292 -0
- aponyx/models/metadata.py +447 -0
- aponyx/models/orchestrator.py +213 -0
- aponyx/models/registry.py +860 -0
- aponyx/models/score_transformation.json +42 -0
- aponyx/models/signal_catalog.json +29 -0
- aponyx/models/signal_composer.py +513 -0
- aponyx/models/signal_transformation.json +29 -0
- aponyx/persistence/__init__.py +16 -0
- aponyx/persistence/json_io.py +132 -0
- aponyx/persistence/parquet_io.py +378 -0
- aponyx/py.typed +0 -0
- aponyx/reporting/__init__.py +10 -0
- aponyx/reporting/generator.py +517 -0
- aponyx/visualization/__init__.py +20 -0
- aponyx/visualization/app.py +37 -0
- aponyx/visualization/plots.py +309 -0
- aponyx/visualization/visualizer.py +242 -0
- aponyx/workflows/__init__.py +18 -0
- aponyx/workflows/concrete_steps.py +720 -0
- aponyx/workflows/config.py +122 -0
- aponyx/workflows/engine.py +279 -0
- aponyx/workflows/registry.py +116 -0
- aponyx/workflows/steps.py +180 -0
- aponyx-0.1.18.dist-info/METADATA +552 -0
- aponyx-0.1.18.dist-info/RECORD +104 -0
- aponyx-0.1.18.dist-info/WHEEL +4 -0
- aponyx-0.1.18.dist-info/entry_points.txt +2 -0
- aponyx-0.1.18.dist-info/licenses/LICENSE +21 -0
aponyx/evaluation/performance/registry.py

@@ -0,0 +1,327 @@

```python
"""
Registry for tracking performance evaluations.

Provides CRUD operations for evaluation metadata with JSON persistence.
Follows the SuitabilityRegistry pattern for mutable state management.
"""

import logging
from dataclasses import asdict, dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Any

from aponyx.persistence import load_json, save_json

from .config import PerformanceResult

logger = logging.getLogger(__name__)


@dataclass
class PerformanceEntry:
    """
    Metadata record for a performance evaluation.

    Attributes
    ----------
    evaluation_id : str
        Unique identifier for this evaluation.
    signal_id : str
        Signal name/identifier.
    strategy_id : str
        Strategy identifier (from backtest config).
    evaluated_at : str
        ISO timestamp of evaluation.
    sharpe_ratio : float
        Sharpe ratio from base metrics.
    max_drawdown : float
        Maximum drawdown from base metrics.
    stability_score : float
        Overall stability score (0-1).
    evaluator_version : str
        Version of evaluator used.
    report_path : str | None
        Path to generated report file, if saved.
    metadata : dict[str, Any]
        Additional metadata (config, extended metrics, attribution).
    """

    evaluation_id: str
    signal_id: str
    strategy_id: str
    evaluated_at: str
    sharpe_ratio: float
    max_drawdown: float
    stability_score: float
    evaluator_version: str
    report_path: str | None = None
    metadata: dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "PerformanceEntry":
        """Create entry from dictionary."""
        return cls(**data)


class PerformanceRegistry:
    """
    Registry for tracking performance evaluations.

    Implements CRUD operations (create, read, update, delete) with
    JSON persistence. Follows the SuitabilityRegistry pattern for
    mutable state management.

    Parameters
    ----------
    registry_path : str | Path
        Path to JSON registry file.

    Examples
    --------
    >>> from aponyx.config import PERFORMANCE_REGISTRY_PATH
    >>> registry = PerformanceRegistry(PERFORMANCE_REGISTRY_PATH)
    >>> eval_id = registry.register_evaluation(result, "cdx_etf_basis", "simple_threshold")
    >>> entry = registry.get_evaluation(eval_id)
    >>> evaluations = registry.list_evaluations(signal_id="cdx_etf_basis")
    """

    def __init__(self, registry_path: str | Path):
        """
        Initialize registry with JSON persistence.

        Parameters
        ----------
        registry_path : str | Path
            Path to registry JSON file.
        """
        self.registry_path = Path(registry_path)
        self._catalog: dict[str, dict] = {}

        # Load existing registry or create new
        if self.registry_path.exists():
            try:
                self._catalog = load_json(self.registry_path)
                logger.info(
                    "Loaded existing performance registry: %d evaluations",
                    len(self._catalog),
                )
            except Exception as e:
                logger.warning(
                    "Failed to load registry from %s: %s, creating new",
                    self.registry_path,
                    e,
                )
                self._catalog = {}
                self._save()
        else:
            logger.info("Creating new performance registry at %s", self.registry_path)
            self._save()

    def register_evaluation(
        self,
        result: PerformanceResult,
        signal_id: str,
        strategy_id: str,
        report_path: str | None = None,
    ) -> str:
        """
        Register new performance evaluation result.

        Parameters
        ----------
        result : PerformanceResult
            Performance evaluation result to register.
        signal_id : str
            Signal identifier.
        strategy_id : str
            Strategy identifier from backtest config.
        report_path : str | None
            Path to saved report file.

        Returns
        -------
        str
            Unique evaluation ID for retrieval.

        Notes
        -----
        Evaluation ID format: {signal_id}_{strategy_id}_{timestamp}
        Automatically persists registry to disk.
        """
        # Generate unique evaluation ID
        timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S")
        evaluation_id = f"{signal_id}_{strategy_id}_{timestamp_str}"

        logger.debug("Registering performance evaluation: %s", evaluation_id)

        # Extract key metrics from PerformanceMetrics dataclass
        sharpe_ratio = result.metrics.sharpe_ratio
        max_drawdown = result.metrics.max_drawdown

        # Create entry
        entry = PerformanceEntry(
            evaluation_id=evaluation_id,
            signal_id=signal_id,
            strategy_id=strategy_id,
            evaluated_at=result.timestamp,
            sharpe_ratio=sharpe_ratio,
            max_drawdown=max_drawdown,
            stability_score=result.stability_score,
            evaluator_version=result.metadata.get("evaluator_version", "unknown"),
            report_path=report_path,
            metadata={
                "extended_metrics": asdict(result.metrics),
                "subperiod_analysis": result.subperiod_analysis,
                "attribution": result.attribution,
                "summary": result.summary,
                "config": asdict(result.config),
                "backtest_config": result.metadata.get("backtest_config", {}),
            },
        )

        # Add to catalog and save
        self._catalog[evaluation_id] = entry.to_dict()
        self._save()

        logger.info(
            "Registered performance evaluation: %s (stability=%.3f, sharpe=%.2f)",
            evaluation_id,
            result.stability_score,
            sharpe_ratio,
        )

        return evaluation_id

    def get_evaluation(self, evaluation_id: str) -> PerformanceEntry:
        """
        Retrieve evaluation by ID.

        Parameters
        ----------
        evaluation_id : str
            Unique evaluation identifier.

        Returns
        -------
        PerformanceEntry
            Typed evaluation entry.

        Raises
        ------
        KeyError
            If evaluation ID not found.
        """
        if evaluation_id not in self._catalog:
            raise KeyError(f"Performance evaluation not found: {evaluation_id}")

        logger.debug("Retrieved performance evaluation: %s", evaluation_id)
        return PerformanceEntry.from_dict(self._catalog[evaluation_id])

    def get_evaluation_info(self, evaluation_id: str) -> dict[str, Any]:
        """
        Retrieve evaluation as dictionary.

        Parameters
        ----------
        evaluation_id : str
            Unique evaluation identifier.

        Returns
        -------
        dict[str, Any]
            Copy of evaluation data.

        Raises
        ------
        KeyError
            If evaluation ID not found.
        """
        if evaluation_id not in self._catalog:
            raise KeyError(f"Performance evaluation not found: {evaluation_id}")

        return self._catalog[evaluation_id].copy()

    def list_evaluations(
        self,
        signal_id: str | None = None,
        strategy_id: str | None = None,
    ) -> list[str]:
        """
        List evaluations with optional filters.

        Parameters
        ----------
        signal_id : str | None
            Filter by signal identifier.
        strategy_id : str | None
            Filter by strategy identifier.

        Returns
        -------
        list[str]
            Sorted list of evaluation IDs matching filters.

        Examples
        --------
        >>> registry.list_evaluations()  # All evaluations
        >>> registry.list_evaluations(signal_id="cdx_etf_basis")
        >>> registry.list_evaluations(strategy_id="simple_threshold")
        """
        results = []

        for eval_id, info in self._catalog.items():
            # Apply filters
            if signal_id and info.get("signal_id") != signal_id:
                continue
            if strategy_id and info.get("strategy_id") != strategy_id:
                continue

            results.append(eval_id)

        logger.debug(
            "Listed performance evaluations: %d total, %d matching filters",
            len(self._catalog),
            len(results),
        )

        return sorted(results)

    def remove_evaluation(self, evaluation_id: str) -> None:
        """
        Remove evaluation from registry.

        Parameters
        ----------
        evaluation_id : str
            Unique evaluation identifier.

        Raises
        ------
        KeyError
            If evaluation ID not found.

        Notes
        -----
        Does not delete associated report file.
        Automatically persists registry to disk.
        """
        if evaluation_id not in self._catalog:
            raise KeyError(f"Performance evaluation not found: {evaluation_id}")

        del self._catalog[evaluation_id]
        self._save()

        logger.info("Removed performance evaluation: %s", evaluation_id)

    def _save(self) -> None:
        """Persist registry to JSON."""
        # Ensure parent directory exists
        self.registry_path.parent.mkdir(parents=True, exist_ok=True)

        save_json(self._catalog, self.registry_path)
        logger.debug("Saved performance registry to %s", self.registry_path)
```