aponyx-0.1.18-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aponyx/__init__.py +14 -0
- aponyx/backtest/__init__.py +31 -0
- aponyx/backtest/adapters.py +77 -0
- aponyx/backtest/config.py +84 -0
- aponyx/backtest/engine.py +560 -0
- aponyx/backtest/protocols.py +101 -0
- aponyx/backtest/registry.py +334 -0
- aponyx/backtest/strategy_catalog.json +50 -0
- aponyx/cli/__init__.py +5 -0
- aponyx/cli/commands/__init__.py +8 -0
- aponyx/cli/commands/clean.py +349 -0
- aponyx/cli/commands/list.py +302 -0
- aponyx/cli/commands/report.py +167 -0
- aponyx/cli/commands/run.py +377 -0
- aponyx/cli/main.py +125 -0
- aponyx/config/__init__.py +82 -0
- aponyx/data/__init__.py +99 -0
- aponyx/data/bloomberg_config.py +306 -0
- aponyx/data/bloomberg_instruments.json +26 -0
- aponyx/data/bloomberg_securities.json +42 -0
- aponyx/data/cache.py +294 -0
- aponyx/data/fetch.py +659 -0
- aponyx/data/fetch_registry.py +135 -0
- aponyx/data/loaders.py +205 -0
- aponyx/data/providers/__init__.py +13 -0
- aponyx/data/providers/bloomberg.py +383 -0
- aponyx/data/providers/file.py +111 -0
- aponyx/data/registry.py +500 -0
- aponyx/data/requirements.py +96 -0
- aponyx/data/sample_data.py +415 -0
- aponyx/data/schemas.py +60 -0
- aponyx/data/sources.py +171 -0
- aponyx/data/synthetic_params.json +46 -0
- aponyx/data/transforms.py +336 -0
- aponyx/data/validation.py +308 -0
- aponyx/docs/__init__.py +24 -0
- aponyx/docs/adding_data_providers.md +682 -0
- aponyx/docs/cdx_knowledge_base.md +455 -0
- aponyx/docs/cdx_overlay_strategy.md +135 -0
- aponyx/docs/cli_guide.md +607 -0
- aponyx/docs/governance_design.md +551 -0
- aponyx/docs/logging_design.md +251 -0
- aponyx/docs/performance_evaluation_design.md +265 -0
- aponyx/docs/python_guidelines.md +786 -0
- aponyx/docs/signal_registry_usage.md +369 -0
- aponyx/docs/signal_suitability_design.md +558 -0
- aponyx/docs/visualization_design.md +277 -0
- aponyx/evaluation/__init__.py +11 -0
- aponyx/evaluation/performance/__init__.py +24 -0
- aponyx/evaluation/performance/adapters.py +109 -0
- aponyx/evaluation/performance/analyzer.py +384 -0
- aponyx/evaluation/performance/config.py +320 -0
- aponyx/evaluation/performance/decomposition.py +304 -0
- aponyx/evaluation/performance/metrics.py +761 -0
- aponyx/evaluation/performance/registry.py +327 -0
- aponyx/evaluation/performance/report.py +541 -0
- aponyx/evaluation/suitability/__init__.py +67 -0
- aponyx/evaluation/suitability/config.py +143 -0
- aponyx/evaluation/suitability/evaluator.py +389 -0
- aponyx/evaluation/suitability/registry.py +328 -0
- aponyx/evaluation/suitability/report.py +398 -0
- aponyx/evaluation/suitability/scoring.py +367 -0
- aponyx/evaluation/suitability/tests.py +303 -0
- aponyx/examples/01_generate_synthetic_data.py +53 -0
- aponyx/examples/02_fetch_data_file.py +82 -0
- aponyx/examples/03_fetch_data_bloomberg.py +104 -0
- aponyx/examples/04_compute_signal.py +164 -0
- aponyx/examples/05_evaluate_suitability.py +224 -0
- aponyx/examples/06_run_backtest.py +242 -0
- aponyx/examples/07_analyze_performance.py +214 -0
- aponyx/examples/08_visualize_results.py +272 -0
- aponyx/main.py +7 -0
- aponyx/models/__init__.py +45 -0
- aponyx/models/config.py +83 -0
- aponyx/models/indicator_transformation.json +52 -0
- aponyx/models/indicators.py +292 -0
- aponyx/models/metadata.py +447 -0
- aponyx/models/orchestrator.py +213 -0
- aponyx/models/registry.py +860 -0
- aponyx/models/score_transformation.json +42 -0
- aponyx/models/signal_catalog.json +29 -0
- aponyx/models/signal_composer.py +513 -0
- aponyx/models/signal_transformation.json +29 -0
- aponyx/persistence/__init__.py +16 -0
- aponyx/persistence/json_io.py +132 -0
- aponyx/persistence/parquet_io.py +378 -0
- aponyx/py.typed +0 -0
- aponyx/reporting/__init__.py +10 -0
- aponyx/reporting/generator.py +517 -0
- aponyx/visualization/__init__.py +20 -0
- aponyx/visualization/app.py +37 -0
- aponyx/visualization/plots.py +309 -0
- aponyx/visualization/visualizer.py +242 -0
- aponyx/workflows/__init__.py +18 -0
- aponyx/workflows/concrete_steps.py +720 -0
- aponyx/workflows/config.py +122 -0
- aponyx/workflows/engine.py +279 -0
- aponyx/workflows/registry.py +116 -0
- aponyx/workflows/steps.py +180 -0
- aponyx-0.1.18.dist-info/METADATA +552 -0
- aponyx-0.1.18.dist-info/RECORD +104 -0
- aponyx-0.1.18.dist-info/WHEEL +4 -0
- aponyx-0.1.18.dist-info/entry_points.txt +2 -0
- aponyx-0.1.18.dist-info/licenses/LICENSE +21 -0
aponyx/reporting/generator.py
@@ -0,0 +1,517 @@
"""
Report generation logic for workflow results.

Aggregates metrics, charts, and analysis into formatted reports
for console output, markdown files, or HTML documents.
"""

import logging
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from pathlib import Path

logger = logging.getLogger(__name__)


class ReportFormat(str, Enum):
    """Report output format options."""

    CONSOLE = "console"
    MARKDOWN = "markdown"
    HTML = "html"


@dataclass
class ReportData:
    """
    Aggregated data for report generation.

    Attributes
    ----------
    workflow_dir : Path
        Workflow output directory path.
    label : str
        Workflow label.
    signal_name : str
        Signal identifier.
    strategy_name : str
        Strategy identifier.
    suitability_report : str | None
        Suitability evaluation report content.
    performance_report : str | None
        Performance analysis report content.
    has_visualizations : bool
        Whether visualization files exist.
    """

    workflow_dir: Path
    label: str
    signal_name: str
    strategy_name: str
    suitability_report: str | None = None
    performance_report: str | None = None
    has_visualizations: bool = False


def generate_report(
    workflow_dir: Path,
    format: ReportFormat | str = ReportFormat.CONSOLE,
) -> dict[str, str | Path | None]:
    """
    Generate comprehensive research report from workflow results.

    Aggregates suitability evaluation, performance metrics, and visualization
    references into a unified report document.

    Parameters
    ----------
    workflow_dir : Path
        Workflow output directory containing metadata.json and reports.
    format : ReportFormat or str
        Output format (console, markdown, or html).

    Returns
    -------
    dict
        Dictionary with 'content' (str) and 'output_path' (Path or None).
        output_path is None for console format.

    Raises
    ------
    FileNotFoundError
        If workflow directory or required reports not found.

    Examples
    --------
    Generate console report:
    >>> from pathlib import Path
    >>> workflow_dir = Path("data/workflows/my_test_20241202_120000")
    >>> result = generate_report(workflow_dir)
    >>> print(result["content"])

    Generate markdown file:
    >>> result = generate_report(workflow_dir, format="markdown")
    >>> print(f"Saved to: {result['output_path']}")
    """
    # Convert string to enum if needed
    if isinstance(format, str):
        format = ReportFormat(format.lower())

    # Validate workflow directory exists
    if not workflow_dir.exists():
        raise FileNotFoundError(f"Workflow directory not found: {workflow_dir}")

    # Load metadata
    metadata_path = workflow_dir / "metadata.json"
    if not metadata_path.exists():
        raise FileNotFoundError(
            f"Metadata not found in workflow directory: {workflow_dir}"
        )

    import json

    with open(metadata_path, "r", encoding="utf-8") as f:
        metadata = json.load(f)

    label = metadata.get("label", "unknown")
    signal_name = metadata.get("signal", "unknown")
    strategy_name = metadata.get("strategy", "unknown")

    logger.info(
        "Generating %s report: workflow=%s (signal=%s, strategy=%s)",
        format.value,
        label,
        signal_name,
        strategy_name,
    )

    # Collect report data
    data = _collect_report_data(workflow_dir, label, signal_name, strategy_name)

    # Generate report based on format
    if format == ReportFormat.CONSOLE:
        content = _generate_console_report(data)
    elif format == ReportFormat.MARKDOWN:
        content = _generate_markdown_report(data)
    else:  # HTML
        content = _generate_html_report(data)

    # Save to file if not console
    output_path: Path | None = None
    if format != ReportFormat.CONSOLE:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"report_{timestamp}.{_get_extension(format)}"
        # Save to workflow's reports folder
        output_path = workflow_dir / "reports" / filename

        output_path.parent.mkdir(parents=True, exist_ok=True)
        output_path.write_text(content, encoding="utf-8")
        logger.info("Report saved: %s", output_path)

    return {"content": content, "output_path": output_path}


def _collect_report_data(
    workflow_dir: Path,
    label: str,
    signal_name: str,
    strategy_name: str,
) -> ReportData:
    """
    Collect workflow results for report generation.

    Parameters
    ----------
    workflow_dir : Path
        Workflow output directory.
    label : str
        Workflow label.
    signal_name : str
        Signal identifier.
    strategy_name : str
        Strategy identifier.

    Returns
    -------
    ReportData
        Aggregated report data.

    Raises
    ------
    FileNotFoundError
        If required workflow results not found.
    """
    data = ReportData(
        workflow_dir=workflow_dir,
        label=label,
        signal_name=signal_name,
        strategy_name=strategy_name,
    )

    logger.debug("Loading reports from workflow directory: %s", workflow_dir.name)

    logger.debug("Loading reports from workflow directory: %s", workflow_dir.name)

    # Load reports from reports/ subdirectory
    reports_dir = workflow_dir / "reports"
    if reports_dir.exists():
        # Load suitability report (filename pattern: suitability_evaluation_{timestamp}.md)
        suitability_files = list(reports_dir.glob("suitability_evaluation_*.md"))
        if suitability_files:
            suitability_file = sorted(suitability_files)[-1]
            data.suitability_report = suitability_file.read_text(encoding="utf-8")
            logger.debug("Loaded suitability report: %s", suitability_file.name)

        # Load performance report (filename pattern: performance_analysis_{timestamp}.md)
        performance_files = list(reports_dir.glob("performance_analysis_*.md"))
        if performance_files:
            performance_file = sorted(performance_files)[-1]
            data.performance_report = performance_file.read_text(encoding="utf-8")
            logger.debug("Loaded performance report: %s", performance_file.name)

    # Check for visualizations
    viz_dir = workflow_dir / "visualizations"
    data.has_visualizations = viz_dir.exists() and any(viz_dir.glob("*.html"))

    # Validate that we have some results
    if not (data.suitability_report or data.performance_report):
        raise FileNotFoundError(
            f"No reports found in workflow directory {workflow_dir}. "
            f"Run workflow with all steps enabled."
        )

    return data


def _generate_console_report(data: ReportData) -> str:
    """Generate console-friendly report."""
    lines = []
    lines.append("=" * 80)
    lines.append(f"Research Report: {data.label}")
    lines.append(f"Signal: {data.signal_name} | Strategy: {data.strategy_name}")
    lines.append("=" * 80)
    lines.append("")

    # Suitability section
    if data.suitability_report:
        lines.append("SUITABILITY EVALUATION")
        lines.append("-" * 80)
        # Extract key metrics from markdown (simplified)
        lines.append(_extract_console_summary(data.suitability_report))
        lines.append("")

    # Performance section
    if data.performance_report:
        lines.append("PERFORMANCE ANALYSIS")
        lines.append("-" * 80)
        lines.append(_extract_console_summary(data.performance_report))
        lines.append("")

    # Visualizations
    if data.has_visualizations:
        lines.append("VISUALIZATIONS")
        lines.append("-" * 80)
        viz_dir = data.workflow_dir / "visualizations"
        for viz_file in sorted(viz_dir.glob("*.html")):
            lines.append(f" • {viz_file.name}: {viz_file}")
        lines.append("")

    # Workflow info
    lines.append("WORKFLOW OUTPUT")
    lines.append("-" * 80)
    lines.append(f" Directory: {data.workflow_dir}")
    lines.append("")

    lines.append("=" * 80)

    return "\n".join(lines)


def _generate_markdown_report(data: ReportData) -> str:
    """Generate markdown report."""
    lines = []
    lines.append(f"# Research Report: {data.label}")
    lines.append("")
    lines.append(f"**Signal:** {data.signal_name} | **Strategy:** {data.strategy_name}")
    lines.append("")
    lines.append(f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    lines.append("")

    # Suitability section
    if data.suitability_report:
        lines.append("## Suitability Evaluation")
        lines.append("")
        lines.append(data.suitability_report)
        lines.append("")

    # Performance section
    if data.performance_report:
        lines.append("## Performance Analysis")
        lines.append("")
        lines.append(data.performance_report)
        lines.append("")

    # Visualizations
    if data.has_visualizations:
        lines.append("## Visualizations")
        lines.append("")
        viz_dir = data.workflow_dir / "visualizations"
        for viz_file in sorted(viz_dir.glob("*.html")):
            lines.append(f"- [{viz_file.stem}]({viz_file.resolve().as_uri()})")
        lines.append("")

    # Workflow info
    lines.append("## Workflow Details")
    lines.append("")
    lines.append(f"**Output Directory:** `{data.workflow_dir}`")
    lines.append("")

    return "\n".join(lines)


def _generate_html_report(data: ReportData) -> str:
    """Generate HTML report."""
    html_parts = []

    # HTML header
    html_parts.append("<!DOCTYPE html>")
    html_parts.append('<html lang="en">')
    html_parts.append("<head>")
    html_parts.append(' <meta charset="UTF-8">')
    html_parts.append(
        ' <meta name="viewport" content="width=device-width, initial-scale=1.0">'
    )
    html_parts.append(f" <title>Research Report: {data.label}</title>")
    html_parts.append(" <style>")
    html_parts.append(
        " body { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; max-width: 1200px; margin: 40px auto; padding: 0 20px; line-height: 1.6; }"
    )
    html_parts.append(
        " h1 { border-bottom: 3px solid #333; padding-bottom: 10px; }"
    )
    html_parts.append(
        " h2 { border-bottom: 1px solid #ccc; padding-bottom: 5px; margin-top: 40px; }"
    )
    html_parts.append(
        " .metadata { color: #666; font-size: 0.9em; margin-bottom: 30px; }"
    )
    html_parts.append(
        " pre { background: #f5f5f5; padding: 15px; border-radius: 5px; overflow-x: auto; }"
    )
    html_parts.append(
        " table { border-collapse: collapse; width: 100%; margin: 20px 0; }"
    )
    html_parts.append(
        " th, td { border: 1px solid #ddd; padding: 8px; text-align: left; }"
    )
    html_parts.append(" th { background-color: #f2f2f2; }")
    html_parts.append(" </style>")
    html_parts.append("</head>")
    html_parts.append("<body>")

    # Title
    html_parts.append(f" <h1>Research Report: {data.label}</h1>")
    html_parts.append(
        f' <div class="metadata">Signal: {data.signal_name} | Strategy: {data.strategy_name}</div>'
    )
    html_parts.append(
        f' <div class="metadata">Generated: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}</div>'
    )

    # Suitability section
    if data.suitability_report:
        html_parts.append(" <h2>Suitability Evaluation</h2>")
        html_parts.append(_markdown_to_html(data.suitability_report))

    # Performance section
    if data.performance_report:
        html_parts.append(" <h2>Performance Analysis</h2>")
        html_parts.append(_markdown_to_html(data.performance_report))

    # Visualizations
    if data.has_visualizations:
        html_parts.append(" <h2>Visualizations</h2>")
        html_parts.append(" <ul>")
        viz_dir = data.workflow_dir / "visualizations"
        for viz_file in sorted(viz_dir.glob("*.html")):
            html_parts.append(
                f' <li><a href="{viz_file.resolve().as_uri()}" target="_blank">{viz_file.stem}</a></li>'
            )
        html_parts.append(" </ul>")

    # Workflow info
    html_parts.append(" <h2>Workflow Details</h2>")
    html_parts.append(
        f" <p><strong>Output Directory:</strong> <code>{data.workflow_dir}</code></p>"
    )

    # HTML footer
    html_parts.append("</body>")
    html_parts.append("</html>")

    return "\n".join(html_parts)


def _extract_console_summary(markdown_text: str) -> str:
    """Extract key summary from markdown report for console output."""
    lines = []
    in_table = False

    for line in markdown_text.split("\n"):
        # Include headers (remove markdown formatting)
        if line.startswith("#"):
            header = line.lstrip("#").strip()
            if header and not header.startswith("Research Report"):
                lines.append(f" {header}")

        # Include table content (simplified)
        elif "|" in line and line.strip().startswith("|"):
            if not in_table:
                in_table = True
            if not line.strip().startswith("|-"):  # Skip separator lines
                # Clean up table formatting for console
                cells = [cell.strip() for cell in line.split("|") if cell.strip()]
                if cells:
                    lines.append(" " + " | ".join(cells))
        else:
            in_table = False
            # Include important summary lines
            if line.strip() and not line.startswith("**Generated"):
                if any(
                    keyword in line.lower()
                    for keyword in [
                        "decision:",
                        "score:",
                        "sharpe",
                        "return",
                        "drawdown",
                    ]
                ):
                    lines.append(f" {line.strip()}")

    return "\n".join(lines) if lines else " (See full report for details)"


def _markdown_to_html(markdown_text: str) -> str:
    """
    Convert simple markdown to HTML (basic implementation).

    Handles headers, bold text, tables, and paragraphs.
    For full markdown support, would use library like markdown or mistune.
    """
    lines = []
    in_table = False

    for line in markdown_text.split("\n"):
        # Skip top-level headers (already in main HTML)
        if line.startswith("# "):
            continue

        # Headers
        elif line.startswith("## "):
            lines.append(f" <h3>{line[3:].strip()}</h3>")
        elif line.startswith("### "):
            lines.append(f" <h4>{line[4:].strip()}</h4>")

        # Tables
        elif "|" in line and line.strip().startswith("|"):
            if not in_table:
                lines.append(" <table>")
                in_table = True

            if line.strip().startswith("|-"):  # Separator line
                continue

            cells = [cell.strip() for cell in line.split("|") if cell.strip()]
            if cells:
                # Detect header row (first row in table)
                if len(lines) > 0 and lines[-1] == " <table>":
                    lines.append(" <tr>")
                    for cell in cells:
                        lines.append(
                            f" <th>{_process_inline_markdown(cell)}</th>"
                        )
                    lines.append(" </tr>")
                else:
                    lines.append(" <tr>")
                    for cell in cells:
                        lines.append(
                            f" <td>{_process_inline_markdown(cell)}</td>"
                        )
                    lines.append(" </tr>")
        else:
            if in_table:
                lines.append(" </table>")
                in_table = False

            # Paragraphs
            if line.strip():
                lines.append(f" <p>{_process_inline_markdown(line.strip())}</p>")
            else:
                lines.append("")

    if in_table:
        lines.append(" </table>")

    return "\n".join(lines)


def _process_inline_markdown(text: str) -> str:
    """Process inline markdown formatting (bold, code)."""
    # Bold
    import re

    text = re.sub(r"\*\*(.*?)\*\*", r"<strong>\1</strong>", text)
    # Code
    text = re.sub(r"`(.*?)`", r"<code>\1</code>", text)
    return text


def _get_extension(format: ReportFormat) -> str:
    """Get file extension for report format."""
    if format == ReportFormat.MARKDOWN:
        return "md"
    elif format == ReportFormat.HTML:
        return "html"
    return "txt"
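The following usage sketch is not part of the wheel contents; it shows how the generate_report function above can be called directly, based on its signature and docstring. The workflow directory path is hypothetical and is assumed to already contain metadata.json plus at least one suitability or performance report.

from pathlib import Path

from aponyx.reporting.generator import ReportFormat, generate_report

# Hypothetical workflow output directory produced by an earlier run.
workflow_dir = Path("data/workflows/my_test_20241202_120000")

# Console format returns the rendered text; output_path stays None.
result = generate_report(workflow_dir, format=ReportFormat.CONSOLE)
print(result["content"])

# Markdown format also writes reports/report_<timestamp>.md inside the workflow directory.
result = generate_report(workflow_dir, format="markdown")
print(f"Saved to: {result['output_path']}")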
aponyx/visualization/__init__.py
@@ -0,0 +1,20 @@
"""
Visualization layer for research framework.

Provides modular plotting interface for backtest results, signals, and risk metrics.
All functions return Plotly figure objects for integration with Streamlit or notebooks.
"""

from .plots import (
    plot_drawdown,
    plot_equity_curve,
    plot_signal,
)
from .visualizer import Visualizer

__all__ = [
    "Visualizer",
    "plot_equity_curve",
    "plot_signal",
    "plot_drawdown",
]
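A minimal usage sketch of the public plotting surface re-exported above, again not part of the wheel. The plotting signatures live in aponyx/visualization/plots.py, which is not reproduced in this diff, so the single equity-curve argument below is an assumption; only the return type (a Plotly figure) is stated by the module docstring.

import pandas as pd

from aponyx.visualization import plot_equity_curve

# Hypothetical cumulative-equity series; the actual expected input is defined
# in aponyx/visualization/plots.py and is assumed here.
equity = pd.Series([100.0, 101.2, 100.8, 102.4], name="equity")

fig = plot_equity_curve(equity)  # assumed call shape; returns a Plotly figure per the docstring
fig.show()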
aponyx/visualization/app.py
@@ -0,0 +1,37 @@
"""
Streamlit dashboard application stub.

Placeholder for future interactive web interface integrating
backtest results, signal monitoring, and risk analytics.
"""

import logging

logger = logging.getLogger(__name__)


def main() -> None:
    """
    Launch Streamlit dashboard for backtest visualization.

    Notes
    -----
    Placeholder for future implementation.
    Will integrate Visualizer class with interactive controls.

    Planned features:
    - Parameter selection widgets
    - Real-time signal monitoring
    - Performance metric cards
    - Interactive chart panels
    - Export and report generation
    """
    logger.info("Streamlit app not yet implemented")
    raise NotImplementedError(
        "Streamlit dashboard is a placeholder. "
        "Run with: streamlit run src/aponyx/visualization/app.py"
    )


if __name__ == "__main__":
    main()