aponyx 0.1.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aponyx/__init__.py +14 -0
- aponyx/backtest/__init__.py +31 -0
- aponyx/backtest/adapters.py +77 -0
- aponyx/backtest/config.py +84 -0
- aponyx/backtest/engine.py +560 -0
- aponyx/backtest/protocols.py +101 -0
- aponyx/backtest/registry.py +334 -0
- aponyx/backtest/strategy_catalog.json +50 -0
- aponyx/cli/__init__.py +5 -0
- aponyx/cli/commands/__init__.py +8 -0
- aponyx/cli/commands/clean.py +349 -0
- aponyx/cli/commands/list.py +302 -0
- aponyx/cli/commands/report.py +167 -0
- aponyx/cli/commands/run.py +377 -0
- aponyx/cli/main.py +125 -0
- aponyx/config/__init__.py +82 -0
- aponyx/data/__init__.py +99 -0
- aponyx/data/bloomberg_config.py +306 -0
- aponyx/data/bloomberg_instruments.json +26 -0
- aponyx/data/bloomberg_securities.json +42 -0
- aponyx/data/cache.py +294 -0
- aponyx/data/fetch.py +659 -0
- aponyx/data/fetch_registry.py +135 -0
- aponyx/data/loaders.py +205 -0
- aponyx/data/providers/__init__.py +13 -0
- aponyx/data/providers/bloomberg.py +383 -0
- aponyx/data/providers/file.py +111 -0
- aponyx/data/registry.py +500 -0
- aponyx/data/requirements.py +96 -0
- aponyx/data/sample_data.py +415 -0
- aponyx/data/schemas.py +60 -0
- aponyx/data/sources.py +171 -0
- aponyx/data/synthetic_params.json +46 -0
- aponyx/data/transforms.py +336 -0
- aponyx/data/validation.py +308 -0
- aponyx/docs/__init__.py +24 -0
- aponyx/docs/adding_data_providers.md +682 -0
- aponyx/docs/cdx_knowledge_base.md +455 -0
- aponyx/docs/cdx_overlay_strategy.md +135 -0
- aponyx/docs/cli_guide.md +607 -0
- aponyx/docs/governance_design.md +551 -0
- aponyx/docs/logging_design.md +251 -0
- aponyx/docs/performance_evaluation_design.md +265 -0
- aponyx/docs/python_guidelines.md +786 -0
- aponyx/docs/signal_registry_usage.md +369 -0
- aponyx/docs/signal_suitability_design.md +558 -0
- aponyx/docs/visualization_design.md +277 -0
- aponyx/evaluation/__init__.py +11 -0
- aponyx/evaluation/performance/__init__.py +24 -0
- aponyx/evaluation/performance/adapters.py +109 -0
- aponyx/evaluation/performance/analyzer.py +384 -0
- aponyx/evaluation/performance/config.py +320 -0
- aponyx/evaluation/performance/decomposition.py +304 -0
- aponyx/evaluation/performance/metrics.py +761 -0
- aponyx/evaluation/performance/registry.py +327 -0
- aponyx/evaluation/performance/report.py +541 -0
- aponyx/evaluation/suitability/__init__.py +67 -0
- aponyx/evaluation/suitability/config.py +143 -0
- aponyx/evaluation/suitability/evaluator.py +389 -0
- aponyx/evaluation/suitability/registry.py +328 -0
- aponyx/evaluation/suitability/report.py +398 -0
- aponyx/evaluation/suitability/scoring.py +367 -0
- aponyx/evaluation/suitability/tests.py +303 -0
- aponyx/examples/01_generate_synthetic_data.py +53 -0
- aponyx/examples/02_fetch_data_file.py +82 -0
- aponyx/examples/03_fetch_data_bloomberg.py +104 -0
- aponyx/examples/04_compute_signal.py +164 -0
- aponyx/examples/05_evaluate_suitability.py +224 -0
- aponyx/examples/06_run_backtest.py +242 -0
- aponyx/examples/07_analyze_performance.py +214 -0
- aponyx/examples/08_visualize_results.py +272 -0
- aponyx/main.py +7 -0
- aponyx/models/__init__.py +45 -0
- aponyx/models/config.py +83 -0
- aponyx/models/indicator_transformation.json +52 -0
- aponyx/models/indicators.py +292 -0
- aponyx/models/metadata.py +447 -0
- aponyx/models/orchestrator.py +213 -0
- aponyx/models/registry.py +860 -0
- aponyx/models/score_transformation.json +42 -0
- aponyx/models/signal_catalog.json +29 -0
- aponyx/models/signal_composer.py +513 -0
- aponyx/models/signal_transformation.json +29 -0
- aponyx/persistence/__init__.py +16 -0
- aponyx/persistence/json_io.py +132 -0
- aponyx/persistence/parquet_io.py +378 -0
- aponyx/py.typed +0 -0
- aponyx/reporting/__init__.py +10 -0
- aponyx/reporting/generator.py +517 -0
- aponyx/visualization/__init__.py +20 -0
- aponyx/visualization/app.py +37 -0
- aponyx/visualization/plots.py +309 -0
- aponyx/visualization/visualizer.py +242 -0
- aponyx/workflows/__init__.py +18 -0
- aponyx/workflows/concrete_steps.py +720 -0
- aponyx/workflows/config.py +122 -0
- aponyx/workflows/engine.py +279 -0
- aponyx/workflows/registry.py +116 -0
- aponyx/workflows/steps.py +180 -0
- aponyx-0.1.18.dist-info/METADATA +552 -0
- aponyx-0.1.18.dist-info/RECORD +104 -0
- aponyx-0.1.18.dist-info/WHEEL +4 -0
- aponyx-0.1.18.dist-info/entry_points.txt +2 -0
- aponyx-0.1.18.dist-info/licenses/LICENSE +21 -0
aponyx/cli/commands/report.py
ADDED
@@ -0,0 +1,167 @@
"""
Generate research report command.

Creates comprehensive analysis documents from workflow results.
"""

import json
import logging
from pathlib import Path

import click

from aponyx.reporting import generate_report
from aponyx.config import DATA_WORKFLOWS_DIR

logger = logging.getLogger(__name__)


def _resolve_workflow_dir(workflow: str) -> Path:
    """
    Resolve workflow directory from label or index.

    Parameters
    ----------
    workflow : str
        Workflow label or numeric index.

    Returns
    -------
    Path
        Resolved workflow directory path.

    Raises
    ------
    click.ClickException
        If workflow not found or invalid index.
    """
    if not DATA_WORKFLOWS_DIR.exists():
        raise click.ClickException(
            "No workflows directory found. Run a workflow first."
        )

    # Collect all workflows with valid metadata
    workflows = []
    for workflow_dir in DATA_WORKFLOWS_DIR.iterdir():
        if not workflow_dir.is_dir():
            continue

        metadata_path = workflow_dir / "metadata.json"
        if not metadata_path.exists():
            continue

        try:
            with open(metadata_path, "r", encoding="utf-8") as f:
                metadata = json.load(f)

            # Skip workflows without label (old format)
            if "label" not in metadata:
                continue

            workflows.append(
                {
                    "dir": workflow_dir,
                    "label": metadata["label"],
                    "timestamp": metadata.get("timestamp", ""),
                }
            )
        except Exception as e:
            logger.debug("Failed to load metadata from %s: %s", workflow_dir, e)
            continue

    if not workflows:
        raise click.ClickException(
            "No workflows found with valid metadata. Run a workflow first."
        )

    # Sort by timestamp descending (newest first)
    workflows.sort(key=lambda w: w["timestamp"], reverse=True)

    # Try to parse as index
    try:
        idx = int(workflow)
        if idx < 0 or idx >= len(workflows):
            raise click.ClickException(
                f"Index {idx} out of range. Valid indices: 0-{len(workflows) - 1}. "
                f"Use 'aponyx list workflows' to see available workflows."
            )
        return workflows[idx]["dir"]
    except ValueError:
        pass

    # Search by label (latest matching timestamp)
    matching = [w for w in workflows if w["label"] == workflow]
    if not matching:
        raise click.ClickException(
            f"Workflow '{workflow}' not found. "
            f"Use 'aponyx list workflows' to see available workflows."
        )

    # Return latest matching workflow
    return matching[0]["dir"]


@click.command(name="report")
@click.option(
    "--workflow",
    required=True,
    type=str,
    help="Workflow label or numeric index from 'aponyx list workflows'",
)
@click.option(
    "--format",
    type=click.Choice(["console", "markdown", "html"], case_sensitive=False),
    default="console",
    help="Report output format (default: console)",
)
def report(
    workflow: str,
    format: str,
) -> None:
    """
    Generate comprehensive research report from workflow results.

    Aggregates suitability evaluation, performance metrics, and visualization
    references into a single document. Supports console output, markdown, and HTML.

    Workflow can be specified by:
    - Label (e.g., "my_test_run")
    - Index from 'aponyx list workflows' (e.g., "0" for most recent)

    Reports are saved to the workflow's reports/ folder.

    \b
    Examples:
        aponyx list workflows
        aponyx report --workflow my_test_run
        aponyx report --workflow 0
        aponyx report --workflow my_test_run --format markdown
        aponyx report --workflow 0 --format html

    Note: Indices are ephemeral and change as new workflows are added.
    Use workflow labels for stable references.
    """
    try:
        # Resolve workflow directory
        workflow_dir = _resolve_workflow_dir(workflow)

        # Generate report
        result = generate_report(
            workflow_dir=workflow_dir,
            format=format,
        )

        if format == "console":
            click.echo(result["content"])
        else:
            click.echo(f"Report saved: {result['output_path']}")

    except FileNotFoundError as e:
        click.echo(str(e), err=True)
        raise click.Abort()
    except click.ClickException:
        raise
    except Exception as e:
        logger.exception("Report generation error")
        click.echo(f"Report generation failed: {e}", err=True)
        raise click.Abort()
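For context, _resolve_workflow_dir above only picks up workflow directories whose metadata.json contains a "label" key and sorts them by the optional "timestamp" field. A minimal sketch of that expected on-disk layout follows; it is not part of the wheel, and the base path and field values are hypothetical.

# Illustrative only: creates the directory layout that _resolve_workflow_dir scans.
# The base path and field values here are hypothetical, not taken from aponyx.
import json
from pathlib import Path

base = Path("data/workflows/my_test_run_20240101_120000")  # hypothetical location
base.mkdir(parents=True, exist_ok=True)

metadata = {
    "label": "my_test_run",              # required: workflows without "label" are skipped
    "timestamp": "2024-01-01T12:00:00",  # optional: used to sort newest-first
}
(base / "metadata.json").write_text(json.dumps(metadata, indent=2), encoding="utf-8")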
aponyx/cli/commands/run.py
ADDED
@@ -0,0 +1,377 @@
"""
Run workflow command.

Executes research workflows for signal-strategy combinations using YAML config files.
"""

import json
import logging
from pathlib import Path
from typing import Any

import click
import yaml

from aponyx.workflows import WorkflowEngine, WorkflowConfig
from aponyx.models.registry import (
    SignalRegistry,
    IndicatorTransformationRegistry,
    ScoreTransformationRegistry,
    SignalTransformationRegistry,
)
from aponyx.backtest.registry import StrategyRegistry
from aponyx.config import (
    SIGNAL_CATALOG_PATH,
    INDICATOR_TRANSFORMATION_PATH,
    SCORE_TRANSFORMATION_PATH,
    SIGNAL_TRANSFORMATION_PATH,
    STRATEGY_CATALOG_PATH,
    BLOOMBERG_SECURITIES_PATH,
)

logger = logging.getLogger(__name__)


def _validate_config_references(
    signal_name: str,
    strategy_name: str,
    indicator_override: str | None,
    score_transformation_override: str | None,
    signal_transformation_override: str | None,
    securities: dict[str, str] | None,
) -> None:
    """
    Validate all catalog references before workflow execution.

    Parameters
    ----------
    signal_name : str
        Signal name to validate.
    strategy_name : str
        Strategy name to validate.
    indicator_override : str | None
        Indicator transformation override to validate (if provided).
    score_transformation_override : str | None
        Score transformation override to validate (if provided).
    signal_transformation_override : str | None
        Signal transformation override to validate (if provided).
    securities : dict[str, str] | None
        Security mapping to validate (if provided).

    Raises
    ------
    click.ClickException
        If any validation fails, with helpful error message and available options.
    """
    # Validate signal exists
    signal_registry = SignalRegistry(SIGNAL_CATALOG_PATH)
    if not signal_registry.signal_exists(signal_name):
        available = ", ".join(sorted(signal_registry.list_all().keys()))
        raise click.ClickException(
            f"Signal '{signal_name}' not found in catalog.\n"
            f"Available signals: {available}"
        )

    # Validate strategy exists
    strategy_registry = StrategyRegistry(STRATEGY_CATALOG_PATH)
    if not strategy_registry.strategy_exists(strategy_name):
        available = ", ".join(sorted(strategy_registry.list_all().keys()))
        raise click.ClickException(
            f"Strategy '{strategy_name}' not found in catalog.\n"
            f"Available strategies: {available}"
        )

    # Validate indicator override (if provided)
    if indicator_override:
        indicator_registry = IndicatorTransformationRegistry(
            INDICATOR_TRANSFORMATION_PATH
        )
        if not indicator_registry.indicator_exists(indicator_override):
            available = ", ".join(sorted(indicator_registry.list_all().keys()))
            raise click.ClickException(
                f"Indicator '{indicator_override}' not found in catalog.\n"
                f"Available indicators: {available}"
            )

    # Validate score transformation override (if provided)
    if score_transformation_override:
        score_registry = ScoreTransformationRegistry(SCORE_TRANSFORMATION_PATH)
        if not score_registry.transformation_exists(score_transformation_override):
            available = ", ".join(sorted(score_registry.list_all().keys()))
            raise click.ClickException(
                f"Score transformation '{score_transformation_override}' not found in score_transformation.json.\n"
                f"Available score transformations: {available}"
            )

    # Validate signal transformation override (if provided)
    if signal_transformation_override:
        signal_trans_registry = SignalTransformationRegistry(SIGNAL_TRANSFORMATION_PATH)
        if not signal_trans_registry.transformation_exists(
            signal_transformation_override
        ):
            available = ", ".join(sorted(signal_trans_registry.list_all().keys()))
            raise click.ClickException(
                f"Signal transformation '{signal_transformation_override}' not found in signal_transformation.json.\n"
                f"Available signal transformations: {available}"
            )

    # Validate securities mapping (if provided)
    if securities:
        with open(BLOOMBERG_SECURITIES_PATH, "r", encoding="utf-8") as f:
            bloomberg_securities = json.load(f)

        for inst_type, security_id in securities.items():
            if security_id not in bloomberg_securities:
                available = ", ".join(sorted(bloomberg_securities.keys()))
                raise click.ClickException(
                    f"Security '{security_id}' not found in bloomberg_securities.json.\n"
                    f"Available securities: {available}"
                )

            # Check instrument_type matches
            security_info = bloomberg_securities[security_id]
            if security_info["instrument_type"] != inst_type:
                # Filter available securities by instrument_type
                filtered = [
                    k
                    for k, v in bloomberg_securities.items()
                    if v["instrument_type"] == inst_type
                ]
                available_filtered = ", ".join(sorted(filtered))
                raise click.ClickException(
                    f"Security '{security_id}' has instrument_type '{security_info['instrument_type']}', expected '{inst_type}'.\n"
                    f"Available {inst_type} securities: {available_filtered}"
                )


def _display_workflow_config(
    config: WorkflowConfig,
    config_dict: dict[str, Any],
) -> None:
    """
    Display complete workflow configuration with source attribution.

    Shows all configuration fields with tags indicating source:
    [config], [from signal], [from indicator], [default]

    Parameters
    ----------
    config : WorkflowConfig
        Workflow configuration to display.
    config_dict : dict[str, Any]
        Original YAML dict to determine what was user-specified.
    """
    header = "=== Workflow Configuration ==="
    click.echo(header)

    # Load registries for metadata lookup
    signal_registry = SignalRegistry(SIGNAL_CATALOG_PATH)
    signal_metadata = signal_registry.get_metadata(config.signal_name)

    indicator_registry = IndicatorTransformationRegistry(INDICATOR_TRANSFORMATION_PATH)

    # Resolve actual indicator transformation (override or from signal)
    if config.indicator_transformation_override:
        indicator_name = config.indicator_transformation_override
        indicator_source = "[config]"
    else:
        indicator_name = signal_metadata.indicator_transformation
        indicator_source = "[from signal]"

    indicator_metadata = indicator_registry.get_metadata(indicator_name)

    # Resolve actual score transformation (override or from signal)
    if config.score_transformation_override:
        score_transformation_name = config.score_transformation_override
        score_source = "[config]"
    else:
        score_transformation_name = signal_metadata.score_transformation
        score_source = "[from signal]"

    # Resolve actual signal transformation (override or from signal)
    if config.signal_transformation_override:
        signal_transformation_name = config.signal_transformation_override
        signal_source = "[config]"
    else:
        signal_transformation_name = signal_metadata.signal_transformation
        signal_source = "[from signal]"

    # Resolve actual securities (mapping or from indicator defaults)
    if config.security_mapping:
        securities_str = ", ".join(
            f"{k}:{v}" for k, v in sorted(config.security_mapping.items())
        )
        securities_source = "[config]"
    else:
        securities_str = ", ".join(
            f"{k}:{v}" for k, v in sorted(indicator_metadata.default_securities.items())
        )
        securities_source = "[from indicator]"

    # Display all fields with proper alignment
    click.echo(f"Label: {config.label} [config]")
    click.echo(
        f"Product: {config.product} {'[config]' if 'product' in config_dict else '[default]'}"
    )
    click.echo(f"Signal: {config.signal_name} [config]")
    click.echo(f"Indicator Transform: {indicator_name} {indicator_source}")
    click.echo(f"Securities: {securities_str} {securities_source}")
    click.echo(f"Score Transform: {score_transformation_name} {score_source}")
    click.echo(
        f"Signal Transform: {signal_transformation_name} {signal_source}"
    )
    click.echo(f"Strategy: {config.strategy_name} [config]")
    click.echo(
        f"Data: {config.data_source} {'[config]' if 'data' in config_dict else '[default]'}"
    )

    # Display steps
    if config.steps:
        steps_str = ", ".join(config.steps)
        steps_source = "[config]"
    else:
        steps_str = "all"
        steps_source = "[default]"
    click.echo(f"Steps: {steps_str} {steps_source}")

    # Display force re-run
    force_source = "[config]" if "force" in config_dict else "[default]"
    click.echo(f"Force re-run: {config.force_rerun} {force_source}")

    click.echo("=" * len(header))
    click.echo()


@click.command(name="run")
@click.argument(
    "config_path",
    type=click.Path(exists=True, path_type=Path),
)
def run(config_path: Path) -> None:
    """
    Run research workflow using YAML configuration file.

    Executes full pipeline: data -> signal -> evaluation -> backtest -> visualization.
    Skips completed steps unless force: true is specified in config.

    All workflow parameters must be specified in the YAML config file.

    Required YAML fields:
    - signal: Signal name (must exist in signal_catalog.json)
    - product: Product identifier (e.g., "cdx_ig_5y")
    - strategy: Strategy name (must exist in strategy_catalog.json)

    Optional YAML fields:
    - indicator: Indicator transformation override (default: from signal)
    - score_transformation: Score transformation override (default: from signal)
    - signal_transformation: Signal transformation override (default: from signal)
    - securities: Security mapping dict (default: from indicator)
    - data: Data source (default: "synthetic")
    - steps: List of steps to execute (default: all)
    - force: Boolean to force re-run (default: false)

    \b
    Examples:
        # Minimal config (workflow_minimal.yaml)
        signal: spread_momentum
        product: cdx_ig_5y
        strategy: balanced

        # Complete config (workflow_complete.yaml)
        signal: cdx_etf_basis
        product: cdx_ig_5y
        strategy: balanced
        indicator: cdx_etf_spread_diff_60d
        score_transformation: z_score_60d
        signal_transformation: bounded_2_0
        securities:
          cdx: cdx_hy_5y
          etf: hyg
        data: bloomberg
        steps: [data, signal, backtest]
        force: true

        # Run workflow
        aponyx run examples/workflow_minimal.yaml
        aponyx run examples/workflow_complete.yaml
    """
    # Load YAML configuration
    try:
        with open(config_path, "r", encoding="utf-8") as f:
            config_dict = yaml.safe_load(f) or {}
        logger.info("Loaded configuration from %s", config_path)
    except Exception as e:
        raise click.ClickException(f"Failed to load config file: {e}")

    # Validate required fields present in YAML
    required_fields = ["label", "signal", "product", "strategy"]
    missing_fields = [f for f in required_fields if f not in config_dict]
    if missing_fields:
        raise click.ClickException(
            f"Missing required field(s) in config file: {', '.join(missing_fields)}\n"
            f"Required fields: label, signal, product, strategy"
        )

    # Extract fields from YAML (map simple keys to WorkflowConfig field names)
    label = config_dict["label"]
    signal_name = config_dict["signal"]
    product_id = config_dict["product"]
    strategy_name = config_dict["strategy"]
    indicator_override = config_dict.get("indicator")
    score_transformation_override = config_dict.get("score_transformation")
    signal_transformation_override = config_dict.get("signal_transformation")
    securities = config_dict.get("securities")
    data_source = config_dict.get("data", "synthetic")
    step_list = config_dict.get("steps")
    force_rerun = config_dict.get("force", False)

    # Validate all catalog references
    _validate_config_references(
        signal_name=signal_name,
        strategy_name=strategy_name,
        indicator_override=indicator_override,
        score_transformation_override=score_transformation_override,
        signal_transformation_override=signal_transformation_override,
        securities=securities,
    )

    # Create WorkflowConfig
    try:
        workflow_config = WorkflowConfig(
            label=label,
            signal_name=signal_name,
            strategy_name=strategy_name,
            product=product_id,
            data_source=data_source,  # type: ignore
            security_mapping=securities,
            indicator_transformation_override=indicator_override,
            score_transformation_override=score_transformation_override,
            signal_transformation_override=signal_transformation_override,
            steps=step_list,  # type: ignore
            force_rerun=force_rerun,
        )
    except ValueError as e:
        raise click.ClickException(f"Configuration error: {e}")

    # Display configuration with source attribution
    _display_workflow_config(workflow_config, config_dict)

    # Execute workflow
    engine = WorkflowEngine(workflow_config)
    results = engine.execute()

    # Display results
    if results["errors"]:
        click.echo(
            f"Workflow failed: {results['steps_completed']} steps completed", err=True
        )
        for error in results["errors"]:
            click.echo(f" {error['step']}: {error['error']}", err=True)
        raise click.Abort()

    click.echo(
        f"Completed {results['steps_completed']} steps in {results['duration_seconds']:.1f}s"
    )
    if results["steps_skipped"] > 0:
        click.echo(f"Skipped {results['steps_skipped']} cached steps")
    click.echo(f"Results: {results['output_dir']}")
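The run command ultimately builds a WorkflowConfig and hands it to WorkflowEngine, so the same pipeline can be driven without the CLI. The sketch below uses only the keyword arguments visible in run() above; the signal, product, and strategy names are taken from the docstring examples and assume they exist in the installed catalogs.

# Illustrative only: drives the same pipeline as `aponyx run` without the CLI.
# Values mirror the minimal docstring example and are assumptions, not fixtures.
from aponyx.workflows import WorkflowConfig, WorkflowEngine

config = WorkflowConfig(
    label="my_test_run",
    signal_name="spread_momentum",
    strategy_name="balanced",
    product="cdx_ig_5y",
    data_source="synthetic",
    security_mapping=None,
    indicator_transformation_override=None,
    score_transformation_override=None,
    signal_transformation_override=None,
    steps=None,          # None means all steps, matching the CLI default
    force_rerun=False,
)

results = WorkflowEngine(config).execute()
print(results["steps_completed"], results["output_dir"])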
aponyx/cli/main.py
ADDED
@@ -0,0 +1,125 @@
"""Command-line interface for systematic macro credit research."""

import logging
import sys
from datetime import datetime

import click

from aponyx import __version__
from aponyx.cli.commands import run, report, list_items, clean
from aponyx.config import LOGS_DIR


class BannerGroup(click.Group):
    """Custom Click Group that displays banner before help."""

    def format_help(self, ctx: click.Context, formatter: click.HelpFormatter) -> str:
        """Format help with banner at the top."""
        # Check if --no-banner is in sys.argv directly
        import sys

        no_banner = "--no-banner" in sys.argv

        # Print banner before help
        if not no_banner:
            print_banner()

        # Return standard help
        return super().format_help(ctx, formatter)


# ASCII Art Banner
BANNER = r"""
   ___
  / _ | ___  ___  ___  __ ____ __
 / __ |/ _ \/ _ \/ _ \/ // /\ \ /
/_/ |_/ .__/\___/_//_/\_, / /_\_\
      /_/            /___/

   Systematic Macro Credit Research
"""


def print_banner() -> None:
    """Display stylized CLI banner."""
    click.echo(click.style(BANNER, fg="cyan", bold=True))
    click.echo(
        click.style(f" Version {__version__}", fg="bright_black")
        + click.style(" | ", fg="bright_black")
        + click.style("Python 3.12+", fg="bright_black")
    )
    click.echo()


@click.group(
    name="aponyx",
    cls=BannerGroup,
    context_settings={"help_option_names": ["-h", "--help"]},
    invoke_without_command=True,
)
@click.option(
    "-v",
    "--verbose",
    is_flag=True,
    help="Enable verbose logging to see detailed execution information",
)
@click.option(
    "--no-banner",
    is_flag=True,
    help="Suppress the startup banner",
)
@click.pass_context
def cli(ctx: click.Context, verbose: bool, no_banner: bool) -> None:
    """Systematic Macro Credit Research CLI."""
    # If no subcommand, just show help (banner already shown by format_help)
    if ctx.invoked_subcommand is None:
        click.echo(ctx.get_help())
        ctx.exit()

    # Configure logging based on verbosity
    log_level = logging.DEBUG if verbose else logging.WARNING

    # Create logs directory if it doesn't exist
    LOGS_DIR.mkdir(parents=True, exist_ok=True)

    # Timestamped log file
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    log_file = LOGS_DIR / f"aponyx_{timestamp}.log"

    # Configure logging with both console and file handlers
    logging.basicConfig(
        level=log_level,
        format="%(levelname)s - %(name)s - %(message)s",
        handlers=[
            logging.StreamHandler(),  # Console output
            logging.FileHandler(log_file, encoding="utf-8"),  # File output
        ],
        force=True,
    )

    logger = logging.getLogger(__name__)
    logger.debug("Logging to file: %s", log_file)

    # Store verbose flag in context for commands to access
    ctx.ensure_object(dict)
    ctx.obj["verbose"] = verbose


# Register commands
cli.add_command(run)
cli.add_command(report)
cli.add_command(list_items)
cli.add_command(clean)


def main() -> None:
    """Entry point for installed CLI."""
    try:
        cli()
    except Exception:
        sys.exit(1)


if __name__ == "__main__":
    main()