aponyx-0.1.18-py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (104)
  1. aponyx/__init__.py +14 -0
  2. aponyx/backtest/__init__.py +31 -0
  3. aponyx/backtest/adapters.py +77 -0
  4. aponyx/backtest/config.py +84 -0
  5. aponyx/backtest/engine.py +560 -0
  6. aponyx/backtest/protocols.py +101 -0
  7. aponyx/backtest/registry.py +334 -0
  8. aponyx/backtest/strategy_catalog.json +50 -0
  9. aponyx/cli/__init__.py +5 -0
  10. aponyx/cli/commands/__init__.py +8 -0
  11. aponyx/cli/commands/clean.py +349 -0
  12. aponyx/cli/commands/list.py +302 -0
  13. aponyx/cli/commands/report.py +167 -0
  14. aponyx/cli/commands/run.py +377 -0
  15. aponyx/cli/main.py +125 -0
  16. aponyx/config/__init__.py +82 -0
  17. aponyx/data/__init__.py +99 -0
  18. aponyx/data/bloomberg_config.py +306 -0
  19. aponyx/data/bloomberg_instruments.json +26 -0
  20. aponyx/data/bloomberg_securities.json +42 -0
  21. aponyx/data/cache.py +294 -0
  22. aponyx/data/fetch.py +659 -0
  23. aponyx/data/fetch_registry.py +135 -0
  24. aponyx/data/loaders.py +205 -0
  25. aponyx/data/providers/__init__.py +13 -0
  26. aponyx/data/providers/bloomberg.py +383 -0
  27. aponyx/data/providers/file.py +111 -0
  28. aponyx/data/registry.py +500 -0
  29. aponyx/data/requirements.py +96 -0
  30. aponyx/data/sample_data.py +415 -0
  31. aponyx/data/schemas.py +60 -0
  32. aponyx/data/sources.py +171 -0
  33. aponyx/data/synthetic_params.json +46 -0
  34. aponyx/data/transforms.py +336 -0
  35. aponyx/data/validation.py +308 -0
  36. aponyx/docs/__init__.py +24 -0
  37. aponyx/docs/adding_data_providers.md +682 -0
  38. aponyx/docs/cdx_knowledge_base.md +455 -0
  39. aponyx/docs/cdx_overlay_strategy.md +135 -0
  40. aponyx/docs/cli_guide.md +607 -0
  41. aponyx/docs/governance_design.md +551 -0
  42. aponyx/docs/logging_design.md +251 -0
  43. aponyx/docs/performance_evaluation_design.md +265 -0
  44. aponyx/docs/python_guidelines.md +786 -0
  45. aponyx/docs/signal_registry_usage.md +369 -0
  46. aponyx/docs/signal_suitability_design.md +558 -0
  47. aponyx/docs/visualization_design.md +277 -0
  48. aponyx/evaluation/__init__.py +11 -0
  49. aponyx/evaluation/performance/__init__.py +24 -0
  50. aponyx/evaluation/performance/adapters.py +109 -0
  51. aponyx/evaluation/performance/analyzer.py +384 -0
  52. aponyx/evaluation/performance/config.py +320 -0
  53. aponyx/evaluation/performance/decomposition.py +304 -0
  54. aponyx/evaluation/performance/metrics.py +761 -0
  55. aponyx/evaluation/performance/registry.py +327 -0
  56. aponyx/evaluation/performance/report.py +541 -0
  57. aponyx/evaluation/suitability/__init__.py +67 -0
  58. aponyx/evaluation/suitability/config.py +143 -0
  59. aponyx/evaluation/suitability/evaluator.py +389 -0
  60. aponyx/evaluation/suitability/registry.py +328 -0
  61. aponyx/evaluation/suitability/report.py +398 -0
  62. aponyx/evaluation/suitability/scoring.py +367 -0
  63. aponyx/evaluation/suitability/tests.py +303 -0
  64. aponyx/examples/01_generate_synthetic_data.py +53 -0
  65. aponyx/examples/02_fetch_data_file.py +82 -0
  66. aponyx/examples/03_fetch_data_bloomberg.py +104 -0
  67. aponyx/examples/04_compute_signal.py +164 -0
  68. aponyx/examples/05_evaluate_suitability.py +224 -0
  69. aponyx/examples/06_run_backtest.py +242 -0
  70. aponyx/examples/07_analyze_performance.py +214 -0
  71. aponyx/examples/08_visualize_results.py +272 -0
  72. aponyx/main.py +7 -0
  73. aponyx/models/__init__.py +45 -0
  74. aponyx/models/config.py +83 -0
  75. aponyx/models/indicator_transformation.json +52 -0
  76. aponyx/models/indicators.py +292 -0
  77. aponyx/models/metadata.py +447 -0
  78. aponyx/models/orchestrator.py +213 -0
  79. aponyx/models/registry.py +860 -0
  80. aponyx/models/score_transformation.json +42 -0
  81. aponyx/models/signal_catalog.json +29 -0
  82. aponyx/models/signal_composer.py +513 -0
  83. aponyx/models/signal_transformation.json +29 -0
  84. aponyx/persistence/__init__.py +16 -0
  85. aponyx/persistence/json_io.py +132 -0
  86. aponyx/persistence/parquet_io.py +378 -0
  87. aponyx/py.typed +0 -0
  88. aponyx/reporting/__init__.py +10 -0
  89. aponyx/reporting/generator.py +517 -0
  90. aponyx/visualization/__init__.py +20 -0
  91. aponyx/visualization/app.py +37 -0
  92. aponyx/visualization/plots.py +309 -0
  93. aponyx/visualization/visualizer.py +242 -0
  94. aponyx/workflows/__init__.py +18 -0
  95. aponyx/workflows/concrete_steps.py +720 -0
  96. aponyx/workflows/config.py +122 -0
  97. aponyx/workflows/engine.py +279 -0
  98. aponyx/workflows/registry.py +116 -0
  99. aponyx/workflows/steps.py +180 -0
  100. aponyx-0.1.18.dist-info/METADATA +552 -0
  101. aponyx-0.1.18.dist-info/RECORD +104 -0
  102. aponyx-0.1.18.dist-info/WHEEL +4 -0
  103. aponyx-0.1.18.dist-info/entry_points.txt +2 -0
  104. aponyx-0.1.18.dist-info/licenses/LICENSE +21 -0
aponyx/models/score_transformation.json
@@ -0,0 +1,42 @@
+ [
+   {
+     "name": "z_score_20d",
+     "description": "Z-score normalization over 20-day rolling window with minimum 10 periods",
+     "transform_type": "z_score",
+     "parameters": {
+       "window": 20,
+       "min_periods": 10
+     },
+     "enabled": true
+   },
+   {
+     "name": "z_score_60d",
+     "description": "Z-score normalization over 60-day rolling window with minimum 30 periods",
+     "transform_type": "z_score",
+     "parameters": {
+       "window": 60,
+       "min_periods": 30
+     },
+     "enabled": true
+   },
+   {
+     "name": "volatility_adjust_20d",
+     "description": "Change normalized by 20-day rolling volatility",
+     "transform_type": "normalized_change",
+     "parameters": {
+       "window": 20,
+       "min_periods": 10,
+       "periods": 1
+     },
+     "enabled": true
+   },
+   {
+     "name": "diff_5d",
+     "description": "5-day first difference (absolute change)",
+     "transform_type": "diff",
+     "parameters": {
+       "periods": 5
+     },
+     "enabled": true
+   }
+ ]
aponyx/models/signal_catalog.json
@@ -0,0 +1,29 @@
+ [
+   {
+     "name": "cdx_etf_basis",
+     "description": "Flow-driven mispricing signal from CDX-ETF basis divergence",
+     "indicator_transformation": "cdx_etf_spread_diff",
+     "score_transformation": "z_score_20d",
+     "signal_transformation": "passthrough",
+     "enabled": true,
+     "sign_multiplier": 1
+   },
+   {
+     "name": "cdx_vix_gap",
+     "description": "Cross-asset risk sentiment divergence between credit and equity volatility",
+     "indicator_transformation": "cdx_vix_deviation_gap_20d",
+     "score_transformation": "z_score_60d",
+     "signal_transformation": "passthrough",
+     "enabled": true,
+     "sign_multiplier": 1
+   },
+   {
+     "name": "spread_momentum",
+     "description": "Short-term volatility-adjusted momentum in CDX spreads",
+     "indicator_transformation": "spread_momentum_5d",
+     "score_transformation": "volatility_adjust_20d",
+     "signal_transformation": "passthrough",
+     "enabled": true,
+     "sign_multiplier": 1
+   }
+ ]
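Each catalog entry binds exactly one indicator transformation, one score transformation, and one signal transformation (the 1:1:1 relationship described in signal_composer.py below). In the package these references are resolved by the registry classes in aponyx/models; the sketch below instead reads the files with plain json, assuming the repository-relative paths from the file list above, just to show how an entry's references line up.

import json
from pathlib import Path

# Plain-JSON walkthrough; the real lookups go through SignalRegistry and friends.
catalog = json.loads(Path("aponyx/models/signal_catalog.json").read_text())
score_transforms = {
    entry["name"]: entry
    for entry in json.loads(Path("aponyx/models/score_transformation.json").read_text())
}

signal = next(entry for entry in catalog if entry["name"] == "cdx_etf_basis")
score_cfg = score_transforms[signal["score_transformation"]]
print(signal["indicator_transformation"])  # cdx_etf_spread_diff
print(score_cfg["parameters"])             # {'window': 20, 'min_periods': 10}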
aponyx/models/signal_composer.py
@@ -0,0 +1,513 @@
+ """
+ Signal composition functions for constructing trading signals via four-stage pipeline.
+
+ FOUR-STAGE TRANSFORMATION PIPELINE
+ -----------------------------------
+ Security → Indicator → Score → Signal → Position
+
+ 1. **Indicator Transformation**: Compute economic metrics from raw securities (e.g., spread difference in bps)
+ 2. **Score Transformation**: Normalize indicator to common scale (e.g., z-score)
+ 3. **Signal Transformation**: Apply trading rules (floor, cap, neutral_range, scaling)
+ 4. **Position Calculation**: Backtest layer converts signal to positions (out of scope for this module)
+
+ This module implements stages 1-3 with independent inspection capabilities.
+ """
+
+ import logging
+ from typing import Any
+
+ import pandas as pd
+
+ from ..data.transforms import (
+     TransformType,
+     apply_signal_transformation,
+     apply_transform,
+ )
+
+ logger = logging.getLogger(__name__)
+
+
+ def apply_score_transformation(
+     indicator_series: pd.Series,
+     transformation_metadata: dict[str, Any],
+ ) -> pd.Series:
+     """
+     Apply a score transformation to an indicator time series.
+
+     Score transformations normalize indicators to a common scale (z-scores, percentiles, etc.)
+     for cross-signal comparison and combination.
+
+     Parameters
+     ----------
+     indicator_series : pd.Series
+         Input indicator time series (in economically interpretable units).
+     transformation_metadata : dict[str, Any]
+         Score transformation metadata with:
+         - transform_type: TransformType (z_score, normalized_change, diff, etc.)
+         - parameters: dict with window, min_periods, periods, etc.
+
+     Returns
+     -------
+     pd.Series
+         Normalized score series (dimensionless, typically z-scores).
+
+     Notes
+     -----
+     - Input indicators are in interpretable units (bps, ratios, percentages)
+     - Output scores are dimensionless for trading signal use
+     - Preserves DatetimeIndex alignment
+     - NaN values propagate from input or transformation calculation
+
+     Examples
+     --------
+     >>> spread_diff = pd.Series([10, 12, 8, 15], index=pd.date_range('2024-01-01', periods=4))
+     >>> metadata = {"transform_type": "z_score", "parameters": {"window": 20, "min_periods": 10}}
+     >>> score = apply_score_transformation(spread_diff, metadata)
+     """
+     transform_type: TransformType = transformation_metadata["transform_type"]
+     parameters = transformation_metadata["parameters"]
+
+     logger.debug(
+         "Applying score transformation: type=%s, params=%s, input_points=%d",
+         transform_type,
+         parameters,
+         len(indicator_series),
+     )
+
+     # Apply transformation using data.transforms module
+     transformed = apply_transform(
+         indicator_series,
+         transform_type,
+         **parameters,
+     )
+
+     valid_count = transformed.notna().sum()
+     logger.debug("Score transformation yielded %d valid values", valid_count)
+
+     return transformed
+
+
+ def compose_signal(
+     signal_name: str,
+     market_data: dict[str, pd.DataFrame],
+     indicator_registry: Any,  # IndicatorTransformationRegistry type
+     score_registry: Any,  # ScoreTransformationRegistry type
+     signal_transformation_registry: Any,  # SignalTransformationRegistry type
+     signal_registry: Any,  # SignalRegistry type
+     *,
+     indicator_transformation_override: str | None = None,
+     score_transformation_override: str | None = None,
+     signal_transformation_override: str | None = None,
+     include_intermediates: bool = False,
+ ) -> pd.Series | dict[str, pd.Series]:
+     """
+     Compose a trading signal via four-stage transformation pipeline.
+
+     FOUR-STAGE PIPELINE
+     -------------------
+     Security → Indicator → Score → Signal → Position
+
+     Each signal references exactly one transformation from each stage (1:1:1 relationship).
+     This function orchestrates the complete pipeline with optional intermediate inspection.
+
+     Parameters
+     ----------
+     signal_name : str
+         Signal identifier (must exist in signal_registry).
+     market_data : dict[str, pd.DataFrame]
+         Market data keyed by instrument type (cdx, vix, etf, etc.).
+     indicator_registry : IndicatorTransformationRegistry
+         Registry for loading indicator transformation metadata.
+     score_registry : ScoreTransformationRegistry
+         Registry for loading score transformation metadata.
+     signal_transformation_registry : SignalTransformationRegistry
+         Registry for loading signal transformation metadata.
+     signal_registry : SignalRegistry
+         Registry for loading signal metadata.
+     indicator_transformation_override : str or None, optional
+         Override the indicator transformation from signal catalog.
+         Must exist in indicator_registry.
+     score_transformation_override : str or None, optional
+         Override the score transformation from signal catalog.
+         Must exist in score_registry.
+     signal_transformation_override : str or None, optional
+         Override the signal transformation from signal catalog.
+         Must exist in signal_transformation_registry.
+     include_intermediates : bool, default False
+         If True, return dict with intermediate stages.
+         If False, return final signal series only.
+
+     Returns
+     -------
+     pd.Series or dict[str, pd.Series]
+         If include_intermediates=False: Final signal series.
+         If include_intermediates=True: Dict with keys:
+         - "indicator": Raw indicator output (bps, ratios, etc.)
+         - "score": Normalized score (z-score, etc.)
+         - "signal": Final signal after trading rules applied
+
+     Raises
+     ------
+     KeyError
+         If signal_name not found in signal_registry.
+         If any transformation reference not found in its registry.
+     ValueError
+         If market data missing required instruments.
+
+     Notes
+     -----
+     - Transformation order is fixed: indicator → score → signal_transformation
+     - Sign multiplier applied after signal_transformation
+     - Each stage can be inspected independently via include_intermediates=True
+     - NaN values propagate through all stages
+
+     Examples
+     --------
+     Basic usage (final signal only):
+     >>> signal = compose_signal(
+     ...     signal_name="cdx_etf_basis",
+     ...     market_data={"cdx": cdx_df, "etf": etf_df},
+     ...     indicator_registry=indicator_reg,
+     ...     score_registry=score_reg,
+     ...     signal_transformation_registry=signal_trans_reg,
+     ...     signal_registry=signal_reg,
+     ... )
+
+     With intermediate inspection:
+     >>> result = compose_signal(
+     ...     signal_name="cdx_etf_basis",
+     ...     market_data={"cdx": cdx_df, "etf": etf_df},
+     ...     indicator_registry=indicator_reg,
+     ...     score_registry=score_reg,
+     ...     signal_transformation_registry=signal_trans_reg,
+     ...     signal_registry=signal_reg,
+     ...     include_intermediates=True,
+     ... )
+     >>> print(result["indicator"].tail())  # Raw basis in bps
+     >>> print(result["score"].tail())  # Normalized z-score
+     >>> print(result["signal"].tail())  # Final trading signal
+     """
+     from .indicators import compute_indicator
+
+     # Load signal metadata
+     signal_metadata = signal_registry.get_metadata(signal_name)
+
+     logger.info("Composing signal via 4-stage pipeline: signal=%s", signal_name)
+
+     # Apply runtime overrides (with validation)
+     indicator_name = (
+         indicator_transformation_override
+         if indicator_transformation_override is not None
+         else signal_metadata.indicator_transformation
+     )
+     score_transformation_name = (
+         score_transformation_override
+         if score_transformation_override is not None
+         else signal_metadata.score_transformation
+     )
+     signal_transformation_name = (
+         signal_transformation_override
+         if signal_transformation_override is not None
+         else signal_metadata.signal_transformation
+     )
+
+     # Validate overrides exist in registries (fail-fast)
+     if indicator_transformation_override is not None:
+         if not indicator_registry.indicator_exists(indicator_transformation_override):
+             raise ValueError(
+                 f"indicator_transformation_override '{indicator_transformation_override}' "
+                 f"not found in indicator_registry. Available: {sorted(indicator_registry.get_all_indicators())}"
+             )
+         logger.info(
+             "Override: indicator_transformation=%s (catalog default: %s)",
+             indicator_transformation_override,
+             signal_metadata.indicator_transformation,
+         )
+
+     if score_transformation_override is not None:
+         if not score_registry.transformation_exists(score_transformation_override):
+             raise ValueError(
+                 f"score_transformation_override '{score_transformation_override}' "
+                 f"not found in score_registry. Available: {sorted(score_registry.list_all().keys())}"
+             )
+         logger.info(
+             "Override: score_transformation=%s (catalog default: %s)",
+             score_transformation_override,
+             signal_metadata.score_transformation,
+         )
+
+     if signal_transformation_override is not None:
+         if not signal_transformation_registry.transformation_exists(
+             signal_transformation_override
+         ):
+             raise ValueError(
+                 f"signal_transformation_override '{signal_transformation_override}' "
+                 f"not found in signal_transformation_registry. Available: {sorted(signal_transformation_registry.list_all().keys())}"
+             )
+         logger.info(
+             "Override: signal_transformation=%s (catalog default: %s)",
+             signal_transformation_override,
+             signal_metadata.signal_transformation,
+         )
+
+     # Stage 1: Indicator Transformation
+     # ----------------------------------
+     # Compute economic metric from raw securities (e.g., spread difference in bps)
+     indicator_metadata = indicator_registry.get_metadata(indicator_name)
+
+     logger.debug("Stage 1: Computing indicator transformation: %s", indicator_name)
+     indicator_series = compute_indicator(
+         indicator_name=indicator_name,
+         market_data=market_data,
+         indicator_metadata=indicator_metadata,
+     )
+     logger.debug(
+         "Indicator output: %d values, unit=%s",
+         indicator_series.notna().sum(),
+         indicator_metadata.output_units,
+     )
+
+     # Stage 2: Score Transformation
+     # ------------------------------
+     # Normalize indicator to common scale (e.g., z-score)
+     score_transformation_metadata = score_registry.get_metadata(
+         score_transformation_name
+     )
+
+     logger.debug(
+         "Stage 2: Applying score transformation: %s", score_transformation_name
+     )
+     score_series = apply_score_transformation(
+         indicator_series,
+         vars(score_transformation_metadata),
+     )
+     logger.debug("Score output: %d values", score_series.notna().sum())
+
+     # Stage 3: Signal Transformation
+     # -------------------------------
+     # Apply trading rules (floor, cap, neutral_range, scaling)
+     signal_transformation_metadata = signal_transformation_registry.get_metadata(
+         signal_transformation_name
+     )
+
+     logger.debug(
+         "Stage 3: Applying signal transformation: %s", signal_transformation_name
+     )
+     signal_series = apply_signal_transformation(
+         score_series,
+         scaling=signal_transformation_metadata.scaling,
+         floor=signal_transformation_metadata.floor,
+         cap=signal_transformation_metadata.cap,
+         neutral_range=signal_transformation_metadata.neutral_range,
+     )
+     logger.debug("Signal output: %d values", signal_series.notna().sum())
+
+     # Stage 4: Sign Convention Alignment
+     # -----------------------------------
+     # Apply sign multiplier to ensure positive = long credit risk
+     final_signal = signal_series * signal_metadata.sign_multiplier
+
+     logger.info(
+         "Signal composition complete: signal=%s, final_values=%d",
+         signal_name,
+         final_signal.notna().sum(),
+     )
+
+     # Return final signal or intermediates dict
+     if include_intermediates:
+         return {
+             "indicator": indicator_series,
+             "score": score_series,
+             "signal": final_signal,
+         }
+     else:
+         return final_signal
+
+
+ def compute_indicator_stage(
+     signal_name: str,
+     market_data: dict[str, pd.DataFrame],
+     indicator_registry: Any,  # IndicatorTransformationRegistry type
+     signal_registry: Any,  # SignalRegistry type
+ ) -> pd.Series:
+     """
+     Compute indicator transformation stage only (Stage 1).
+
+     Use for debugging or analysis when you need the raw indicator output
+     without subsequent normalization or trading rules.
+
+     Parameters
+     ----------
+     signal_name : str
+         Signal identifier (must exist in signal_registry).
+     market_data : dict[str, pd.DataFrame]
+         Market data keyed by instrument type.
+     indicator_registry : IndicatorTransformationRegistry
+         Registry for loading indicator transformation metadata.
+     signal_registry : SignalRegistry
+         Registry for loading signal metadata.
+
+     Returns
+     -------
+     pd.Series
+         Raw indicator output in economic units (bps, ratios, etc.).
+
+     Examples
+     --------
+     >>> indicator = compute_indicator_stage(
+     ...     signal_name="cdx_etf_basis",
+     ...     market_data={"cdx": cdx_df, "etf": etf_df},
+     ...     indicator_registry=indicator_reg,
+     ...     signal_registry=signal_reg,
+     ... )
+     >>> print(f"Basis: {indicator.tail()}")  # Raw bps values
+     """
+     from .indicators import compute_indicator
+
+     signal_metadata = signal_registry.get_metadata(signal_name)
+     indicator_name = signal_metadata.indicator_transformation
+     indicator_metadata = indicator_registry.get_metadata(indicator_name)
+
+     logger.info(
+         "Computing indicator stage only: signal=%s, indicator=%s",
+         signal_name,
+         indicator_name,
+     )
+
+     return compute_indicator(
+         indicator_name=indicator_name,
+         market_data=market_data,
+         indicator_metadata=indicator_metadata,
+     )
+
+
+ def compute_score_stage(
+     signal_name: str,
+     market_data: dict[str, pd.DataFrame],
+     indicator_registry: Any,  # IndicatorTransformationRegistry type
+     score_registry: Any,  # ScoreTransformationRegistry type
+     signal_registry: Any,  # SignalRegistry type
+ ) -> pd.Series:
+     """
+     Compute through score transformation stage (Stages 1-2).
+
+     Use for debugging normalization behavior without trading rules applied.
+
+     Parameters
+     ----------
+     signal_name : str
+         Signal identifier (must exist in signal_registry).
+     market_data : dict[str, pd.DataFrame]
+         Market data keyed by instrument type.
+     indicator_registry : IndicatorTransformationRegistry
+         Registry for loading indicator transformation metadata.
+     score_registry : ScoreTransformationRegistry
+         Registry for loading score transformation metadata.
+     signal_registry : SignalRegistry
+         Registry for loading signal metadata.
+
+     Returns
+     -------
+     pd.Series
+         Normalized score (z-score, etc.).
+
+     Examples
+     --------
+     >>> score = compute_score_stage(
+     ...     signal_name="cdx_etf_basis",
+     ...     market_data={"cdx": cdx_df, "etf": etf_df},
+     ...     indicator_registry=indicator_reg,
+     ...     score_registry=score_reg,
+     ...     signal_registry=signal_reg,
+     ... )
+     >>> print(f"Z-score: {score.tail()}")  # Normalized values
+     """
+     from .indicators import compute_indicator
+
+     signal_metadata = signal_registry.get_metadata(signal_name)
+
+     # Stage 1: Indicator
+     indicator_name = signal_metadata.indicator_transformation
+     indicator_metadata = indicator_registry.get_metadata(indicator_name)
+     indicator_series = compute_indicator(
+         indicator_name=indicator_name,
+         market_data=market_data,
+         indicator_metadata=indicator_metadata,
+     )
+
+     # Stage 2: Score
+     score_transformation_name = signal_metadata.score_transformation
+     score_transformation_metadata = score_registry.get_metadata(
+         score_transformation_name
+     )
+
+     logger.info(
+         "Computing score stage: signal=%s, indicator=%s, score=%s",
+         signal_name,
+         indicator_name,
+         score_transformation_name,
+     )
+
+     return apply_score_transformation(
+         indicator_series,
+         vars(score_transformation_metadata),
+     )
+
+
+ def compute_signal_stage(
+     signal_name: str,
+     market_data: dict[str, pd.DataFrame],
+     indicator_registry: Any,  # IndicatorTransformationRegistry type
+     score_registry: Any,  # ScoreTransformationRegistry type
+     signal_transformation_registry: Any,  # SignalTransformationRegistry type
+     signal_registry: Any,  # SignalRegistry type
+ ) -> pd.Series:
+     """
+     Compute through signal transformation stage (Stages 1-3).
+
+     This is equivalent to compose_signal() with include_intermediates=False,
+     provided for symmetry with other stage-specific functions.
+
+     Parameters
+     ----------
+     signal_name : str
+         Signal identifier (must exist in signal_registry).
+     market_data : dict[str, pd.DataFrame]
+         Market data keyed by instrument type.
+     indicator_registry : IndicatorTransformationRegistry
+         Registry for loading indicator transformation metadata.
+     score_registry : ScoreTransformationRegistry
+         Registry for loading score transformation metadata.
+     signal_transformation_registry : SignalTransformationRegistry
+         Registry for loading signal transformation metadata.
+     signal_registry : SignalRegistry
+         Registry for loading signal metadata.
+
+     Returns
+     -------
+     pd.Series
+         Final trading signal (with trading rules applied, sign convention aligned).
+
+     Examples
+     --------
+     >>> signal = compute_signal_stage(
+     ...     signal_name="cdx_etf_basis",
+     ...     market_data={"cdx": cdx_df, "etf": etf_df},
+     ...     indicator_registry=indicator_reg,
+     ...     score_registry=score_reg,
+     ...     signal_transformation_registry=signal_trans_reg,
+     ...     signal_registry=signal_reg,
+     ... )
+     >>> print(f"Signal: {signal.tail()}")  # Bounded, final values
+     """
+     logger.info("Computing signal stage (full pipeline): signal=%s", signal_name)
+
+     return compose_signal(
+         signal_name=signal_name,
+         market_data=market_data,
+         indicator_registry=indicator_registry,
+         score_registry=score_registry,
+         signal_transformation_registry=signal_transformation_registry,
+         signal_registry=signal_registry,
+         include_intermediates=False,
+     )
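A convenient way to eyeball the three stages side by side is to stack the intermediates dict returned by compose_signal(..., include_intermediates=True) into a single DataFrame. The values below are synthetic stand-ins rather than aponyx output; only the dict keys ("indicator", "score", "signal") come from the module above.

import pandas as pd

# Synthetic stand-in for compose_signal(..., include_intermediates=True) output.
idx = pd.date_range("2024-01-01", periods=5)
result = {
    "indicator": pd.Series([12.0, 15.0, 9.0, 20.0, 18.0], index=idx),  # raw basis, bps
    "score": pd.Series([0.4, 0.9, -0.6, 1.7, 1.2], index=idx),         # normalized score
    "signal": pd.Series([0.4, 0.9, -0.6, 1.5, 1.2], index=idx),        # after trading rules
}

stages = pd.DataFrame(result)
print(stages)
print(stages["signal"].abs().max())  # should respect the configured floor/cap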
aponyx/models/signal_transformation.json
@@ -0,0 +1,29 @@
+ [
+   {
+     "name": "passthrough",
+     "description": "No trading rule transformation (raw score as signal)",
+     "scaling": 1.0,
+     "floor": null,
+     "cap": null,
+     "neutral_range": null,
+     "enabled": true
+   },
+   {
+     "name": "bounded_1_5",
+     "description": "Signal bounded to [-1.5, 1.5] with neutral zone [-0.25, 0.25]",
+     "scaling": 1.0,
+     "floor": -1.5,
+     "cap": 1.5,
+     "neutral_range": [-0.25, 0.25],
+     "enabled": true
+   },
+   {
+     "name": "bounded_2_0",
+     "description": "Signal bounded to [-2.0, 2.0] without neutral zone",
+     "scaling": 1.0,
+     "floor": -2.0,
+     "cap": 2.0,
+     "neutral_range": null,
+     "enabled": true
+   }
+ ]
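The bounded_1_5 entry clips the score to [-1.5, 1.5] and zeroes anything inside the neutral zone [-0.25, 0.25]. The authoritative logic is apply_signal_transformation in aponyx/data/transforms.py, which is not shown in this section, so the sketch below is only an assumed reading of those semantics, exercised with the bounded_1_5 parameters.

import pandas as pd

def apply_trading_rules(
    score: pd.Series,
    scaling: float = 1.0,
    floor: float | None = None,
    cap: float | None = None,
    neutral_range: tuple[float, float] | None = None,
) -> pd.Series:
    # Assumed semantics: scale, zero out the neutral zone, then clip to [floor, cap].
    signal = score * scaling
    if neutral_range is not None:
        low, high = neutral_range
        signal = signal.where((signal < low) | (signal > high), 0.0)
    return signal.clip(lower=floor, upper=cap)

z = pd.Series([-2.1, -0.1, 0.2, 0.9, 1.8])
print(apply_trading_rules(z, floor=-1.5, cap=1.5, neutral_range=(-0.25, 0.25)))
# -> [-1.5, 0.0, 0.0, 0.9, 1.5] under the bounded_1_5 settings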
aponyx/persistence/__init__.py
@@ -0,0 +1,16 @@
+ """
+ Persistence layer for time series data and metadata management.
+
+ Provides clean abstractions for Parquet and JSON I/O.
+ """
+
+ from .parquet_io import save_parquet, load_parquet, list_parquet_files
+ from .json_io import save_json, load_json
+
+ __all__ = [
+     "save_parquet",
+     "load_parquet",
+     "list_parquet_files",
+     "save_json",
+     "load_json",
+ ]
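The save_parquet/load_parquet signatures live in parquet_io.py, which is not part of this section, so the snippet below uses plain pandas to illustrate the kind of round trip such a Parquet persistence layer wraps; the file name is made up.

import pandas as pd

# Plain-pandas round trip (requires pyarrow or fastparquet); hypothetical file name.
df = pd.DataFrame(
    {"spread_bps": [98.5, 101.2, 99.7]},
    index=pd.date_range("2024-01-01", periods=3, name="date"),
)
df.to_parquet("cdx_spreads.parquet")
restored = pd.read_parquet("cdx_spreads.parquet")
print(restored.equals(df))  # True when the index round-trips cleanly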