quantmllibrary-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. quantml/__init__.py +74 -0
  2. quantml/autograd.py +154 -0
  3. quantml/cli/__init__.py +10 -0
  4. quantml/cli/run_experiment.py +385 -0
  5. quantml/config/__init__.py +28 -0
  6. quantml/config/config.py +259 -0
  7. quantml/data/__init__.py +33 -0
  8. quantml/data/cache.py +149 -0
  9. quantml/data/feature_store.py +234 -0
  10. quantml/data/futures.py +254 -0
  11. quantml/data/loaders.py +236 -0
  12. quantml/data/memory_optimizer.py +234 -0
  13. quantml/data/validators.py +390 -0
  14. quantml/experiments/__init__.py +23 -0
  15. quantml/experiments/logger.py +208 -0
  16. quantml/experiments/results.py +158 -0
  17. quantml/experiments/tracker.py +223 -0
  18. quantml/features/__init__.py +25 -0
  19. quantml/features/base.py +104 -0
  20. quantml/features/gap_features.py +124 -0
  21. quantml/features/registry.py +138 -0
  22. quantml/features/volatility_features.py +140 -0
  23. quantml/features/volume_features.py +142 -0
  24. quantml/functional.py +37 -0
  25. quantml/models/__init__.py +27 -0
  26. quantml/models/attention.py +258 -0
  27. quantml/models/dropout.py +130 -0
  28. quantml/models/gru.py +319 -0
  29. quantml/models/linear.py +112 -0
  30. quantml/models/lstm.py +353 -0
  31. quantml/models/mlp.py +286 -0
  32. quantml/models/normalization.py +289 -0
  33. quantml/models/rnn.py +154 -0
  34. quantml/models/tcn.py +238 -0
  35. quantml/online.py +209 -0
  36. quantml/ops.py +1707 -0
  37. quantml/optim/__init__.py +42 -0
  38. quantml/optim/adafactor.py +206 -0
  39. quantml/optim/adagrad.py +157 -0
  40. quantml/optim/adam.py +267 -0
  41. quantml/optim/lookahead.py +97 -0
  42. quantml/optim/quant_optimizer.py +228 -0
  43. quantml/optim/radam.py +192 -0
  44. quantml/optim/rmsprop.py +203 -0
  45. quantml/optim/schedulers.py +286 -0
  46. quantml/optim/sgd.py +181 -0
  47. quantml/py.typed +0 -0
  48. quantml/streaming.py +175 -0
  49. quantml/tensor.py +462 -0
  50. quantml/time_series.py +447 -0
  51. quantml/training/__init__.py +135 -0
  52. quantml/training/alpha_eval.py +203 -0
  53. quantml/training/backtest.py +280 -0
  54. quantml/training/backtest_analysis.py +168 -0
  55. quantml/training/cv.py +106 -0
  56. quantml/training/data_loader.py +177 -0
  57. quantml/training/ensemble.py +84 -0
  58. quantml/training/feature_importance.py +135 -0
  59. quantml/training/features.py +364 -0
  60. quantml/training/futures_backtest.py +266 -0
  61. quantml/training/gradient_clipping.py +206 -0
  62. quantml/training/losses.py +248 -0
  63. quantml/training/lr_finder.py +127 -0
  64. quantml/training/metrics.py +376 -0
  65. quantml/training/regularization.py +89 -0
  66. quantml/training/trainer.py +239 -0
  67. quantml/training/walk_forward.py +190 -0
  68. quantml/utils/__init__.py +51 -0
  69. quantml/utils/gradient_check.py +274 -0
  70. quantml/utils/logging.py +181 -0
  71. quantml/utils/ops_cpu.py +231 -0
  72. quantml/utils/profiling.py +364 -0
  73. quantml/utils/reproducibility.py +220 -0
  74. quantml/utils/serialization.py +335 -0
  75. quantmllibrary-0.1.0.dist-info/METADATA +536 -0
  76. quantmllibrary-0.1.0.dist-info/RECORD +79 -0
  77. quantmllibrary-0.1.0.dist-info/WHEEL +5 -0
  78. quantmllibrary-0.1.0.dist-info/licenses/LICENSE +22 -0
  79. quantmllibrary-0.1.0.dist-info/top_level.txt +1 -0
quantml/training/alpha_eval.py
@@ -0,0 +1,203 @@
+ """
+ Alpha evaluation framework for signal quality assessment.
+
+ This module provides tools for evaluating alpha signals, including
+ Information Coefficient (IC), turnover analysis, decay analysis, and more.
+ """
+
+ from typing import List, Optional, Dict, Any, Union
+ from quantml.training.metrics import information_coefficient, rank_ic, turnover
+
+ # Try to import NumPy
+ try:
+     import numpy as np
+     HAS_NUMPY = True
+ except ImportError:
+     HAS_NUMPY = False
+     np = None
+
+
+ class AlphaEvaluator:
+     """
+     Alpha signal quality evaluator.
+
+     This class provides comprehensive evaluation of alpha signals,
+     including IC analysis, turnover, decay, and factor exposure.
+
+     Attributes:
+         predictions: List of predictions/signals
+         actuals: List of actual/realized values
+         timestamps: Optional timestamps for time-based analysis
+
+     Examples:
+         >>> evaluator = AlphaEvaluator(predictions, actuals)
+         >>> metrics = evaluator.evaluate()
+         >>> print(f"IC: {metrics['ic']}, Rank IC: {metrics['rank_ic']}")
+     """
+
+     def __init__(
+         self,
+         predictions: List[float],
+         actuals: List[float],
+         timestamps: Optional[List] = None
+     ):
+         """
+         Initialize alpha evaluator.
+
+         Args:
+             predictions: Predicted values/signals
+             actuals: Actual/realized values
+             timestamps: Optional timestamps for time-based analysis
+         """
+         if len(predictions) != len(actuals):
+             raise ValueError("predictions and actuals must have same length")
+
+         self.predictions = predictions
+         self.actuals = actuals
+         self.timestamps = timestamps if timestamps else list(range(len(predictions)))
+         self.n = len(predictions)
+
+     def evaluate(self) -> Dict[str, Any]:
+         """
+         Perform comprehensive alpha evaluation.
+
+         Returns:
+             Dictionary with evaluation metrics
+         """
+         metrics = {}
+
+         # Basic IC metrics
+         metrics['ic'] = information_coefficient(self.predictions, self.actuals)
+         metrics['rank_ic'] = rank_ic(self.predictions, self.actuals)
+
+         # Turnover analysis
+         metrics['turnover'] = turnover(self.predictions)
+
+         # Decay analysis
+         decay_metrics = self.decay_analysis()
+         metrics.update(decay_metrics)
+
+         # Hit rate
+         from quantml.training.metrics import hit_rate
+         metrics['hit_rate'] = hit_rate(self.predictions, self.actuals)
+
+         # Signal statistics
+         metrics['signal_mean'] = sum(self.predictions) / len(self.predictions)
+         metrics['signal_std'] = self._std(self.predictions)
+
+         return metrics
+
+     def decay_analysis(self, max_lag: int = 5) -> Dict[str, Any]:
+         """
+         Analyze signal decay over time (how long signal remains predictive).
+
+         Args:
+             max_lag: Maximum lag to analyze
+
+         Returns:
+             Dictionary with decay metrics
+         """
+         decay_ics = []
+
+         for lag in range(max_lag + 1):
+             if lag == 0:
+                 # Current IC
+                 ic = information_coefficient(self.predictions, self.actuals)
+             else:
+                 # IC at lag
+                 if lag < len(self.predictions):
+                     pred_lag = self.predictions[:-lag] if lag > 0 else self.predictions
+                     actual_lag = self.actuals[lag:]
+                     if len(pred_lag) == len(actual_lag) and len(pred_lag) > 0:
+                         ic = information_coefficient(pred_lag, actual_lag)
+                     else:
+                         ic = 0.0
+                 else:
+                     ic = 0.0
+             decay_ics.append(ic)
+
+         # Calculate half-life (lag where IC drops to half)
+         initial_ic = abs(decay_ics[0]) if decay_ics else 0.0
+         half_ic = initial_ic / 2.0
+
+         half_life = max_lag
+         for i, ic in enumerate(decay_ics):
+             if abs(ic) <= half_ic:
+                 half_life = i
+                 break
+
+         return {
+             'decay_ics': decay_ics,
+             'half_life': half_life,
+             'initial_ic': initial_ic,
+             'decay_rate': (decay_ics[0] - decay_ics[-1]) / max_lag if max_lag > 0 else 0.0
+         }
+
+     def rolling_ic(self, window: int = 21) -> List[float]:
+         """
+         Calculate rolling Information Coefficient.
+
+         Args:
+             window: Rolling window size
+
+         Returns:
+             List of rolling IC values
+         """
+         rolling_ics = []
+
+         for i in range(window, len(self.predictions)):
+             pred_window = self.predictions[i-window:i]
+             actual_window = self.actuals[i-window:i]
+             ic = information_coefficient(pred_window, actual_window)
+             rolling_ics.append(ic)
+
+         return rolling_ics
+
+     def factor_exposure(self, factors: Dict[str, List[float]]) -> Dict[str, float]:
+         """
+         Calculate factor exposure of predictions.
+
+         Args:
+             factors: Dictionary of factor name -> factor values
+
+         Returns:
+             Dictionary of factor exposures (correlations)
+         """
+         exposures = {}
+
+         for factor_name, factor_values in factors.items():
+             if len(factor_values) != len(self.predictions):
+                 continue
+             exposure = information_coefficient(self.predictions, factor_values)
+             exposures[factor_name] = exposure
+
+         return exposures
+
+     def _std(self, values: List[float]) -> float:
+         """Calculate standard deviation."""
+         if len(values) == 0:
+             return 0.0
+         mean_val = sum(values) / len(values)
+         variance = sum((v - mean_val) ** 2 for v in values) / len(values)
+         return variance ** 0.5
+
+
+ def evaluate_alpha_signals(
+     predictions: List[float],
+     actuals: List[float],
+     timestamps: Optional[List] = None
+ ) -> Dict[str, Any]:
+     """
+     Convenience function for alpha evaluation.
+
+     Args:
+         predictions: Predicted values
+         actuals: Actual values
+         timestamps: Optional timestamps
+
+     Returns:
+         Evaluation metrics dictionary
+     """
+     evaluator = AlphaEvaluator(predictions, actuals, timestamps)
+     return evaluator.evaluate()
+
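For orientation, here is a minimal usage sketch of the evaluator above. It assumes the module is importable as quantml.training.alpha_eval (matching the path in the file listing); the input series are synthetic and purely illustrative.

import random

from quantml.training.alpha_eval import AlphaEvaluator, evaluate_alpha_signals

# Synthetic realized returns and a noisy signal correlated with them (illustrative only).
random.seed(0)
actuals = [random.gauss(0.0, 0.01) for _ in range(250)]
predictions = [a * 0.3 + random.gauss(0.0, 0.01) for a in actuals]

evaluator = AlphaEvaluator(predictions, actuals)
metrics = evaluator.evaluate()
print(metrics['ic'], metrics['rank_ic'], metrics['half_life'])

# Rolling IC over a ~1-month window, plus the one-call convenience wrapper.
rolling = evaluator.rolling_ic(window=21)
summary = evaluate_alpha_signals(predictions, actuals)

The keys used here ('ic', 'rank_ic', 'half_life') come from evaluate() and decay_analysis() as defined in the diff; any other metric keys would need to be checked against quantml/training/metrics.py.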
quantml/training/backtest.py
@@ -0,0 +1,280 @@
+ """
+ Backtesting engine for strategy evaluation.
+
+ This module provides a backtesting framework for evaluating trading strategies
+ with position sizing, transaction costs, and performance metrics.
+ """
+
+ from typing import List, Optional, Callable, Dict, Any
+ from quantml.training.metrics import (
+     sharpe_ratio, sortino_ratio, calmar_ratio, max_drawdown
+ )
+
+ # Try to import NumPy
+ try:
+     import numpy as np
+     HAS_NUMPY = True
+ except ImportError:
+     HAS_NUMPY = False
+     np = None
+
+
+ class BacktestEngine:
+     """
+     Backtesting engine for strategy evaluation.
+
+     This class simulates trading a strategy on historical data, tracking
+     positions, P&L, and performance metrics.
+
+     Attributes:
+         initial_capital: Starting capital
+         commission: Commission per trade (as fraction)
+         slippage: Slippage per trade (as fraction)
+         position_sizing: Position sizing function
+
+     Examples:
+         >>> engine = BacktestEngine(initial_capital=100000)
+         >>> signals = [0.5, -0.3, 0.8, ...]  # Trading signals
+         >>> prices = [100, 101, 102, ...]  # Price data
+         >>> results = engine.run(signals, prices)
+     """
+
+     def __init__(
+         self,
+         initial_capital: float = 100000.0,
+         commission: float = 0.001,  # 0.1%
+         slippage: float = 0.0005,  # 0.05%
+         position_sizing: Optional[Callable] = None
+     ):
+         """
+         Initialize backtesting engine.
+
+         Args:
+             initial_capital: Starting capital
+             commission: Commission rate per trade
+             slippage: Slippage rate per trade
+             position_sizing: Function to determine position size from signal
+                 Default: signal * capital (simple)
+         """
+         self.initial_capital = initial_capital
+         self.commission = commission
+         self.slippage = slippage
+         self.position_sizing = position_sizing if position_sizing else self._default_position_sizing
+
+     def _default_position_sizing(self, signal: float, capital: float, price: float) -> float:
+         """Default position sizing: signal * capital / price."""
+         return signal * capital / price if price > 0 else 0.0
+
+     def run(
+         self,
+         signals: List[float],
+         prices: List[float],
+         volumes: Optional[List[float]] = None
+     ) -> Dict[str, Any]:
+         """
+         Run backtest on signals and prices.
+
+         Args:
+             signals: Trading signals (-1 to 1, or position sizes)
+             prices: Price data
+             volumes: Optional volume data (for VWAP execution)
+
+         Returns:
+             Dictionary with backtest results and metrics
+         """
+         if len(signals) != len(prices):
+             raise ValueError("signals and prices must have same length")
+
+         n = len(signals)
+         capital = self.initial_capital
+         position = 0.0  # Current position (number of shares)
+         equity_curve = [capital]
+         trades = []
+         returns = []
+
+         for i in range(n):
+             signal = signals[i]
+             price = prices[i]
+
+             # Determine target position
+             target_position_value = self.position_sizing(signal, capital, price)
+             target_position = target_position_value / price if price > 0 else 0.0
+
+             # Calculate trade
+             trade_size = target_position - position
+
+             if abs(trade_size) > 1e-6:  # Only trade if significant
+                 # Apply slippage
+                 execution_price = price * (1 + self.slippage * (1 if trade_size > 0 else -1))
+
+                 # Calculate costs
+                 trade_value = abs(trade_size * execution_price)
+                 commission_cost = trade_value * self.commission
+                 slippage_cost = abs(trade_size * price * self.slippage)
+                 total_cost = commission_cost + slippage_cost
+
+                 # Update capital
+                 capital -= trade_size * execution_price + total_cost
+
+                 # Update position
+                 position = target_position
+
+                 trades.append({
+                     'index': i,
+                     'price': price,
+                     'execution_price': execution_price,
+                     'size': trade_size,
+                     'cost': total_cost
+                 })
+
+             # Update equity
+             current_value = capital + position * price
+             equity_curve.append(current_value)
+
+             # Calculate return
+             if i > 0:
+                 prev_value = equity_curve[-2]
+                 ret = (current_value - prev_value) / prev_value if prev_value > 0 else 0.0
+                 returns.append(ret)
+
+         # Calculate metrics
+         final_value = equity_curve[-1]
+         total_return = (final_value - self.initial_capital) / self.initial_capital
+
+         # Performance metrics
+         sharpe = sharpe_ratio(returns) if returns else 0.0
+         sortino = sortino_ratio(returns) if returns else 0.0
+         calmar = calmar_ratio(returns) if returns else 0.0
+         max_dd = max_drawdown(returns) if returns else 0.0
+
+         # Calculate trade-level P&L
+         completed_trades = []
+         open_trade = None
+
+         for i, trade in enumerate(trades):
+             if open_trade is None:
+                 # Open new trade
+                 open_trade = {
+                     'entry_index': trade['index'],
+                     'entry_price': trade['execution_price'],
+                     'entry_size': trade['size'],
+                     'direction': 1 if trade['size'] > 0 else -1
+                 }
+             else:
+                 # Check if trade closes position
+                 if (open_trade['direction'] > 0 and trade['size'] < 0) or \
+                    (open_trade['direction'] < 0 and trade['size'] > 0):
+                     # Close trade
+                     exit_price = trade['execution_price']
+                     exit_size = abs(trade['size'])
+                     entry_size = abs(open_trade['entry_size'])
+
+                     # Calculate P&L
+                     if open_trade['direction'] > 0:  # Long
+                         pnl = (exit_price - open_trade['entry_price']) * min(entry_size, exit_size)
+                     else:  # Short
+                         pnl = (open_trade['entry_price'] - exit_price) * min(entry_size, exit_size)
+
+                     pnl -= trade['cost']  # Subtract exit cost
+
+                     completed_trades.append({
+                         'entry_index': open_trade['entry_index'],
+                         'exit_index': trade['index'],
+                         'entry_price': open_trade['entry_price'],
+                         'exit_price': exit_price,
+                         'size': min(entry_size, exit_size),
+                         'direction': open_trade['direction'],
+                         'pnl': pnl,
+                         'duration': trade['index'] - open_trade['entry_index'],
+                         'return_pct': pnl / (open_trade['entry_price'] * min(entry_size, exit_size)) if open_trade['entry_price'] > 0 else 0.0
+                     })
+
+                     # Update open trade if partial close
+                     if exit_size < entry_size:
+                         open_trade['entry_size'] = open_trade['entry_size'] - (exit_size if open_trade['direction'] > 0 else -exit_size)
+                     else:
+                         open_trade = None
+
+         # Trade statistics
+         n_trades = len(trades)
+         n_completed_trades = len(completed_trades)
+         winning_trades = [t for t in completed_trades if t['pnl'] > 0]
+         losing_trades = [t for t in completed_trades if t['pnl'] <= 0]
+
+         win_rate = len(winning_trades) / n_completed_trades if n_completed_trades > 0 else 0.0
+         avg_win = sum(t['pnl'] for t in winning_trades) / len(winning_trades) if winning_trades else 0.0
+         avg_loss = sum(t['pnl'] for t in losing_trades) / len(losing_trades) if losing_trades else 0.0
+         profit_factor = abs(sum(t['pnl'] for t in winning_trades) / sum(t['pnl'] for t in losing_trades)) if losing_trades and sum(t['pnl'] for t in losing_trades) != 0 else float('inf') if winning_trades else 0.0
+
+         avg_duration = sum(t['duration'] for t in completed_trades) / n_completed_trades if n_completed_trades > 0 else 0.0
+
+         return {
+             'initial_capital': self.initial_capital,
+             'final_value': final_value,
+             'total_return': total_return,
+             'equity_curve': equity_curve,
+             'returns': returns,
+             'trades': trades,
+             'completed_trades': completed_trades,
+             'n_trades': n_trades,
+             'n_completed_trades': n_completed_trades,
+             'win_rate': win_rate,
+             'avg_win': avg_win,
+             'avg_loss': avg_loss,
+             'profit_factor': profit_factor,
+             'avg_duration': avg_duration,
+             'sharpe_ratio': sharpe,
+             'sortino_ratio': sortino,
+             'calmar_ratio': calmar,
+             'max_drawdown': max_dd
+         }
+
+     def run_with_predictions(
+         self,
+         predictions: List[float],
+         prices: List[float],
+         targets: Optional[List[float]] = None
+     ) -> Dict[str, Any]:
+         """
+         Run backtest using model predictions as signals.
+
+         Args:
+             predictions: Model predictions (can be returns, prices, or signals)
+             prices: Price data
+             targets: Optional target returns (for comparison)
+
+         Returns:
+             Backtest results dictionary
+         """
+         # Convert predictions to signals (normalize to -1 to 1)
+         if HAS_NUMPY:
+             try:
+                 pred_arr = np.array(predictions)
+                 # Normalize to [-1, 1]
+                 pred_min, pred_max = np.min(pred_arr), np.max(pred_arr)
+                 if pred_max > pred_min:
+                     signals = 2 * (pred_arr - pred_min) / (pred_max - pred_min) - 1
+                 else:
+                     signals = pred_arr * 0  # All zeros
+                 signals = signals.tolist()
+             except (ValueError, TypeError):
+                 signals = predictions
+         else:
+             # Pure Python normalization
+             pred_min = min(predictions)
+             pred_max = max(predictions)
+             if pred_max > pred_min:
+                 signals = [2 * (p - pred_min) / (pred_max - pred_min) - 1 for p in predictions]
+             else:
+                 signals = [0.0] * len(predictions)
+
+         results = self.run(signals, prices)
+
+         if targets is not None:
+             # Add prediction accuracy metrics
+             from quantml.training.metrics import information_coefficient, hit_rate
+             results['ic'] = information_coefficient(predictions, targets)
+             results['hit_rate'] = hit_rate(predictions, targets)
+
+         return results
+
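Below is a minimal sketch of driving the engine above on synthetic data. It assumes imports from quantml.training.backtest (the module path in the file listing); the price path, signals, and the half_capital_sizing helper are illustrative inventions, not part of the package. The (signal, capital, price) signature of the custom sizing callable mirrors the engine's own _default_position_sizing hook, and run() divides the returned notional value by price to get a share count.

import math
import random

from quantml.training.backtest import BacktestEngine

# Synthetic price path and signals already scaled to [-1, 1] (illustrative only).
random.seed(1)
prices = [100.0]
for _ in range(251):
    prices.append(prices[-1] * (1 + random.gauss(0.0, 0.01)))
signals = [math.tanh(random.gauss(0.0, 1.0)) for _ in prices]

def half_capital_sizing(signal: float, capital: float, price: float) -> float:
    """Hypothetical sizing rule: commit at most half the current capital, scaled by the signal."""
    return 0.5 * signal * capital  # notional value; run() converts this to shares

engine = BacktestEngine(
    initial_capital=100_000.0,
    commission=0.001,
    slippage=0.0005,
    position_sizing=half_capital_sizing,
)
results = engine.run(signals, prices)
print(results['total_return'], results['sharpe_ratio'], results['n_completed_trades'])

# Raw model outputs can also be passed directly; run_with_predictions
# rescales them to [-1, 1] before delegating to run().
results2 = engine.run_with_predictions(predictions=signals, prices=prices)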
quantml/training/backtest_analysis.py
@@ -0,0 +1,168 @@
+ """
+ Backtest analysis and visualization utilities.
+
+ Provides trade-level analysis, performance heatmaps, and regime-based breakdowns.
+ """
+
+ from typing import List, Dict, Any, Optional
+ from collections import defaultdict
+
+
+ def analyze_trades(completed_trades: List[Dict[str, Any]]) -> Dict[str, Any]:
+     """
+     Analyze completed trades for detailed statistics.
+
+     Args:
+         completed_trades: List of completed trade dictionaries
+
+     Returns:
+         Dictionary with trade analysis
+     """
+     if not completed_trades:
+         return {
+             'n_trades': 0,
+             'win_rate': 0.0,
+             'avg_win': 0.0,
+             'avg_loss': 0.0,
+             'profit_factor': 0.0,
+             'avg_duration': 0.0
+         }
+
+     winning_trades = [t for t in completed_trades if t.get('pnl', 0) > 0]
+     losing_trades = [t for t in completed_trades if t.get('pnl', 0) <= 0]
+
+     win_rate = len(winning_trades) / len(completed_trades)
+     avg_win = sum(t['pnl'] for t in winning_trades) / len(winning_trades) if winning_trades else 0.0
+     avg_loss = sum(t['pnl'] for t in losing_trades) / len(losing_trades) if losing_trades else 0.0
+
+     total_win = sum(t['pnl'] for t in winning_trades)
+     total_loss = abs(sum(t['pnl'] for t in losing_trades))
+     profit_factor = total_win / total_loss if total_loss > 0 else float('inf') if total_win > 0 else 0.0
+
+     avg_duration = sum(t.get('duration', 0) for t in completed_trades) / len(completed_trades)
+
+     # Largest win/loss
+     largest_win = max((t['pnl'] for t in winning_trades), default=0.0)
+     largest_loss = min((t['pnl'] for t in losing_trades), default=0.0)
+
+     return {
+         'n_trades': len(completed_trades),
+         'win_rate': win_rate,
+         'avg_win': avg_win,
+         'avg_loss': avg_loss,
+         'profit_factor': profit_factor,
+         'avg_duration': avg_duration,
+         'largest_win': largest_win,
+         'largest_loss': largest_loss,
+         'total_pnl': sum(t['pnl'] for t in completed_trades)
+     }
+
+
+ def create_performance_heatmap(
+     trades: List[Dict[str, Any]],
+     prices: List[float],
+     regimes: Optional[List[int]] = None,
+     time_of_day: Optional[List[int]] = None
+ ) -> Dict[str, Any]:
+     """
+     Create performance heatmap by regime and time of day.
+
+     Args:
+         trades: List of trades
+         prices: Price data
+         regimes: Regime labels (0=low, 1=normal, 2=high)
+         time_of_day: Hour of day (0-23)
+
+     Returns:
+         Dictionary with heatmap data
+     """
+     if not trades:
+         return {}
+
+     # Group trades by regime and time
+     heatmap_data = defaultdict(lambda: {'pnl': 0.0, 'count': 0})
+
+     for trade in trades:
+         idx = trade.get('index', 0)
+
+         regime = regimes[idx] if regimes and idx < len(regimes) else 1  # Default to normal
+         hour = time_of_day[idx] if time_of_day and idx < len(time_of_day) else 12  # Default to noon
+
+         key = f"regime_{regime}_hour_{hour}"
+         heatmap_data[key]['pnl'] += trade.get('pnl', 0.0)
+         heatmap_data[key]['count'] += 1
+
+     # Convert to structured format
+     result = {}
+     for key, data in heatmap_data.items():
+         result[key] = {
+             'pnl': data['pnl'],
+             'count': data['count'],
+             'avg_pnl': data['pnl'] / data['count'] if data['count'] > 0 else 0.0
+         }
+
+     return result
+
+
+ def analyze_by_regime(
+     completed_trades: List[Dict[str, Any]],
+     regimes: List[int]
+ ) -> Dict[int, Dict[str, Any]]:
+     """
+     Analyze performance by volatility/volume regime.
+
+     Args:
+         completed_trades: Completed trades
+         regimes: Regime labels for each time period
+
+     Returns:
+         Dictionary mapping regime -> statistics
+     """
+     regime_trades = defaultdict(list)
+
+     for trade in completed_trades:
+         entry_idx = trade.get('entry_index', 0)
+         if entry_idx < len(regimes):
+             regime = regimes[entry_idx]
+             regime_trades[regime].append(trade)
+
+     results = {}
+     for regime, trades in regime_trades.items():
+         results[regime] = analyze_trades(trades)
+         results[regime]['regime'] = regime
+         results[regime]['n_trades'] = len(trades)
+
+     return results
+
+
+ def create_trade_summary(completed_trades: List[Dict[str, Any]]) -> str:
+     """
+     Create human-readable trade summary.
+
+     Args:
+         completed_trades: Completed trades
+
+     Returns:
+         Formatted summary string
+     """
+     if not completed_trades:
+         return "No completed trades."
+
+     analysis = analyze_trades(completed_trades)
+
+     summary = f"""
+     Trade Summary
+     =============
+     Total Trades: {analysis['n_trades']}
+     Win Rate: {analysis['win_rate']*100:.2f}%
+     Average Win: ${analysis['avg_win']:.2f}
+     Average Loss: ${analysis['avg_loss']:.2f}
+     Profit Factor: {analysis['profit_factor']:.2f}
+     Average Duration: {analysis['avg_duration']:.1f} periods
+     Largest Win: ${analysis['largest_win']:.2f}
+     Largest Loss: ${analysis['largest_loss']:.2f}
+     Total P&L: ${analysis['total_pnl']:.2f}
+     """
+
+     return summary
+
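Finally, a short sketch of feeding BacktestEngine output into the analysis helpers above. It assumes the results dictionary produced by run() (which carries 'trades' and 'completed_trades') and uses hard-coded regime labels purely for illustration rather than labels estimated from volatility or volume.

import math
import random

from quantml.training.backtest import BacktestEngine
from quantml.training.backtest_analysis import (
    analyze_by_regime,
    analyze_trades,
    create_performance_heatmap,
    create_trade_summary,
)

# Small synthetic run (illustrative only), mirroring the engine sketch above.
random.seed(2)
prices = [100.0]
for _ in range(199):
    prices.append(prices[-1] * (1 + random.gauss(0.0, 0.01)))
signals = [math.tanh(random.gauss(0.0, 1.0)) for _ in prices]

results = BacktestEngine(initial_capital=100_000.0).run(signals, prices)

# Trade-level statistics and a printable summary.
stats = analyze_trades(results['completed_trades'])
print(create_trade_summary(results['completed_trades']))

# Illustrative regime labels (0 = low, 1 = normal, 2 = high), one per time step.
regimes = [0 if i < 70 else 1 if i < 140 else 2 for i in range(len(prices))]
per_regime = analyze_by_regime(results['completed_trades'], regimes)
heatmap = create_performance_heatmap(results['trades'], prices, regimes=regimes)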