mcli-framework 7.1.1__py3-none-any.whl → 7.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mcli-framework might be problematic; see the registry's advisory page for details.

Files changed (94)
  1. mcli/app/completion_cmd.py +59 -49
  2. mcli/app/completion_helpers.py +60 -138
  3. mcli/app/logs_cmd.py +6 -2
  4. mcli/app/main.py +17 -14
  5. mcli/app/model_cmd.py +19 -4
  6. mcli/chat/chat.py +3 -2
  7. mcli/lib/search/cached_vectorizer.py +1 -0
  8. mcli/lib/services/data_pipeline.py +12 -5
  9. mcli/lib/services/lsh_client.py +68 -57
  10. mcli/ml/api/app.py +28 -36
  11. mcli/ml/api/middleware.py +8 -16
  12. mcli/ml/api/routers/admin_router.py +3 -1
  13. mcli/ml/api/routers/auth_router.py +32 -56
  14. mcli/ml/api/routers/backtest_router.py +3 -1
  15. mcli/ml/api/routers/data_router.py +3 -1
  16. mcli/ml/api/routers/model_router.py +35 -74
  17. mcli/ml/api/routers/monitoring_router.py +3 -1
  18. mcli/ml/api/routers/portfolio_router.py +3 -1
  19. mcli/ml/api/routers/prediction_router.py +60 -65
  20. mcli/ml/api/routers/trade_router.py +6 -2
  21. mcli/ml/api/routers/websocket_router.py +12 -9
  22. mcli/ml/api/schemas.py +10 -2
  23. mcli/ml/auth/auth_manager.py +49 -114
  24. mcli/ml/auth/models.py +30 -15
  25. mcli/ml/auth/permissions.py +12 -19
  26. mcli/ml/backtesting/backtest_engine.py +134 -108
  27. mcli/ml/backtesting/performance_metrics.py +142 -108
  28. mcli/ml/cache.py +12 -18
  29. mcli/ml/cli/main.py +37 -23
  30. mcli/ml/config/settings.py +29 -12
  31. mcli/ml/dashboard/app.py +122 -130
  32. mcli/ml/dashboard/app_integrated.py +955 -154
  33. mcli/ml/dashboard/app_supabase.py +176 -108
  34. mcli/ml/dashboard/app_training.py +212 -206
  35. mcli/ml/dashboard/cli.py +14 -5
  36. mcli/ml/data_ingestion/api_connectors.py +51 -81
  37. mcli/ml/data_ingestion/data_pipeline.py +127 -125
  38. mcli/ml/data_ingestion/stream_processor.py +72 -80
  39. mcli/ml/database/migrations/env.py +3 -2
  40. mcli/ml/database/models.py +112 -79
  41. mcli/ml/database/session.py +6 -5
  42. mcli/ml/experimentation/ab_testing.py +149 -99
  43. mcli/ml/features/ensemble_features.py +9 -8
  44. mcli/ml/features/political_features.py +6 -5
  45. mcli/ml/features/recommendation_engine.py +15 -14
  46. mcli/ml/features/stock_features.py +7 -6
  47. mcli/ml/features/test_feature_engineering.py +8 -7
  48. mcli/ml/logging.py +10 -15
  49. mcli/ml/mlops/data_versioning.py +57 -64
  50. mcli/ml/mlops/experiment_tracker.py +49 -41
  51. mcli/ml/mlops/model_serving.py +59 -62
  52. mcli/ml/mlops/pipeline_orchestrator.py +203 -149
  53. mcli/ml/models/base_models.py +8 -7
  54. mcli/ml/models/ensemble_models.py +6 -5
  55. mcli/ml/models/recommendation_models.py +7 -6
  56. mcli/ml/models/test_models.py +18 -14
  57. mcli/ml/monitoring/drift_detection.py +95 -74
  58. mcli/ml/monitoring/metrics.py +10 -22
  59. mcli/ml/optimization/portfolio_optimizer.py +172 -132
  60. mcli/ml/predictions/prediction_engine.py +62 -50
  61. mcli/ml/preprocessing/data_cleaners.py +6 -5
  62. mcli/ml/preprocessing/feature_extractors.py +7 -6
  63. mcli/ml/preprocessing/ml_pipeline.py +3 -2
  64. mcli/ml/preprocessing/politician_trading_preprocessor.py +11 -10
  65. mcli/ml/preprocessing/test_preprocessing.py +4 -4
  66. mcli/ml/scripts/populate_sample_data.py +36 -16
  67. mcli/ml/tasks.py +82 -83
  68. mcli/ml/tests/test_integration.py +86 -76
  69. mcli/ml/tests/test_training_dashboard.py +169 -142
  70. mcli/mygroup/test_cmd.py +2 -1
  71. mcli/self/self_cmd.py +31 -16
  72. mcli/self/test_cmd.py +2 -1
  73. mcli/workflow/dashboard/dashboard_cmd.py +13 -6
  74. mcli/workflow/lsh_integration.py +46 -58
  75. mcli/workflow/politician_trading/commands.py +576 -427
  76. mcli/workflow/politician_trading/config.py +7 -7
  77. mcli/workflow/politician_trading/connectivity.py +35 -33
  78. mcli/workflow/politician_trading/data_sources.py +72 -71
  79. mcli/workflow/politician_trading/database.py +18 -16
  80. mcli/workflow/politician_trading/demo.py +4 -3
  81. mcli/workflow/politician_trading/models.py +5 -5
  82. mcli/workflow/politician_trading/monitoring.py +13 -13
  83. mcli/workflow/politician_trading/scrapers.py +332 -224
  84. mcli/workflow/politician_trading/scrapers_california.py +116 -94
  85. mcli/workflow/politician_trading/scrapers_eu.py +70 -71
  86. mcli/workflow/politician_trading/scrapers_uk.py +118 -90
  87. mcli/workflow/politician_trading/scrapers_us_states.py +125 -92
  88. mcli/workflow/politician_trading/workflow.py +98 -71
  89. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/METADATA +1 -1
  90. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/RECORD +94 -94
  91. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/WHEEL +0 -0
  92. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/entry_points.txt +0 -0
  93. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/licenses/LICENSE +0 -0
  94. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/top_level.txt +0 -0
@@ -1,13 +1,14 @@
1
1
  """Performance metrics and analysis for backtesting"""
2
2
 
3
- import pandas as pd
4
- import numpy as np
5
- from typing import Dict, Any, Optional, List, Tuple
3
+ import logging
6
4
  from dataclasses import dataclass
5
+ from datetime import datetime
6
+ from typing import Any, Dict, List, Optional, Tuple
7
+
7
8
  import matplotlib.pyplot as plt
9
+ import numpy as np
10
+ import pandas as pd
8
11
  import seaborn as sns
9
- from datetime import datetime
10
- import logging
11
12
 
12
13
  logger = logging.getLogger(__name__)
13
14
 
@@ -15,6 +16,7 @@ logger = logging.getLogger(__name__)
15
16
  @dataclass
16
17
  class PortfolioMetrics:
17
18
  """Portfolio performance metrics"""
19
+
18
20
  total_return: float
19
21
  annualized_return: float
20
22
  volatility: float
@@ -38,6 +40,7 @@ class PortfolioMetrics:
38
40
  @dataclass
39
41
  class RiskMetrics:
40
42
  """Risk metrics"""
43
+
41
44
  value_at_risk_95: float
42
45
  conditional_var_95: float
43
46
  value_at_risk_99: float
@@ -58,9 +61,12 @@ class PerformanceAnalyzer:
58
61
  def __init__(self, risk_free_rate: float = 0.02):
59
62
  self.risk_free_rate = risk_free_rate
60
63
 
61
- def calculate_metrics(self, returns: pd.Series,
62
- benchmark_returns: Optional[pd.Series] = None,
63
- trades: Optional[pd.DataFrame] = None) -> Tuple[PortfolioMetrics, RiskMetrics]:
64
+ def calculate_metrics(
65
+ self,
66
+ returns: pd.Series,
67
+ benchmark_returns: Optional[pd.Series] = None,
68
+ trades: Optional[pd.DataFrame] = None,
69
+ ) -> Tuple[PortfolioMetrics, RiskMetrics]:
64
70
  """Calculate comprehensive performance metrics"""
65
71
 
66
72
  # Portfolio metrics
@@ -71,8 +77,9 @@ class PerformanceAnalyzer:
71
77
 
72
78
  return portfolio_metrics, risk_metrics
73
79
 
74
- def _calculate_portfolio_metrics(self, returns: pd.Series,
75
- trades: Optional[pd.DataFrame] = None) -> PortfolioMetrics:
80
+ def _calculate_portfolio_metrics(
81
+ self, returns: pd.Series, trades: Optional[pd.DataFrame] = None
82
+ ) -> PortfolioMetrics:
76
83
  """Calculate portfolio performance metrics"""
77
84
 
78
85
  # Basic returns
@@ -84,12 +91,16 @@ class PerformanceAnalyzer:
84
91
 
85
92
  # Sharpe ratio
86
93
  excess_returns = returns - self.risk_free_rate / 252
87
- sharpe_ratio = excess_returns.mean() / returns.std() * np.sqrt(252) if returns.std() > 0 else 0
94
+ sharpe_ratio = (
95
+ excess_returns.mean() / returns.std() * np.sqrt(252) if returns.std() > 0 else 0
96
+ )
88
97
 
89
98
  # Sortino ratio (downside deviation)
90
99
  downside_returns = returns[returns < 0]
91
100
  downside_std = downside_returns.std() * np.sqrt(252)
92
- sortino_ratio = (annualized_return - self.risk_free_rate) / downside_std if downside_std > 0 else 0
101
+ sortino_ratio = (
102
+ (annualized_return - self.risk_free_rate) / downside_std if downside_std > 0 else 0
103
+ )
93
104
 
94
105
  # Drawdown analysis
95
106
  cumulative = (1 + returns).cumprod()
@@ -107,15 +118,15 @@ class PerformanceAnalyzer:
107
118
  trade_metrics = self._analyze_trades(trades)
108
119
  else:
109
120
  trade_metrics = {
110
- 'win_rate': 0.5,
111
- 'profit_factor': 1.0,
112
- 'avg_win': 0,
113
- 'avg_loss': 0,
114
- 'largest_win': 0,
115
- 'largest_loss': 0,
116
- 'consecutive_wins': 0,
117
- 'consecutive_losses': 0,
118
- 'payoff_ratio': 1.0
121
+ "win_rate": 0.5,
122
+ "profit_factor": 1.0,
123
+ "avg_win": 0,
124
+ "avg_loss": 0,
125
+ "largest_win": 0,
126
+ "largest_loss": 0,
127
+ "consecutive_wins": 0,
128
+ "consecutive_losses": 0,
129
+ "payoff_ratio": 1.0,
119
130
  }
120
131
 
121
132
  # Recovery factor
@@ -131,11 +142,12 @@ class PerformanceAnalyzer:
131
142
  max_drawdown=max_drawdown,
132
143
  max_drawdown_duration=max_dd_duration,
133
144
  recovery_factor=recovery_factor,
134
- **trade_metrics
145
+ **trade_metrics,
135
146
  )
136
147
 
137
- def _calculate_risk_metrics(self, returns: pd.Series,
138
- benchmark_returns: Optional[pd.Series] = None) -> RiskMetrics:
148
+ def _calculate_risk_metrics(
149
+ self, returns: pd.Series, benchmark_returns: Optional[pd.Series] = None
150
+ ) -> RiskMetrics:
139
151
  """Calculate risk metrics"""
140
152
 
141
153
  # Value at Risk (VaR)
@@ -149,37 +161,49 @@ class PerformanceAnalyzer:
149
161
  # Market risk metrics
150
162
  if benchmark_returns is not None and len(benchmark_returns) > 0:
151
163
  # Align series
152
- aligned = pd.DataFrame({'returns': returns, 'benchmark': benchmark_returns}).dropna()
164
+ aligned = pd.DataFrame({"returns": returns, "benchmark": benchmark_returns}).dropna()
153
165
 
154
166
  if len(aligned) > 1:
155
167
  # Beta
156
168
  covariance = aligned.cov()
157
- beta = covariance.loc['returns', 'benchmark'] / aligned['benchmark'].var()
169
+ beta = covariance.loc["returns", "benchmark"] / aligned["benchmark"].var()
158
170
 
159
171
  # Alpha
160
- alpha = aligned['returns'].mean() - beta * aligned['benchmark'].mean()
172
+ alpha = aligned["returns"].mean() - beta * aligned["benchmark"].mean()
161
173
  alpha = alpha * 252 # Annualize
162
174
 
163
175
  # Correlation
164
- correlation = aligned.corr().loc['returns', 'benchmark']
176
+ correlation = aligned.corr().loc["returns", "benchmark"]
165
177
 
166
178
  # Information ratio
167
- active_returns = aligned['returns'] - aligned['benchmark']
179
+ active_returns = aligned["returns"] - aligned["benchmark"]
168
180
  tracking_error = active_returns.std() * np.sqrt(252)
169
- information_ratio = active_returns.mean() * 252 / tracking_error if tracking_error > 0 else 0
181
+ information_ratio = (
182
+ active_returns.mean() * 252 / tracking_error if tracking_error > 0 else 0
183
+ )
170
184
 
171
185
  # Treynor ratio
172
- treynor_ratio = (aligned['returns'].mean() * 252 - self.risk_free_rate) / beta if beta != 0 else 0
186
+ treynor_ratio = (
187
+ (aligned["returns"].mean() * 252 - self.risk_free_rate) / beta
188
+ if beta != 0
189
+ else 0
190
+ )
173
191
 
174
192
  # Capture ratios
175
- up_market = aligned[aligned['benchmark'] > 0]
176
- down_market = aligned[aligned['benchmark'] < 0]
177
-
178
- upside_capture = (up_market['returns'].mean() / up_market['benchmark'].mean()
179
- if len(up_market) > 0 and up_market['benchmark'].mean() != 0 else 1.0)
180
-
181
- downside_capture = (down_market['returns'].mean() / down_market['benchmark'].mean()
182
- if len(down_market) > 0 and down_market['benchmark'].mean() != 0 else 1.0)
193
+ up_market = aligned[aligned["benchmark"] > 0]
194
+ down_market = aligned[aligned["benchmark"] < 0]
195
+
196
+ upside_capture = (
197
+ up_market["returns"].mean() / up_market["benchmark"].mean()
198
+ if len(up_market) > 0 and up_market["benchmark"].mean() != 0
199
+ else 1.0
200
+ )
201
+
202
+ downside_capture = (
203
+ down_market["returns"].mean() / down_market["benchmark"].mean()
204
+ if len(down_market) > 0 and down_market["benchmark"].mean() != 0
205
+ else 1.0
206
+ )
183
207
  else:
184
208
  beta = alpha = correlation = information_ratio = treynor_ratio = 0
185
209
  upside_capture = downside_capture = 1.0
@@ -203,7 +227,7 @@ class PerformanceAnalyzer:
203
227
  treynor_ratio=treynor_ratio,
204
228
  downside_deviation=downside_deviation,
205
229
  upside_capture=upside_capture,
206
- downside_capture=downside_capture
230
+ downside_capture=downside_capture,
207
231
  )
208
232
 
209
233
  def _calculate_max_drawdown_duration(self, drawdown: pd.Series) -> int:
@@ -228,59 +252,59 @@ class PerformanceAnalyzer:
228
252
  def _analyze_trades(self, trades: pd.DataFrame) -> Dict[str, float]:
229
253
  """Analyze trade statistics"""
230
254
  # Filter for trades with PnL
231
- pnl_trades = trades[trades['pnl'].notna()].copy()
255
+ pnl_trades = trades[trades["pnl"].notna()].copy()
232
256
 
233
257
  if len(pnl_trades) == 0:
234
258
  return {
235
- 'win_rate': 0.5,
236
- 'profit_factor': 1.0,
237
- 'avg_win': 0,
238
- 'avg_loss': 0,
239
- 'largest_win': 0,
240
- 'largest_loss': 0,
241
- 'consecutive_wins': 0,
242
- 'consecutive_losses': 0,
243
- 'payoff_ratio': 1.0
259
+ "win_rate": 0.5,
260
+ "profit_factor": 1.0,
261
+ "avg_win": 0,
262
+ "avg_loss": 0,
263
+ "largest_win": 0,
264
+ "largest_loss": 0,
265
+ "consecutive_wins": 0,
266
+ "consecutive_losses": 0,
267
+ "payoff_ratio": 1.0,
244
268
  }
245
269
 
246
270
  # Winning and losing trades
247
- winning_trades = pnl_trades[pnl_trades['pnl'] > 0]
248
- losing_trades = pnl_trades[pnl_trades['pnl'] < 0]
271
+ winning_trades = pnl_trades[pnl_trades["pnl"] > 0]
272
+ losing_trades = pnl_trades[pnl_trades["pnl"] < 0]
249
273
 
250
274
  # Win rate
251
275
  win_rate = len(winning_trades) / len(pnl_trades)
252
276
 
253
277
  # Average win/loss
254
- avg_win = winning_trades['pnl'].mean() if len(winning_trades) > 0 else 0
255
- avg_loss = abs(losing_trades['pnl'].mean()) if len(losing_trades) > 0 else 0
278
+ avg_win = winning_trades["pnl"].mean() if len(winning_trades) > 0 else 0
279
+ avg_loss = abs(losing_trades["pnl"].mean()) if len(losing_trades) > 0 else 0
256
280
 
257
281
  # Profit factor
258
- gross_profit = winning_trades['pnl'].sum() if len(winning_trades) > 0 else 0
259
- gross_loss = abs(losing_trades['pnl'].sum()) if len(losing_trades) > 0 else 1
282
+ gross_profit = winning_trades["pnl"].sum() if len(winning_trades) > 0 else 0
283
+ gross_loss = abs(losing_trades["pnl"].sum()) if len(losing_trades) > 0 else 1
260
284
  profit_factor = gross_profit / gross_loss if gross_loss != 0 else 0
261
285
 
262
286
  # Largest win/loss
263
- largest_win = winning_trades['pnl'].max() if len(winning_trades) > 0 else 0
264
- largest_loss = abs(losing_trades['pnl'].min()) if len(losing_trades) > 0 else 0
287
+ largest_win = winning_trades["pnl"].max() if len(winning_trades) > 0 else 0
288
+ largest_loss = abs(losing_trades["pnl"].min()) if len(losing_trades) > 0 else 0
265
289
 
266
290
  # Consecutive wins/losses
267
- pnl_trades['is_win'] = pnl_trades['pnl'] > 0
268
- consecutive_wins = self._max_consecutive(pnl_trades['is_win'].values, True)
269
- consecutive_losses = self._max_consecutive(pnl_trades['is_win'].values, False)
291
+ pnl_trades["is_win"] = pnl_trades["pnl"] > 0
292
+ consecutive_wins = self._max_consecutive(pnl_trades["is_win"].values, True)
293
+ consecutive_losses = self._max_consecutive(pnl_trades["is_win"].values, False)
270
294
 
271
295
  # Payoff ratio
272
296
  payoff_ratio = avg_win / avg_loss if avg_loss > 0 else 0
273
297
 
274
298
  return {
275
- 'win_rate': win_rate,
276
- 'profit_factor': profit_factor,
277
- 'avg_win': avg_win,
278
- 'avg_loss': avg_loss,
279
- 'largest_win': largest_win,
280
- 'largest_loss': largest_loss,
281
- 'consecutive_wins': consecutive_wins,
282
- 'consecutive_losses': consecutive_losses,
283
- 'payoff_ratio': payoff_ratio
299
+ "win_rate": win_rate,
300
+ "profit_factor": profit_factor,
301
+ "avg_win": avg_win,
302
+ "avg_loss": avg_loss,
303
+ "largest_win": largest_win,
304
+ "largest_loss": largest_loss,
305
+ "consecutive_wins": consecutive_wins,
306
+ "consecutive_losses": consecutive_losses,
307
+ "payoff_ratio": payoff_ratio,
284
308
  }
285
309
 
286
310
  def _max_consecutive(self, arr: np.ndarray, value: bool) -> int:
@@ -304,26 +328,28 @@ def plot_performance(backtest_result, save_path: Optional[str] = None):
304
328
 
305
329
  # Portfolio value
306
330
  ax = axes[0, 0]
307
- ax.plot(backtest_result.portfolio_value.index,
308
- backtest_result.portfolio_value.values, label='Portfolio')
331
+ ax.plot(
332
+ backtest_result.portfolio_value.index,
333
+ backtest_result.portfolio_value.values,
334
+ label="Portfolio",
335
+ )
309
336
  if backtest_result.benchmark_returns is not None:
310
337
  benchmark_cumulative = (1 + backtest_result.benchmark_returns).cumprod()
311
338
  benchmark_value = benchmark_cumulative * backtest_result.portfolio_value.iloc[0]
312
- ax.plot(benchmark_value.index, benchmark_value.values,
313
- label='Benchmark', alpha=0.7)
314
- ax.set_title('Portfolio Value')
315
- ax.set_xlabel('Date')
316
- ax.set_ylabel('Value ($)')
339
+ ax.plot(benchmark_value.index, benchmark_value.values, label="Benchmark", alpha=0.7)
340
+ ax.set_title("Portfolio Value")
341
+ ax.set_xlabel("Date")
342
+ ax.set_ylabel("Value ($)")
317
343
  ax.legend()
318
344
  ax.grid(True, alpha=0.3)
319
345
 
320
346
  # Returns distribution
321
347
  ax = axes[0, 1]
322
- ax.hist(backtest_result.returns.values * 100, bins=50, edgecolor='black')
323
- ax.set_title('Returns Distribution')
324
- ax.set_xlabel('Daily Return (%)')
325
- ax.set_ylabel('Frequency')
326
- ax.axvline(x=0, color='red', linestyle='--', alpha=0.5)
348
+ ax.hist(backtest_result.returns.values * 100, bins=50, edgecolor="black")
349
+ ax.set_title("Returns Distribution")
350
+ ax.set_xlabel("Daily Return (%)")
351
+ ax.set_ylabel("Frequency")
352
+ ax.axvline(x=0, color="red", linestyle="--", alpha=0.5)
327
353
  ax.grid(True, alpha=0.3)
328
354
 
329
355
  # Drawdown
@@ -331,44 +357,45 @@ def plot_performance(backtest_result, save_path: Optional[str] = None):
331
357
  cumulative = (1 + backtest_result.returns).cumprod()
332
358
  running_max = cumulative.expanding().max()
333
359
  drawdown = ((cumulative - running_max) / running_max) * 100
334
- ax.fill_between(drawdown.index, drawdown.values, 0, color='red', alpha=0.3)
335
- ax.set_title('Drawdown')
336
- ax.set_xlabel('Date')
337
- ax.set_ylabel('Drawdown (%)')
360
+ ax.fill_between(drawdown.index, drawdown.values, 0, color="red", alpha=0.3)
361
+ ax.set_title("Drawdown")
362
+ ax.set_xlabel("Date")
363
+ ax.set_ylabel("Drawdown (%)")
338
364
  ax.grid(True, alpha=0.3)
339
365
 
340
366
  # Rolling Sharpe Ratio
341
367
  ax = axes[1, 1]
342
368
  rolling_sharpe = (
343
- backtest_result.returns.rolling(window=60).mean() /
344
- backtest_result.returns.rolling(window=60).std() * np.sqrt(252)
369
+ backtest_result.returns.rolling(window=60).mean()
370
+ / backtest_result.returns.rolling(window=60).std()
371
+ * np.sqrt(252)
345
372
  )
346
373
  ax.plot(rolling_sharpe.index, rolling_sharpe.values)
347
- ax.set_title('Rolling Sharpe Ratio (60 days)')
348
- ax.set_xlabel('Date')
349
- ax.set_ylabel('Sharpe Ratio')
350
- ax.axhline(y=0, color='red', linestyle='--', alpha=0.5)
374
+ ax.set_title("Rolling Sharpe Ratio (60 days)")
375
+ ax.set_xlabel("Date")
376
+ ax.set_ylabel("Sharpe Ratio")
377
+ ax.axhline(y=0, color="red", linestyle="--", alpha=0.5)
351
378
  ax.grid(True, alpha=0.3)
352
379
 
353
380
  # Trade analysis
354
381
  ax = axes[2, 0]
355
- if not backtest_result.trades.empty and 'pnl' in backtest_result.trades.columns:
356
- pnl_trades = backtest_result.trades[backtest_result.trades['pnl'].notna()]
382
+ if not backtest_result.trades.empty and "pnl" in backtest_result.trades.columns:
383
+ pnl_trades = backtest_result.trades[backtest_result.trades["pnl"].notna()]
357
384
  if not pnl_trades.empty:
358
- colors = ['green' if pnl > 0 else 'red' for pnl in pnl_trades['pnl']]
359
- ax.bar(range(len(pnl_trades)), pnl_trades['pnl'].values, color=colors, alpha=0.6)
360
- ax.set_title('Trade PnL')
361
- ax.set_xlabel('Trade Number')
362
- ax.set_ylabel('PnL ($)')
363
- ax.axhline(y=0, color='black', linestyle='-', alpha=0.3)
385
+ colors = ["green" if pnl > 0 else "red" for pnl in pnl_trades["pnl"]]
386
+ ax.bar(range(len(pnl_trades)), pnl_trades["pnl"].values, color=colors, alpha=0.6)
387
+ ax.set_title("Trade PnL")
388
+ ax.set_xlabel("Trade Number")
389
+ ax.set_ylabel("PnL ($)")
390
+ ax.axhline(y=0, color="black", linestyle="-", alpha=0.3)
364
391
  else:
365
- ax.text(0.5, 0.5, 'No trades', ha='center', va='center')
366
- ax.set_title('Trade PnL')
392
+ ax.text(0.5, 0.5, "No trades", ha="center", va="center")
393
+ ax.set_title("Trade PnL")
367
394
  ax.grid(True, alpha=0.3)
368
395
 
369
396
  # Metrics summary
370
397
  ax = axes[2, 1]
371
- ax.axis('off')
398
+ ax.axis("off")
372
399
  metrics_text = f"""
373
400
  Performance Metrics:
374
401
  ─────────────────
@@ -380,14 +407,21 @@ Max Drawdown: {backtest_result.metrics['max_drawdown']:.2%}
380
407
  Win Rate: {backtest_result.metrics['win_rate']:.2%}
381
408
  Total Trades: {backtest_result.metrics['total_trades']}
382
409
  """
383
- ax.text(0.1, 0.9, metrics_text, transform=ax.transAxes,
384
- fontsize=10, verticalalignment='top', fontfamily='monospace')
410
+ ax.text(
411
+ 0.1,
412
+ 0.9,
413
+ metrics_text,
414
+ transform=ax.transAxes,
415
+ fontsize=10,
416
+ verticalalignment="top",
417
+ fontfamily="monospace",
418
+ )
385
419
 
386
- plt.suptitle(f'Backtest Results - {backtest_result.strategy_name}', fontsize=14)
420
+ plt.suptitle(f"Backtest Results - {backtest_result.strategy_name}", fontsize=14)
387
421
  plt.tight_layout()
388
422
 
389
423
  if save_path:
390
- plt.savefig(save_path, dpi=100, bbox_inches='tight')
424
+ plt.savefig(save_path, dpi=100, bbox_inches="tight")
391
425
  logger.info(f"Performance chart saved to {save_path}")
392
426
 
393
- return fig
427
+ return fig
mcli/ml/cache.py CHANGED
@@ -1,12 +1,12 @@
1
1
  """Redis caching layer for ML system"""
2
2
 
3
+ import asyncio
4
+ import hashlib
3
5
  import json
4
6
  import pickle
5
- import hashlib
6
- import asyncio
7
- from typing import Optional, Any, Union, Callable
8
- from functools import wraps
9
7
  from datetime import timedelta
8
+ from functools import wraps
9
+ from typing import Any, Callable, Optional, Union
10
10
 
11
11
  import redis
12
12
  from redis import asyncio as aioredis
@@ -45,8 +45,8 @@ class CacheManager:
45
45
  port=settings.redis.port,
46
46
  db=settings.redis.db,
47
47
  password=settings.redis.password,
48
- max_connections=settings.redis.max_connections
49
- )
48
+ max_connections=settings.redis.max_connections,
49
+ ),
50
50
  )
51
51
 
52
52
  # Test connection
@@ -68,7 +68,7 @@ class CacheManager:
68
68
  settings.redis.url,
69
69
  encoding="utf-8",
70
70
  decode_responses=False,
71
- max_connections=settings.redis.max_connections
71
+ max_connections=settings.redis.max_connections,
72
72
  )
73
73
 
74
74
  # Test connection
@@ -88,7 +88,7 @@ class CacheManager:
88
88
  try:
89
89
  # Try JSON first (for simple types)
90
90
  if isinstance(value, (dict, list, str, int, float, bool, type(None))):
91
- return json.dumps(value).encode('utf-8')
91
+ return json.dumps(value).encode("utf-8")
92
92
  except:
93
93
  pass
94
94
 
@@ -102,7 +102,7 @@ class CacheManager:
102
102
 
103
103
  # Try JSON first
104
104
  try:
105
- return json.loads(value.decode('utf-8'))
105
+ return json.loads(value.decode("utf-8"))
106
106
  except:
107
107
  pass
108
108
 
@@ -272,10 +272,7 @@ def cached(expire: int = 3600, key_prefix: str = None):
272
272
  key_parts.insert(0, key_prefix)
273
273
 
274
274
  # Add function arguments to key
275
- key_data = {
276
- 'args': args,
277
- 'kwargs': kwargs
278
- }
275
+ key_data = {"args": args, "kwargs": kwargs}
279
276
  key_hash = hashlib.md5(
280
277
  json.dumps(key_data, sort_keys=True, default=str).encode()
281
278
  ).hexdigest()
@@ -306,10 +303,7 @@ def cached(expire: int = 3600, key_prefix: str = None):
306
303
  if key_prefix:
307
304
  key_parts.insert(0, key_prefix)
308
305
 
309
- key_data = {
310
- 'args': args,
311
- 'kwargs': kwargs
312
- }
306
+ key_data = {"args": args, "kwargs": kwargs}
313
307
  key_hash = hashlib.md5(
314
308
  json.dumps(key_data, sort_keys=True, default=str).encode()
315
309
  ).hexdigest()
@@ -397,4 +391,4 @@ def cache_get(key: str):
397
391
 
398
392
  def cache_delete(key: str):
399
393
  """Delete cache entry"""
400
- return cache_manager.delete(key)
394
+ return cache_manager.delete(key)
mcli/ml/cli/main.py CHANGED
@@ -1,25 +1,29 @@
1
1
  """Main CLI interface for ML system"""
2
2
 
3
- import typer
4
3
  import asyncio
5
4
  from pathlib import Path
6
- from typing import Optional, List
5
+ from typing import List, Optional
6
+
7
+ import typer
7
8
  from rich.console import Console
8
- from rich.table import Table
9
9
  from rich.progress import Progress, SpinnerColumn, TextColumn
10
+ from rich.table import Table
10
11
 
11
- from mcli.ml.config import settings, create_settings
12
+ from mcli.ml.backtesting.backtest_engine import BacktestConfig, BacktestEngine
13
+ from mcli.ml.config import create_settings, settings
14
+ from mcli.ml.experimentation.ab_testing import ABTestingFramework
12
15
  from mcli.ml.mlops.pipeline_orchestrator import MLPipeline, PipelineConfig
13
- from mcli.ml.backtesting.backtest_engine import BacktestEngine, BacktestConfig
14
- from mcli.ml.optimization.portfolio_optimizer import AdvancedPortfolioOptimizer, OptimizationObjective
15
16
  from mcli.ml.monitoring.drift_detection import ModelMonitor
16
- from mcli.ml.experimentation.ab_testing import ABTestingFramework
17
+ from mcli.ml.optimization.portfolio_optimizer import (
18
+ AdvancedPortfolioOptimizer,
19
+ OptimizationObjective,
20
+ )
17
21
 
18
22
  app = typer.Typer(
19
23
  name="mcli-ml",
20
24
  help="ML system for politician trading analysis and stock recommendations",
21
25
  no_args_is_help=True,
22
- rich_markup_mode="rich"
26
+ rich_markup_mode="rich",
23
27
  )
24
28
 
25
29
  console = Console()
@@ -33,7 +37,9 @@ def train(
33
37
  batch_size: Optional[int] = typer.Option(None, "--batch-size", help="Training batch size"),
34
38
  learning_rate: Optional[float] = typer.Option(None, "--lr", help="Learning rate"),
35
39
  device: Optional[str] = typer.Option(None, "--device", help="Device (cpu, cuda, auto)"),
36
- dry_run: bool = typer.Option(False, "--dry-run", help="Validate configuration without training"),
40
+ dry_run: bool = typer.Option(
41
+ False, "--dry-run", help="Validate configuration without training"
42
+ ),
37
43
  ):
38
44
  """Train ML models for stock recommendations"""
39
45
 
@@ -82,12 +88,12 @@ def train(
82
88
  console.print(f"Model saved to: {result.get('model_path', 'Unknown')}")
83
89
 
84
90
  # Display metrics if available
85
- if 'metrics' in result:
91
+ if "metrics" in result:
86
92
  metrics_table = Table(title="Training Metrics")
87
93
  metrics_table.add_column("Metric", style="cyan")
88
94
  metrics_table.add_column("Value", style="magenta")
89
95
 
90
- for metric, value in result['metrics'].items():
96
+ for metric, value in result["metrics"].items():
91
97
  metrics_table.add_row(metric, str(value))
92
98
 
93
99
  console.print(metrics_table)
@@ -116,6 +122,7 @@ def serve(
116
122
  console.print(f"Workers: {workers}")
117
123
 
118
124
  import uvicorn
125
+
119
126
  from mcli.ml.mlops.model_serving import app as serving_app
120
127
 
121
128
  uvicorn.run(
@@ -146,7 +153,7 @@ def backtest(
146
153
  config = BacktestConfig(
147
154
  initial_capital=initial_capital,
148
155
  commission=commission,
149
- benchmark='SPY',
156
+ benchmark="SPY",
150
157
  )
151
158
 
152
159
  async def run_backtest():
@@ -162,7 +169,9 @@ def backtest(
162
169
  try:
163
170
  # In a real implementation, you'd load actual price data
164
171
  # For now, we'll just validate the setup
165
- console.print("[yellow]Note: This is a demo setup. Connect to actual data sources for real backtesting.[/yellow]")
172
+ console.print(
173
+ "[yellow]Note: This is a demo setup. Connect to actual data sources for real backtesting.[/yellow]"
174
+ )
166
175
 
167
176
  progress.update(task, description="Backtest completed!")
168
177
  console.print("[green]✓ Backtest completed successfully![/green]")
@@ -204,7 +213,9 @@ def optimize(
204
213
  obj_enum = OptimizationObjective(objective)
205
214
  except ValueError:
206
215
  console.print(f"[red]Invalid objective: {objective}[/red]")
207
- console.print(f"Valid objectives: {', '.join([obj.value for obj in OptimizationObjective])}")
216
+ console.print(
217
+ f"Valid objectives: {', '.join([obj.value for obj in OptimizationObjective])}"
218
+ )
208
219
  raise typer.Exit(1)
209
220
 
210
221
  async def run_optimization():
@@ -217,7 +228,9 @@ def optimize(
217
228
 
218
229
  try:
219
230
  # In a real implementation, you'd fetch actual returns and covariance
220
- console.print("[yellow]Note: Using sample data for demo. Connect to data sources for real optimization.[/yellow]")
231
+ console.print(
232
+ "[yellow]Note: Using sample data for demo. Connect to data sources for real optimization.[/yellow]"
233
+ )
221
234
 
222
235
  progress.update(task, description="Optimization completed!")
223
236
  console.print("[green]✓ Portfolio optimization completed![/green]")
@@ -228,7 +241,7 @@ def optimize(
228
241
  allocation_table.add_column("Weight", style="magenta")
229
242
 
230
243
  # Sample allocation
231
- weights = [0.35, 0.30, 0.25, 0.10][:len(tickers)]
244
+ weights = [0.35, 0.30, 0.25, 0.10][: len(tickers)]
232
245
  for ticker, weight in zip(tickers, weights):
233
246
  allocation_table.add_row(ticker, f"{weight:.1%}")
234
247
 
@@ -267,7 +280,9 @@ def monitor(
267
280
  monitor = ModelMonitor(model_name)
268
281
 
269
282
  if check_drift:
270
- console.print("[yellow]Note: Connect to real data sources for actual drift detection.[/yellow]")
283
+ console.print(
284
+ "[yellow]Note: Connect to real data sources for actual drift detection.[/yellow]"
285
+ )
271
286
  console.print("[green]✓ No significant drift detected[/green]")
272
287
 
273
288
  if generate_report:
@@ -311,16 +326,15 @@ def experiment(
311
326
 
312
327
  for exp in experiments:
313
328
  exp_table.add_row(
314
- exp["id"][:8] + "...",
315
- exp["name"],
316
- exp["status"],
317
- str(exp["variants"])
329
+ exp["id"][:8] + "...", exp["name"], exp["status"], str(exp["variants"])
318
330
  )
319
331
 
320
332
  console.print(exp_table)
321
333
 
322
334
  else:
323
- console.print(f"[yellow]Action '{action}' would be executed for experiment {experiment_id or 'N/A'}[/yellow]")
335
+ console.print(
336
+ f"[yellow]Action '{action}' would be executed for experiment {experiment_id or 'N/A'}[/yellow]"
337
+ )
324
338
 
325
339
 
326
340
  @app.command()
@@ -395,4 +409,4 @@ def config(
395
409
 
396
410
 
397
411
  if __name__ == "__main__":
398
- app()
412
+ app()