mcli-framework 7.1.1__py3-none-any.whl → 7.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of mcli-framework has been flagged as potentially problematic.
Files changed (94)
  1. mcli/app/completion_cmd.py +59 -49
  2. mcli/app/completion_helpers.py +60 -138
  3. mcli/app/logs_cmd.py +6 -2
  4. mcli/app/main.py +17 -14
  5. mcli/app/model_cmd.py +19 -4
  6. mcli/chat/chat.py +3 -2
  7. mcli/lib/search/cached_vectorizer.py +1 -0
  8. mcli/lib/services/data_pipeline.py +12 -5
  9. mcli/lib/services/lsh_client.py +68 -57
  10. mcli/ml/api/app.py +28 -36
  11. mcli/ml/api/middleware.py +8 -16
  12. mcli/ml/api/routers/admin_router.py +3 -1
  13. mcli/ml/api/routers/auth_router.py +32 -56
  14. mcli/ml/api/routers/backtest_router.py +3 -1
  15. mcli/ml/api/routers/data_router.py +3 -1
  16. mcli/ml/api/routers/model_router.py +35 -74
  17. mcli/ml/api/routers/monitoring_router.py +3 -1
  18. mcli/ml/api/routers/portfolio_router.py +3 -1
  19. mcli/ml/api/routers/prediction_router.py +60 -65
  20. mcli/ml/api/routers/trade_router.py +6 -2
  21. mcli/ml/api/routers/websocket_router.py +12 -9
  22. mcli/ml/api/schemas.py +10 -2
  23. mcli/ml/auth/auth_manager.py +49 -114
  24. mcli/ml/auth/models.py +30 -15
  25. mcli/ml/auth/permissions.py +12 -19
  26. mcli/ml/backtesting/backtest_engine.py +134 -108
  27. mcli/ml/backtesting/performance_metrics.py +142 -108
  28. mcli/ml/cache.py +12 -18
  29. mcli/ml/cli/main.py +37 -23
  30. mcli/ml/config/settings.py +29 -12
  31. mcli/ml/dashboard/app.py +122 -130
  32. mcli/ml/dashboard/app_integrated.py +955 -154
  33. mcli/ml/dashboard/app_supabase.py +176 -108
  34. mcli/ml/dashboard/app_training.py +212 -206
  35. mcli/ml/dashboard/cli.py +14 -5
  36. mcli/ml/data_ingestion/api_connectors.py +51 -81
  37. mcli/ml/data_ingestion/data_pipeline.py +127 -125
  38. mcli/ml/data_ingestion/stream_processor.py +72 -80
  39. mcli/ml/database/migrations/env.py +3 -2
  40. mcli/ml/database/models.py +112 -79
  41. mcli/ml/database/session.py +6 -5
  42. mcli/ml/experimentation/ab_testing.py +149 -99
  43. mcli/ml/features/ensemble_features.py +9 -8
  44. mcli/ml/features/political_features.py +6 -5
  45. mcli/ml/features/recommendation_engine.py +15 -14
  46. mcli/ml/features/stock_features.py +7 -6
  47. mcli/ml/features/test_feature_engineering.py +8 -7
  48. mcli/ml/logging.py +10 -15
  49. mcli/ml/mlops/data_versioning.py +57 -64
  50. mcli/ml/mlops/experiment_tracker.py +49 -41
  51. mcli/ml/mlops/model_serving.py +59 -62
  52. mcli/ml/mlops/pipeline_orchestrator.py +203 -149
  53. mcli/ml/models/base_models.py +8 -7
  54. mcli/ml/models/ensemble_models.py +6 -5
  55. mcli/ml/models/recommendation_models.py +7 -6
  56. mcli/ml/models/test_models.py +18 -14
  57. mcli/ml/monitoring/drift_detection.py +95 -74
  58. mcli/ml/monitoring/metrics.py +10 -22
  59. mcli/ml/optimization/portfolio_optimizer.py +172 -132
  60. mcli/ml/predictions/prediction_engine.py +62 -50
  61. mcli/ml/preprocessing/data_cleaners.py +6 -5
  62. mcli/ml/preprocessing/feature_extractors.py +7 -6
  63. mcli/ml/preprocessing/ml_pipeline.py +3 -2
  64. mcli/ml/preprocessing/politician_trading_preprocessor.py +11 -10
  65. mcli/ml/preprocessing/test_preprocessing.py +4 -4
  66. mcli/ml/scripts/populate_sample_data.py +36 -16
  67. mcli/ml/tasks.py +82 -83
  68. mcli/ml/tests/test_integration.py +86 -76
  69. mcli/ml/tests/test_training_dashboard.py +169 -142
  70. mcli/mygroup/test_cmd.py +2 -1
  71. mcli/self/self_cmd.py +31 -16
  72. mcli/self/test_cmd.py +2 -1
  73. mcli/workflow/dashboard/dashboard_cmd.py +13 -6
  74. mcli/workflow/lsh_integration.py +46 -58
  75. mcli/workflow/politician_trading/commands.py +576 -427
  76. mcli/workflow/politician_trading/config.py +7 -7
  77. mcli/workflow/politician_trading/connectivity.py +35 -33
  78. mcli/workflow/politician_trading/data_sources.py +72 -71
  79. mcli/workflow/politician_trading/database.py +18 -16
  80. mcli/workflow/politician_trading/demo.py +4 -3
  81. mcli/workflow/politician_trading/models.py +5 -5
  82. mcli/workflow/politician_trading/monitoring.py +13 -13
  83. mcli/workflow/politician_trading/scrapers.py +332 -224
  84. mcli/workflow/politician_trading/scrapers_california.py +116 -94
  85. mcli/workflow/politician_trading/scrapers_eu.py +70 -71
  86. mcli/workflow/politician_trading/scrapers_uk.py +118 -90
  87. mcli/workflow/politician_trading/scrapers_us_states.py +125 -92
  88. mcli/workflow/politician_trading/workflow.py +98 -71
  89. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/METADATA +1 -1
  90. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/RECORD +94 -94
  91. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/WHEEL +0 -0
  92. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/entry_points.txt +0 -0
  93. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/licenses/LICENSE +0 -0
  94. {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/top_level.txt +0 -0
mcli/ml/config/settings.py CHANGED
@@ -1,17 +1,19 @@
 """Configuration management for ML system"""
 
+import logging
 import os
 from pathlib import Path
-from typing import Dict, Any, Optional, List
+from typing import Any, Dict, List, Optional
+
 from pydantic import Field, field_validator
 from pydantic_settings import BaseSettings, SettingsConfigDict
-import logging
 
 logger = logging.getLogger(__name__)
 
 
 class DatabaseSettings(BaseSettings):
     """Database configuration"""
+
     model_config = SettingsConfigDict(env_prefix="DB_")
 
     host: str = Field(default="localhost", description="Database host")
@@ -39,11 +41,14 @@ class DatabaseSettings(BaseSettings):
         # Use aiosqlite for local development if no user is specified
         if not self.user:
             return f"sqlite+aiosqlite:///{self.name}"
-        return f"postgresql+asyncpg://{self.user}:{self.password}@{self.host}:{self.port}/{self.name}"
+        return (
+            f"postgresql+asyncpg://{self.user}:{self.password}@{self.host}:{self.port}/{self.name}"
+        )
 
 
 class RedisSettings(BaseSettings):
     """Redis configuration"""
+
     model_config = SettingsConfigDict(env_prefix="REDIS_")
 
     host: str = Field(default="localhost", description="Redis host")
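
The hunk above only re-wraps the connection-string logic for line length; behavior is unchanged. As context, the DB_ env prefix means these fields are read from DB_HOST, DB_USER, and so on, and the URL falls back to SQLite when no user is set. A minimal runnable sketch of that behavior (the property name async_url, the field types, and every default except host are assumptions for the sketch; only the field names and the URL logic come from the diff):

import os

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class DatabaseSettings(BaseSettings):
    """Database configuration (trimmed to the fields the URL logic uses)"""

    model_config = SettingsConfigDict(env_prefix="DB_")

    host: str = Field(default="localhost")
    port: int = Field(default=5432)        # assumed default
    user: str = Field(default="")          # assumed default
    password: str = Field(default="")      # assumed default
    name: str = Field(default="mcli.db")   # assumed default

    @property
    def async_url(self) -> str:  # hypothetical name; the diff shows only the method body
        # Use aiosqlite for local development if no user is specified
        if not self.user:
            return f"sqlite+aiosqlite:///{self.name}"
        return (
            f"postgresql+asyncpg://{self.user}:{self.password}@{self.host}:{self.port}/{self.name}"
        )


print(DatabaseSettings().async_url)  # sqlite+aiosqlite:///mcli.db
os.environ.update({"DB_USER": "ml", "DB_PASSWORD": "s3cret", "DB_NAME": "trading"})
print(DatabaseSettings().async_url)  # postgresql+asyncpg://ml:s3cret@localhost:5432/trading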
@@ -64,10 +69,15 @@ class RedisSettings(BaseSettings):
 
 class MLflowSettings(BaseSettings):
     """MLflow configuration"""
+
     model_config = SettingsConfigDict(env_prefix="MLFLOW_")
 
-    tracking_uri: str = Field(default="http://localhost:5000", description="MLflow tracking server URI")
-    experiment_name: str = Field(default="politician_trading", description="Default experiment name")
+    tracking_uri: str = Field(
+        default="http://localhost:5000", description="MLflow tracking server URI"
+    )
+    experiment_name: str = Field(
+        default="politician_trading", description="Default experiment name"
+    )
     artifact_root: Optional[str] = Field(default=None, description="Artifact storage root")
 
     # Authentication
@@ -77,6 +87,7 @@ class MLflowSettings(BaseSettings):
 
 class ModelSettings(BaseSettings):
     """Model configuration"""
+
     model_config = SettingsConfigDict(env_prefix="MODEL_")
 
     # Model paths
@@ -105,12 +116,15 @@ class ModelSettings(BaseSettings):
 
 class DataSettings(BaseSettings):
     """Data configuration"""
+
     model_config = SettingsConfigDict(env_prefix="DATA_")
 
     # Data paths
     data_dir: Path = Field(default=Path("data"), description="Data storage directory")
     raw_dir: Path = Field(default=Path("data/raw"), description="Raw data directory")
-    processed_dir: Path = Field(default=Path("data/processed"), description="Processed data directory")
+    processed_dir: Path = Field(
+        default=Path("data/processed"), description="Processed data directory"
+    )
 
     # DVC settings
     dvc_remote: str = Field(default="local", description="DVC remote storage")
@@ -129,6 +143,7 @@ class DataSettings(BaseSettings):
 
 class APISettings(BaseSettings):
     """API configuration"""
+
     model_config = SettingsConfigDict(env_prefix="API_")
 
     # Server settings
@@ -152,6 +167,7 @@ class APISettings(BaseSettings):
 
 class MonitoringSettings(BaseSettings):
     """Monitoring configuration"""
+
     model_config = SettingsConfigDict(env_prefix="MONITORING_")
 
     # Metrics
@@ -173,6 +189,7 @@ class MonitoringSettings(BaseSettings):
 
 class SecuritySettings(BaseSettings):
     """Security configuration"""
+
     model_config = SettingsConfigDict(env_prefix="SECURITY_")
 
     # Authentication
@@ -196,15 +213,15 @@ class SecuritySettings(BaseSettings):
 
 class Settings(BaseSettings):
     """Main application settings"""
+
     model_config = SettingsConfigDict(
-        env_file=".env",
-        env_file_encoding="utf-8",
-        case_sensitive=False,
-        extra="ignore"
+        env_file=".env", env_file_encoding="utf-8", case_sensitive=False, extra="ignore"
     )
 
     # Environment
-    environment: str = Field(default="development", description="Environment (development, staging, production)")
+    environment: str = Field(
+        default="development", description="Environment (development, staging, production)"
+    )
     debug: bool = Field(default=False, description="Debug mode")
 
     # Component settings
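
The Settings hunk is likewise a formatting-only change: configuration is still loaded from a .env file with UTF-8 encoding, case-insensitive keys, and unknown keys ignored. A reduced sketch of how that loading behaves, limited to the two fields visible in this hunk:

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    """Main application settings (reduced to the fields visible in this hunk)"""

    model_config = SettingsConfigDict(
        env_file=".env", env_file_encoding="utf-8", case_sensitive=False, extra="ignore"
    )

    environment: str = Field(
        default="development", description="Environment (development, staging, production)"
    )
    debug: bool = Field(default=False, description="Debug mode")


# Precedence: real environment variables override .env entries, which override the defaults.
# With a .env file containing ENVIRONMENT=staging and DEBUG=true:
settings = Settings()
print(settings.environment, settings.debug)  # staging True ("development False" without a .env)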
@@ -391,4 +408,4 @@ if __name__ == "__main__":
     except ValueError as e:
         print(f"\nValidation working: {e}")
 
-    print("\nSettings validation complete!")
+    print("\nSettings validation complete!")
mcli/ml/dashboard/app.py CHANGED
@@ -1,34 +1,39 @@
 """Streamlit dashboard for ML system monitoring"""
 
-import streamlit as st
-import pandas as pd
-import plotly.express as px
-import plotly.graph_objects as go
-from plotly.subplots import make_subplots
 import asyncio
-import requests
 import time
 from datetime import datetime, timedelta
+
 import numpy as np
+import pandas as pd
+import plotly.express as px
+import plotly.graph_objects as go
+import requests
+import streamlit as st
+from plotly.subplots import make_subplots
 
-from mcli.ml.database.session import SessionLocal
-from mcli.ml.database.models import (
-    Model, Prediction, Portfolio, User, Trade,
-    StockData, BacktestResult, ModelStatus
-)
 from mcli.ml.cache import cache_manager
 from mcli.ml.config import settings
+from mcli.ml.database.models import (
+    BacktestResult,
+    Model,
+    ModelStatus,
+    Portfolio,
+    Prediction,
+    StockData,
+    Trade,
+    User,
+)
+from mcli.ml.database.session import SessionLocal
 
 # Page config
 st.set_page_config(
-    page_title="MCLI ML Dashboard",
-    page_icon="📊",
-    layout="wide",
-    initial_sidebar_state="expanded"
+    page_title="MCLI ML Dashboard", page_icon="📊", layout="wide", initial_sidebar_state="expanded"
 )
 
 # Custom CSS
-st.markdown("""
+st.markdown(
+    """
 <style>
     .metric-card {
         background-color: #f0f2f6;
@@ -58,7 +63,9 @@ st.markdown("""
         border-radius: 0.25rem;
     }
 </style>
-""", unsafe_allow_html=True)
+""",
+    unsafe_allow_html=True,
+)
 
 
 @st.cache_data(ttl=30)
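
Both dashboard hunks above are Black-style reflows of existing Streamlit setup code. For reference, a small standalone sketch of the same pattern: st.set_page_config has to be the first Streamlit call in the script, and unsafe_allow_html=True is what lets the <style> block through (the st.title line is a made-up stand-in for the real page body):

import streamlit as st

# Must be the first Streamlit command executed in the script
st.set_page_config(
    page_title="MCLI ML Dashboard", page_icon="📊", layout="wide", initial_sidebar_state="expanded"
)

# Streamlit escapes raw HTML unless unsafe_allow_html=True, so the injected CSS needs that flag
st.markdown(
    """
<style>
    .metric-card {
        background-color: #f0f2f6;
        border-radius: 0.25rem;
    }
</style>
""",
    unsafe_allow_html=True,
)

st.title("MCLI ML Dashboard")  # made-up stand-in for the real page content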
@@ -74,27 +81,31 @@ def get_system_metrics():
 
         # User metrics
         total_users = db.query(User).count()
-        active_users = db.query(User).filter(
-            User.last_login_at >= datetime.utcnow() - timedelta(days=1)
-        ).count()
+        active_users = (
+            db.query(User)
+            .filter(User.last_login_at >= datetime.utcnow() - timedelta(days=1))
+            .count()
+        )
 
         # Prediction metrics
-        predictions_today = db.query(Prediction).filter(
-            Prediction.prediction_date >= datetime.utcnow().date()
-        ).count()
+        predictions_today = (
+            db.query(Prediction)
+            .filter(Prediction.prediction_date >= datetime.utcnow().date())
+            .count()
+        )
 
         # Portfolio metrics
         active_portfolios = db.query(Portfolio).filter(Portfolio.is_active == True).count()
 
         return {
-            'total_models': total_models,
-            'deployed_models': deployed_models,
-            'training_models': training_models,
-            'total_users': total_users,
-            'active_users': active_users,
-            'predictions_today': predictions_today,
-            'active_portfolios': active_portfolios,
-            'timestamp': datetime.utcnow()
+            "total_models": total_models,
+            "deployed_models": deployed_models,
+            "training_models": training_models,
+            "total_users": total_users,
+            "active_users": active_users,
+            "predictions_today": predictions_today,
+            "active_portfolios": active_portfolios,
+            "timestamp": datetime.utcnow(),
         }
     finally:
         db.close()
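
get_system_metrics() pairs @st.cache_data(ttl=30) with a short-lived SQLAlchemy session, so the counts are recomputed at most every 30 seconds and the session is always closed in the finally block. A self-contained sketch of that caching pattern, using a made-up Visit model and a local SQLite engine instead of the project's real SessionLocal and models:

from datetime import datetime, timedelta

import streamlit as st
from sqlalchemy import Column, DateTime, Integer, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()
engine = create_engine("sqlite:///demo.db")  # stand-in for the project's configured engine
SessionLocal = sessionmaker(bind=engine)


class Visit(Base):  # made-up model, purely for illustration
    __tablename__ = "visits"
    id = Column(Integer, primary_key=True)
    seen_at = Column(DateTime, default=datetime.utcnow)


Base.metadata.create_all(engine)


@st.cache_data(ttl=30)  # re-runs the queries at most once every 30 seconds
def get_visit_metrics():
    db = SessionLocal()
    try:
        total = db.query(Visit).count()
        recent = (
            db.query(Visit)
            .filter(Visit.seen_at >= datetime.utcnow() - timedelta(days=1))
            .count()
        )
        return {"total": total, "recent": recent, "timestamp": datetime.utcnow()}
    finally:
        db.close()  # the session is released even if a query raises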
@@ -110,12 +121,14 @@ def get_model_performance():
 
         data = []
         for model in models:
-            data.append({
-                'name': model.name,
-                'accuracy': model.test_accuracy or 0,
-                'created_at': model.created_at,
-                'last_updated': model.updated_at
-            })
+            data.append(
+                {
+                    "name": model.name,
+                    "accuracy": model.test_accuracy or 0,
+                    "created_at": model.created_at,
+                    "last_updated": model.updated_at,
+                }
+            )
 
         return pd.DataFrame(data)
     finally:
@@ -128,19 +141,21 @@
     db = SessionLocal()
 
     try:
-        predictions = db.query(Prediction).order_by(
-            Prediction.prediction_date.desc()
-        ).limit(100).all()
+        predictions = (
+            db.query(Prediction).order_by(Prediction.prediction_date.desc()).limit(100).all()
+        )
 
         data = []
         for pred in predictions:
-            data.append({
-                'ticker': pred.ticker,
-                'predicted_return': pred.predicted_return,
-                'confidence': pred.confidence_score,
-                'prediction_date': pred.prediction_date,
-                'target_date': pred.target_date
-            })
+            data.append(
+                {
+                    "ticker": pred.ticker,
+                    "predicted_return": pred.predicted_return,
+                    "confidence": pred.confidence_score,
+                    "prediction_date": pred.prediction_date,
+                    "target_date": pred.target_date,
+                }
+            )
 
         return pd.DataFrame(data)
     finally:
@@ -157,13 +172,15 @@ def get_portfolio_performance():
 
         data = []
         for portfolio in portfolios:
-            data.append({
-                'name': portfolio.name,
-                'total_return': portfolio.total_return or 0,
-                'sharpe_ratio': portfolio.sharpe_ratio or 0,
-                'max_drawdown': portfolio.max_drawdown or 0,
-                'current_value': portfolio.current_value or 0
-            })
+            data.append(
+                {
+                    "name": portfolio.name,
+                    "total_return": portfolio.total_return or 0,
+                    "sharpe_ratio": portfolio.sharpe_ratio or 0,
+                    "max_drawdown": portfolio.max_drawdown or 0,
+                    "current_value": portfolio.current_value or 0,
+                }
+            )
 
         return pd.DataFrame(data)
     finally:
@@ -199,7 +216,7 @@ def main():
     st.sidebar.title("Navigation")
     page = st.sidebar.selectbox(
         "Choose a page",
-        ["Overview", "Models", "Predictions", "Portfolios", "System Health", "Live Monitoring"]
+        ["Overview", "Models", "Predictions", "Portfolios", "System Health", "Live Monitoring"],
     )
 
     # Auto-refresh toggle
@@ -241,28 +258,22 @@ def show_overview():
     with col1:
         st.metric(
             label="Deployed Models",
-            value=metrics['deployed_models'],
-            delta=f"{metrics['training_models']} training"
+            value=metrics["deployed_models"],
+            delta=f"{metrics['training_models']} training",
         )
 
     with col2:
         st.metric(
             label="Active Users",
-            value=metrics['active_users'],
-            delta=f"{metrics['total_users']} total"
+            value=metrics["active_users"],
+            delta=f"{metrics['total_users']} total",
         )
 
     with col3:
-        st.metric(
-            label="Predictions Today",
-            value=metrics['predictions_today']
-        )
+        st.metric(label="Predictions Today", value=metrics["predictions_today"])
 
     with col4:
-        st.metric(
-            label="Active Portfolios",
-            value=metrics['active_portfolios']
-        )
+        st.metric(label="Active Portfolios", value=metrics["active_portfolios"])
 
     # Charts
     col1, col2 = st.columns(2)
@@ -271,12 +282,7 @@ def show_overview():
         st.subheader("Model Performance")
         model_data = get_model_performance()
         if not model_data.empty:
-            fig = px.bar(
-                model_data,
-                x='name',
-                y='accuracy',
-                title="Model Accuracy Comparison"
-            )
+            fig = px.bar(model_data, x="name", y="accuracy", title="Model Accuracy Comparison")
             st.plotly_chart(fig, use_container_width=True)
         else:
             st.info("No model performance data available")
@@ -287,9 +293,7 @@ def show_overview():
         if not pred_data.empty:
             # Show confidence distribution
             fig = px.histogram(
-                pred_data,
-                x='confidence',
-                title="Prediction Confidence Distribution"
+                pred_data, x="confidence", title="Prediction Confidence Distribution"
             )
             st.plotly_chart(fig, use_container_width=True)
         else:
@@ -309,11 +313,7 @@ def show_models():
 
         # Model accuracy chart
         fig = px.line(
-            model_data,
-            x='created_at',
-            y='accuracy',
-            color='name',
-            title="Model Accuracy Over Time"
+            model_data, x="created_at", y="accuracy", color="name", title="Model Accuracy Over Time"
         )
         st.plotly_chart(fig, use_container_width=True)
     else:
@@ -332,23 +332,19 @@ def show_predictions():
     with col1:
         selected_tickers = st.multiselect(
             "Filter by Ticker",
-            options=pred_data['ticker'].unique(),
-            default=pred_data['ticker'].unique()[:5]
+            options=pred_data["ticker"].unique(),
+            default=pred_data["ticker"].unique()[:5],
         )
 
     with col2:
         confidence_threshold = st.slider(
-            "Minimum Confidence",
-            min_value=0.0,
-            max_value=1.0,
-            value=0.5,
-            step=0.1
+            "Minimum Confidence", min_value=0.0, max_value=1.0, value=0.5, step=0.1
         )
 
     # Filter data
     filtered_data = pred_data[
-        (pred_data['ticker'].isin(selected_tickers)) &
-        (pred_data['confidence'] >= confidence_threshold)
+        (pred_data["ticker"].isin(selected_tickers))
+        & (pred_data["confidence"] >= confidence_threshold)
     ]
 
     # Display filtered data
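
The filter above is an ordinary pandas boolean mask; the reformat only moves the & to the start of the second line. Each comparison has to stay parenthesized because & binds more tightly than >=. A tiny illustration with made-up data:

import pandas as pd

pred_data = pd.DataFrame(
    {
        "ticker": ["AAPL", "GOOGL", "MSFT", "TSLA"],
        "confidence": [0.42, 0.71, 0.88, 0.63],
    }
)
selected_tickers = ["AAPL", "MSFT", "TSLA"]
confidence_threshold = 0.5

# Without the parentheses, & would try to combine 0.5 with the isin() result before >= runs
filtered_data = pred_data[
    (pred_data["ticker"].isin(selected_tickers))
    & (pred_data["confidence"] >= confidence_threshold)
]
print(filtered_data)  # keeps only the MSFT and TSLA rows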
@@ -361,21 +357,21 @@ def show_predictions():
     with col1:
         fig = px.scatter(
             filtered_data,
-            x='confidence',
-            y='predicted_return',
-            color='ticker',
-            title="Confidence vs Predicted Return"
+            x="confidence",
+            y="predicted_return",
+            color="ticker",
+            title="Confidence vs Predicted Return",
         )
         st.plotly_chart(fig, use_container_width=True)
 
     with col2:
         # Group by ticker and show average return
-        avg_returns = filtered_data.groupby('ticker')['predicted_return'].mean().reset_index()
+        avg_returns = filtered_data.groupby("ticker")["predicted_return"].mean().reset_index()
         fig = px.bar(
             avg_returns,
-            x='ticker',
-            y='predicted_return',
-            title="Average Predicted Return by Ticker"
+            x="ticker",
+            y="predicted_return",
+            title="Average Predicted Return by Ticker",
         )
         st.plotly_chart(fig, use_container_width=True)
 
@@ -399,21 +395,18 @@ def show_portfolios():
 
     with col1:
         fig = px.bar(
-            portfolio_data,
-            x='name',
-            y='total_return',
-            title="Total Return by Portfolio"
+            portfolio_data, x="name", y="total_return", title="Total Return by Portfolio"
         )
         st.plotly_chart(fig, use_container_width=True)
 
     with col2:
         fig = px.scatter(
             portfolio_data,
-            x='sharpe_ratio',
-            y='total_return',
-            size='current_value',
-            hover_data=['name'],
-            title="Risk-Return Analysis"
+            x="sharpe_ratio",
+            y="total_return",
+            size="current_value",
+            hover_data=["name"],
+            title="Risk-Return Analysis",
         )
         st.plotly_chart(fig, use_container_width=True)
 
@@ -451,29 +444,26 @@ def show_system_health():
     st.subheader("System Metrics")
 
     # Generate sample time series data
-    times = pd.date_range(start=datetime.now() - timedelta(hours=24), end=datetime.now(), freq='H')
+    times = pd.date_range(start=datetime.now() - timedelta(hours=24), end=datetime.now(), freq="H")
     cpu_usage = np.random.normal(45, 10, len(times))
     memory_usage = np.random.normal(60, 15, len(times))
 
-    metrics_df = pd.DataFrame({
-        'time': times,
-        'cpu_usage': np.clip(cpu_usage, 0, 100),
-        'memory_usage': np.clip(memory_usage, 0, 100)
-    })
-
-    fig = make_subplots(
-        rows=2, cols=1,
-        subplot_titles=('CPU Usage (%)', 'Memory Usage (%)')
+    metrics_df = pd.DataFrame(
+        {
+            "time": times,
+            "cpu_usage": np.clip(cpu_usage, 0, 100),
+            "memory_usage": np.clip(memory_usage, 0, 100),
+        }
     )
 
+    fig = make_subplots(rows=2, cols=1, subplot_titles=("CPU Usage (%)", "Memory Usage (%)"))
+
     fig.add_trace(
-        go.Scatter(x=metrics_df['time'], y=metrics_df['cpu_usage'], name='CPU'),
-        row=1, col=1
+        go.Scatter(x=metrics_df["time"], y=metrics_df["cpu_usage"], name="CPU"), row=1, col=1
     )
 
     fig.add_trace(
-        go.Scatter(x=metrics_df['time'], y=metrics_df['memory_usage'], name='Memory'),
-        row=2, col=1
+        go.Scatter(x=metrics_df["time"], y=metrics_df["memory_usage"], name="Memory"), row=2, col=1
     )
 
     fig.update_layout(height=500, title_text="System Resource Usage (24h)")
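
This hunk is also formatting-only: the figure remains a two-row Plotly subplot grid with one trace routed to each row. A compact sketch of that layout with synthetic data, mirroring the dashboard code:

import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.subplots import make_subplots

# 24 hourly samples of synthetic resource usage, as in the dashboard
times = pd.date_range(end=pd.Timestamp.now(), periods=24, freq="H")
cpu = np.clip(np.random.normal(45, 10, len(times)), 0, 100)
mem = np.clip(np.random.normal(60, 15, len(times)), 0, 100)

# Two stacked panels; each trace is routed to its panel via row/col
fig = make_subplots(rows=2, cols=1, subplot_titles=("CPU Usage (%)", "Memory Usage (%)"))
fig.add_trace(go.Scatter(x=times, y=cpu, name="CPU"), row=1, col=1)
fig.add_trace(go.Scatter(x=times, y=mem, name="Memory"), row=2, col=1)
fig.update_layout(height=500, title_text="System Resource Usage (24h)")
fig.show()  # inside Streamlit this would be st.plotly_chart(fig, use_container_width=True)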
@@ -507,26 +497,28 @@ def show_live_monitoring():
         with col2:
             st.metric("Avg Confidence", f"{np.random.uniform(0.7, 0.9):.3f}")
         with col3:
-            st.metric("Active Models", metrics['deployed_models'])
+            st.metric("Active Models", metrics["deployed_models"])
 
         # Simulate new predictions
         with prediction_placeholder.container():
-            new_preds = pd.DataFrame({
-                'Ticker': np.random.choice(['AAPL', 'GOOGL', 'MSFT', 'TSLA'], 5),
-                'Prediction': np.random.uniform(-0.05, 0.05, 5),
-                'Confidence': np.random.uniform(0.6, 0.95, 5),
-                'Time': [datetime.now() - timedelta(seconds=x*10) for x in range(5)]
-            })
+            new_preds = pd.DataFrame(
+                {
+                    "Ticker": np.random.choice(["AAPL", "GOOGL", "MSFT", "TSLA"], 5),
+                    "Prediction": np.random.uniform(-0.05, 0.05, 5),
+                    "Confidence": np.random.uniform(0.6, 0.95, 5),
+                    "Time": [datetime.now() - timedelta(seconds=x * 10) for x in range(5)],
+                }
+            )
             st.dataframe(new_preds, use_container_width=True)
 
         # Model status
         with model_placeholder.container():
             model_data = get_model_performance()
             if not model_data.empty:
-                st.dataframe(model_data[['name', 'accuracy']], use_container_width=True)
+                st.dataframe(model_data[["name", "accuracy"]], use_container_width=True)
 
         time.sleep(5)
 
 
 if __name__ == "__main__":
-    main()
+    main()