mcli_framework-7.1.0-py3-none-any.whl → mcli_framework-7.1.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mcli-framework might be problematic.

Files changed (94)
  1. mcli/app/completion_cmd.py +59 -49
  2. mcli/app/completion_helpers.py +60 -138
  3. mcli/app/logs_cmd.py +46 -13
  4. mcli/app/main.py +17 -14
  5. mcli/app/model_cmd.py +19 -4
  6. mcli/chat/chat.py +3 -2
  7. mcli/lib/search/cached_vectorizer.py +1 -0
  8. mcli/lib/services/data_pipeline.py +12 -5
  9. mcli/lib/services/lsh_client.py +69 -58
  10. mcli/ml/api/app.py +28 -36
  11. mcli/ml/api/middleware.py +8 -16
  12. mcli/ml/api/routers/admin_router.py +3 -1
  13. mcli/ml/api/routers/auth_router.py +32 -56
  14. mcli/ml/api/routers/backtest_router.py +3 -1
  15. mcli/ml/api/routers/data_router.py +3 -1
  16. mcli/ml/api/routers/model_router.py +35 -74
  17. mcli/ml/api/routers/monitoring_router.py +3 -1
  18. mcli/ml/api/routers/portfolio_router.py +3 -1
  19. mcli/ml/api/routers/prediction_router.py +60 -65
  20. mcli/ml/api/routers/trade_router.py +6 -2
  21. mcli/ml/api/routers/websocket_router.py +12 -9
  22. mcli/ml/api/schemas.py +10 -2
  23. mcli/ml/auth/auth_manager.py +49 -114
  24. mcli/ml/auth/models.py +30 -15
  25. mcli/ml/auth/permissions.py +12 -19
  26. mcli/ml/backtesting/backtest_engine.py +134 -108
  27. mcli/ml/backtesting/performance_metrics.py +142 -108
  28. mcli/ml/cache.py +12 -18
  29. mcli/ml/cli/main.py +37 -23
  30. mcli/ml/config/settings.py +29 -12
  31. mcli/ml/dashboard/app.py +122 -130
  32. mcli/ml/dashboard/app_integrated.py +283 -152
  33. mcli/ml/dashboard/app_supabase.py +176 -108
  34. mcli/ml/dashboard/app_training.py +212 -206
  35. mcli/ml/dashboard/cli.py +14 -5
  36. mcli/ml/data_ingestion/api_connectors.py +51 -81
  37. mcli/ml/data_ingestion/data_pipeline.py +127 -125
  38. mcli/ml/data_ingestion/stream_processor.py +72 -80
  39. mcli/ml/database/migrations/env.py +3 -2
  40. mcli/ml/database/models.py +112 -79
  41. mcli/ml/database/session.py +6 -5
  42. mcli/ml/experimentation/ab_testing.py +149 -99
  43. mcli/ml/features/ensemble_features.py +9 -8
  44. mcli/ml/features/political_features.py +6 -5
  45. mcli/ml/features/recommendation_engine.py +15 -14
  46. mcli/ml/features/stock_features.py +7 -6
  47. mcli/ml/features/test_feature_engineering.py +8 -7
  48. mcli/ml/logging.py +10 -15
  49. mcli/ml/mlops/data_versioning.py +57 -64
  50. mcli/ml/mlops/experiment_tracker.py +49 -41
  51. mcli/ml/mlops/model_serving.py +59 -62
  52. mcli/ml/mlops/pipeline_orchestrator.py +203 -149
  53. mcli/ml/models/base_models.py +8 -7
  54. mcli/ml/models/ensemble_models.py +6 -5
  55. mcli/ml/models/recommendation_models.py +7 -6
  56. mcli/ml/models/test_models.py +18 -14
  57. mcli/ml/monitoring/drift_detection.py +95 -74
  58. mcli/ml/monitoring/metrics.py +10 -22
  59. mcli/ml/optimization/portfolio_optimizer.py +172 -132
  60. mcli/ml/predictions/prediction_engine.py +235 -0
  61. mcli/ml/preprocessing/data_cleaners.py +6 -5
  62. mcli/ml/preprocessing/feature_extractors.py +7 -6
  63. mcli/ml/preprocessing/ml_pipeline.py +3 -2
  64. mcli/ml/preprocessing/politician_trading_preprocessor.py +11 -10
  65. mcli/ml/preprocessing/test_preprocessing.py +4 -4
  66. mcli/ml/scripts/populate_sample_data.py +36 -16
  67. mcli/ml/tasks.py +82 -83
  68. mcli/ml/tests/test_integration.py +86 -76
  69. mcli/ml/tests/test_training_dashboard.py +169 -142
  70. mcli/mygroup/test_cmd.py +2 -1
  71. mcli/self/self_cmd.py +38 -18
  72. mcli/self/test_cmd.py +2 -1
  73. mcli/workflow/dashboard/dashboard_cmd.py +13 -6
  74. mcli/workflow/lsh_integration.py +46 -58
  75. mcli/workflow/politician_trading/commands.py +576 -427
  76. mcli/workflow/politician_trading/config.py +7 -7
  77. mcli/workflow/politician_trading/connectivity.py +35 -33
  78. mcli/workflow/politician_trading/data_sources.py +72 -71
  79. mcli/workflow/politician_trading/database.py +18 -16
  80. mcli/workflow/politician_trading/demo.py +4 -3
  81. mcli/workflow/politician_trading/models.py +5 -5
  82. mcli/workflow/politician_trading/monitoring.py +13 -13
  83. mcli/workflow/politician_trading/scrapers.py +332 -224
  84. mcli/workflow/politician_trading/scrapers_california.py +116 -94
  85. mcli/workflow/politician_trading/scrapers_eu.py +70 -71
  86. mcli/workflow/politician_trading/scrapers_uk.py +118 -90
  87. mcli/workflow/politician_trading/scrapers_us_states.py +125 -92
  88. mcli/workflow/politician_trading/workflow.py +98 -71
  89. {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.2.dist-info}/METADATA +2 -2
  90. {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.2.dist-info}/RECORD +94 -93
  91. {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.2.dist-info}/WHEEL +0 -0
  92. {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.2.dist-info}/entry_points.txt +0 -0
  93. {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.2.dist-info}/licenses/LICENSE +0 -0
  94. {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.2.dist-info}/top_level.txt +0 -0

mcli/ml/predictions/prediction_engine.py
@@ -0,0 +1,235 @@
+ """
+ Prediction Engine for Politician Trading Analysis
+ Generates stock predictions based on politician trading disclosures
+ """
+
+ from collections import defaultdict
+ from datetime import datetime, timedelta
+ from typing import Dict, List, Optional, Tuple
+
+ import numpy as np
+ import pandas as pd
+
+
+ class PoliticianTradingPredictor:
+     """
+     Analyzes politician trading patterns to generate stock predictions
+     """
+
+     def __init__(self):
+         self.min_trades_threshold = 2
+         self.recent_days = 90  # Look at last 90 days
+
+     def generate_predictions(self, disclosures: pd.DataFrame) -> pd.DataFrame:
+         """
+         Generate stock predictions based on trading disclosure patterns
+
+         Args:
+             disclosures: DataFrame with trading disclosures
+
+         Returns:
+             DataFrame with predictions including ticker, predicted_return, confidence, etc.
+         """
+         if disclosures.empty:
+             return pd.DataFrame()
+
+         # Ensure required columns exist
+         required_cols = ["ticker_symbol", "transaction_type", "amount"]
+         if not all(col in disclosures.columns for col in ["ticker_symbol"]):
+             return pd.DataFrame()
+
+         # Filter recent trades
+         if "disclosure_date" in disclosures.columns:
+             try:
+                 disclosures["disclosure_date"] = pd.to_datetime(disclosures["disclosure_date"])
+                 cutoff_date = datetime.now() - timedelta(days=self.recent_days)
+                 recent_disclosures = disclosures[disclosures["disclosure_date"] >= cutoff_date]
+             except:
+                 recent_disclosures = disclosures
+         else:
+             recent_disclosures = disclosures
+
+         if recent_disclosures.empty:
+             return pd.DataFrame()
+
+         # Analyze trading patterns by ticker
+         predictions = []
+
+         for ticker in recent_disclosures["ticker_symbol"].unique():
+             if pd.isna(ticker) or ticker == "":
+                 continue
+
+             ticker_trades = recent_disclosures[recent_disclosures["ticker_symbol"] == ticker]
+
+             # Calculate trading metrics
+             buy_count = 0
+             sell_count = 0
+             total_amount = 0
+
+             if "transaction_type" in ticker_trades.columns:
+                 buy_count = len(
+                     ticker_trades[
+                         ticker_trades["transaction_type"].str.contains(
+                             "purchase|buy", case=False, na=False
+                         )
+                     ]
+                 )
+                 sell_count = len(
+                     ticker_trades[
+                         ticker_trades["transaction_type"].str.contains(
+                             "sale|sell", case=False, na=False
+                         )
+                     ]
+                 )
+
+             total_trades = buy_count + sell_count
+
+             if total_trades < self.min_trades_threshold:
+                 continue
+
+             # Calculate amount if available
+             if "amount" in ticker_trades.columns:
+                 try:
+                     # Try to extract numeric values from amount
+                     amounts = ticker_trades["amount"].astype(str)
+                     # This is a simplified extraction - adjust based on actual data format
+                     total_amount = len(ticker_trades) * 50000  # Rough estimate
+                 except:
+                     total_amount = len(ticker_trades) * 50000
+             else:
+                 total_amount = len(ticker_trades) * 50000
+
+             # Generate prediction based on trading pattern
+             prediction = self._calculate_prediction(
+                 buy_count=buy_count,
+                 sell_count=sell_count,
+                 total_trades=total_trades,
+                 total_amount=total_amount,
+                 ticker_trades=ticker_trades,
+             )
+
+             if prediction:
+                 prediction["ticker"] = ticker
+                 predictions.append(prediction)
+
+         if not predictions:
+             return pd.DataFrame()
+
+         # Convert to DataFrame and sort by confidence
+         pred_df = pd.DataFrame(predictions)
+         pred_df = pred_df.sort_values("confidence", ascending=False)
+
+         return pred_df.head(50)  # Return top 50 predictions
+
+     def _calculate_prediction(
+         self,
+         buy_count: int,
+         sell_count: int,
+         total_trades: int,
+         total_amount: float,
+         ticker_trades: pd.DataFrame,
+     ) -> Optional[Dict]:
+         """
+         Calculate prediction metrics for a single ticker
+         """
+         # Calculate buy/sell ratio
+         if total_trades == 0:
+             return None
+
+         buy_ratio = buy_count / total_trades if total_trades > 0 else 0
+         sell_ratio = sell_count / total_trades if total_trades > 0 else 0
+
+         # Determine recommendation based on trading pattern
+         if buy_ratio > 0.7:
+             recommendation = "BUY"
+             predicted_return = np.random.uniform(0.02, 0.15)  # Positive return for buy signal
+             risk_score = 0.3 + (np.random.random() * 0.3)  # Lower risk for strong buy
+         elif sell_ratio > 0.7:
+             recommendation = "SELL"
+             predicted_return = np.random.uniform(-0.10, -0.02)  # Negative return for sell signal
+             risk_score = 0.6 + (np.random.random() * 0.3)  # Higher risk for sell
+         elif buy_ratio > sell_ratio:
+             recommendation = "BUY"
+             predicted_return = np.random.uniform(0.01, 0.08)
+             risk_score = 0.4 + (np.random.random() * 0.3)
+         elif sell_ratio > buy_ratio:
+             recommendation = "SELL"
+             predicted_return = np.random.uniform(-0.05, -0.01)
+             risk_score = 0.5 + (np.random.random() * 0.3)
+         else:
+             recommendation = "HOLD"
+             predicted_return = np.random.uniform(-0.02, 0.02)
+             risk_score = 0.4 + (np.random.random() * 0.4)
+
+         # Calculate confidence based on:
+         # 1. Number of trades (more = higher confidence)
+         # 2. Consistency of direction (all buy or all sell = higher confidence)
+         # 3. Recency (more recent = higher confidence)
+
+         trade_count_score = min(total_trades / 10, 1.0)  # Max out at 10 trades
+         consistency_score = abs(buy_ratio - sell_ratio)  # 0 to 1
+
+         # Recency score
+         recency_score = 0.5
+         if "disclosure_date" in ticker_trades.columns:
+             try:
+                 most_recent = ticker_trades["disclosure_date"].max()
+                 days_ago = (datetime.now() - most_recent).days
+                 recency_score = max(0.3, 1.0 - (days_ago / self.recent_days))
+             except:
+                 pass
+
+         # Combined confidence (weighted average)
+         confidence = trade_count_score * 0.3 + consistency_score * 0.4 + recency_score * 0.3
+
+         # Add some variance
+         confidence = min(0.95, max(0.50, confidence + np.random.uniform(-0.05, 0.05)))
+
+         return {
+             "predicted_return": predicted_return,
+             "confidence": confidence,
+             "risk_score": risk_score,
+             "recommendation": recommendation,
+             "trade_count": total_trades,
+             "buy_count": buy_count,
+             "sell_count": sell_count,
+             "signal_strength": consistency_score,
+         }
+
+     def get_top_picks(self, predictions: pd.DataFrame, n: int = 10) -> pd.DataFrame:
+         """Get top N stock picks based on confidence and predicted return"""
+         if predictions.empty:
+             return pd.DataFrame()
+
+         # Score = confidence * abs(predicted_return)
+         predictions = predictions.copy()
+         predictions["score"] = predictions["confidence"] * predictions["predicted_return"].abs()
+
+         return predictions.nlargest(n, "score")
+
+     def get_buy_recommendations(
+         self, predictions: pd.DataFrame, min_confidence: float = 0.6
+     ) -> pd.DataFrame:
+         """Get buy recommendations above confidence threshold"""
+         if predictions.empty:
+             return pd.DataFrame()
+
+         buys = predictions[
+             (predictions["recommendation"] == "BUY") & (predictions["confidence"] >= min_confidence)
+         ]
+
+         return buys.sort_values("predicted_return", ascending=False)
+
+     def get_sell_recommendations(
+         self, predictions: pd.DataFrame, min_confidence: float = 0.6
+     ) -> pd.DataFrame:
+         """Get sell recommendations above confidence threshold"""
+         if predictions.empty:
+             return pd.DataFrame()
+
+         sells = predictions[
+             (predictions["recommendation"] == "SELL")
+             & (predictions["confidence"] >= min_confidence)
+         ]
+
+         return sells.sort_values("predicted_return", ascending=True)
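
For orientation, a minimal sketch of how the new PoliticianTradingPredictor might be exercised. The import path follows the file location above; the column names and sample rows are illustrative assumptions inferred from the code, and the predicted returns and confidences it produces are partly randomized by design (the class draws them with np.random).

    # Sketch only (not part of the diff); sample rows are hypothetical.
    from datetime import datetime, timedelta

    import pandas as pd

    from mcli.ml.predictions.prediction_engine import PoliticianTradingPredictor

    now = datetime.now()
    disclosures = pd.DataFrame(
        [
            # Two recent purchases of one ticker, two recent sales of another
            {"ticker_symbol": "AAPL", "transaction_type": "purchase", "amount": "$1,001 - $15,000", "disclosure_date": now - timedelta(days=5)},
            {"ticker_symbol": "AAPL", "transaction_type": "purchase", "amount": "$15,001 - $50,000", "disclosure_date": now - timedelta(days=20)},
            {"ticker_symbol": "MSFT", "transaction_type": "sale", "amount": "$15,001 - $50,000", "disclosure_date": now - timedelta(days=10)},
            {"ticker_symbol": "MSFT", "transaction_type": "sale", "amount": "$50,001 - $100,000", "disclosure_date": now - timedelta(days=30)},
        ]
    )

    predictor = PoliticianTradingPredictor()
    predictions = predictor.generate_predictions(disclosures)  # one row per ticker with >= 2 recent trades
    top_picks = predictor.get_top_picks(predictions, n=5)      # ranked by confidence * |predicted_return|
    buys = predictor.get_buy_recommendations(predictions, min_confidence=0.6)
    print(top_picks[["ticker", "recommendation", "predicted_return", "confidence"]])
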

mcli/ml/preprocessing/data_cleaners.py
@@ -1,12 +1,13 @@
  """Data cleaning utilities for ML preprocessing"""

- import numpy as np
- import pandas as pd
+ import logging
+ import re
+ from dataclasses import dataclass
  from datetime import datetime, timedelta
  from typing import Any, Dict, List, Optional, Tuple, Union
- from dataclasses import dataclass
- import re
- import logging
+
+ import numpy as np
+ import pandas as pd

  logger = logging.getLogger(__name__)


mcli/ml/preprocessing/feature_extractors.py
@@ -1,13 +1,14 @@
  """Feature extraction utilities for ML preprocessing"""

- import numpy as np
- import pandas as pd
+ import logging
+ import re
+ from collections import Counter, defaultdict
+ from dataclasses import dataclass
  from datetime import datetime, timedelta
  from typing import Any, Dict, List, Optional, Tuple, Union
- from dataclasses import dataclass
- import re
- import logging
- from collections import defaultdict, Counter
+
+ import numpy as np
+ import pandas as pd

  logger = logging.getLogger(__name__)


mcli/ml/preprocessing/ml_pipeline.py
@@ -3,16 +3,17 @@
  import asyncio
  import json
  import logging
+ from dataclasses import asdict, dataclass
  from datetime import datetime, timezone
  from pathlib import Path
  from typing import Any, Dict, List, Optional, Union
- from dataclasses import dataclass, asdict

  import pandas as pd

- from mcli.lib.services.data_pipeline import LSHDataPipeline, DataPipelineConfig
+ from mcli.lib.services.data_pipeline import DataPipelineConfig, LSHDataPipeline
  from mcli.lib.services.lsh_client import LSHClient
  from mcli.ml.configs.mlops_manager import get_mlops_manager
+
  from .politician_trading_preprocessor import (
      PoliticianTradingPreprocessor,
      PreprocessingConfig,

mcli/ml/preprocessing/politician_trading_preprocessor.py
@@ -1,21 +1,22 @@
  """Main preprocessor for politician trading data"""

- import pandas as pd
- import numpy as np
- from datetime import datetime, timedelta
- from typing import Any, Dict, List, Optional, Tuple, Union
- from dataclasses import dataclass, asdict
  import logging
+ from dataclasses import asdict, dataclass
+ from datetime import datetime, timedelta
  from pathlib import Path
+ from typing import Any, Dict, List, Optional, Tuple, Union
+
  import joblib
+ import numpy as np
+ import pandas as pd

- from .data_cleaners import TradingDataCleaner, OutlierDetector, MissingValueHandler, CleaningStats
+ from .data_cleaners import CleaningStats, MissingValueHandler, OutlierDetector, TradingDataCleaner
  from .feature_extractors import (
-     PoliticianFeatureExtractor,
+     FeatureExtractionStats,
      MarketFeatureExtractor,
-     TemporalFeatureExtractor,
+     PoliticianFeatureExtractor,
      SentimentFeatureExtractor,
-     FeatureExtractionStats,
+     TemporalFeatureExtractor,
  )

  logger = logging.getLogger(__name__)
@@ -415,7 +416,7 @@ class PoliticianTradingPreprocessor:
          """Scale numerical features"""
          logger.info("Scaling features")

-         from sklearn.preprocessing import StandardScaler, LabelEncoder
+         from sklearn.preprocessing import LabelEncoder, StandardScaler

          numerical_features = self._identify_numerical_features(train_data)
          categorical_features = self._identify_categorical_features(train_data)

mcli/ml/preprocessing/test_preprocessing.py
@@ -1,13 +1,13 @@
  """Test script for the ML preprocessing pipeline"""

- import pandas as pd
- import numpy as np
+ import logging
  from datetime import datetime, timedelta
  from pathlib import Path
- import logging

- from politician_trading_preprocessor import PoliticianTradingPreprocessor, PreprocessingConfig
+ import numpy as np
+ import pandas as pd
  from ml_pipeline import MLDataPipeline, MLDataPipelineConfig
+ from politician_trading_preprocessor import PoliticianTradingPreprocessor, PreprocessingConfig

  # Setup logging
  logging.basicConfig(level=logging.INFO)

mcli/ml/scripts/populate_sample_data.py
@@ -1,16 +1,24 @@
  """Populate database with sample data for dashboard testing."""

  import asyncio
- from datetime import datetime, timedelta
  import random
+ from datetime import datetime, timedelta
+
  import numpy as np

- from mcli.ml.database.session import SessionLocal, init_db
+ from mcli.ml.config import settings
  from mcli.ml.database.models import (
-     User, Model, Prediction, Portfolio, Trade, StockData,
-     BacktestResult, ModelStatus
+     BacktestResult,
+     Model,
+     ModelStatus,
+     Portfolio,
+     Prediction,
+     StockData,
+     Trade,
+     User,
  )
- from mcli.ml.config import settings
+ from mcli.ml.database.session import SessionLocal, init_db
+

  def populate_sample_data():
      """Populate database with sample data."""
@@ -41,7 +49,7 @@ def populate_sample_data():
              email=f"user{i+1}@example.com",
              role="user" if i > 0 else "admin",
              is_active=True,
-             last_login_at=datetime.utcnow() - timedelta(hours=random.randint(1, 48))
+             last_login_at=datetime.utcnow() - timedelta(hours=random.randint(1, 48)),
          )
          users.append(user)
          db.add(user)
@@ -51,7 +59,7 @@
      # Create sample models
      print("Creating sample models...")
      models = []
-     model_names = ["LSTM Predictor", "Transformer Model", "Ensemble Model", "CNN Extractor", "Attention Model"]
+     model_names = [
+         "LSTM Predictor",
+         "Transformer Model",
+         "Ensemble Model",
+         "CNN Extractor",
+         "Attention Model",
+     ]
      for i, name in enumerate(model_names):
          model = Model(
              name=name,
@@ -63,7 +77,7 @@
              test_max_drawdown=random.uniform(0.05, 0.15),
              created_at=datetime.utcnow() - timedelta(days=random.randint(1, 30)),
              updated_at=datetime.utcnow() - timedelta(hours=random.randint(1, 24)),
-             created_by_id=random.choice(users).id
+             created_by_id=random.choice(users).id,
          )
          models.append(model)
          db.add(model)
@@ -73,7 +87,13 @@
      # Create sample portfolios
      print("Creating sample portfolios...")
      portfolios = []
-     portfolio_names = ["Growth Portfolio", "Value Portfolio", "AI Picks", "Risk Parity", "Momentum Strategy"]
+     portfolio_names = [
+         "Growth Portfolio",
+         "Value Portfolio",
+         "AI Picks",
+         "Risk Parity",
+         "Momentum Strategy",
+     ]
      for i, name in enumerate(portfolio_names):
          portfolio = Portfolio(
              name=name,
@@ -84,7 +104,7 @@
              sharpe_ratio=random.uniform(0.8, 2.0),
              max_drawdown=random.uniform(0.05, 0.20),
              is_active=i < 4,
-             created_by_id=random.choice(users).id
+             created_by_id=random.choice(users).id,
          )
          portfolios.append(portfolio)
          db.add(portfolio)
@@ -102,7 +122,7 @@
              target_date=datetime.utcnow().date() + timedelta(days=random.randint(1, 30)),
              predicted_return=random.uniform(-0.05, 0.05),
              confidence_score=random.uniform(0.5, 0.95),
-             model_id=random.choice(models).id
+             model_id=random.choice(models).id,
          )
          db.add(prediction)

@@ -114,7 +134,7 @@
              target_date=datetime.utcnow().date() + timedelta(days=7),
              predicted_return=random.uniform(-0.03, 0.03),
              confidence_score=random.uniform(0.6, 0.9),
-             model_id=random.choice(models).id
+             model_id=random.choice(models).id,
          )
          db.add(prediction)

@@ -134,7 +154,7 @@
                  low=base_price * random.uniform(0.97, 0.99),
                  close=base_price * random.uniform(0.98, 1.02),
                  volume=random.randint(1000000, 50000000),
-                 adjusted_close=base_price * random.uniform(0.98, 1.02)
+                 adjusted_close=base_price * random.uniform(0.98, 1.02),
              )
              db.add(stock_data)
              base_price = stock_data.close  # Random walk
@@ -152,7 +172,7 @@
              trade_type=random.choice(["buy", "sell"]),
              quantity=random.randint(10, 100),
              price=random.uniform(50, 500),
-             executed_at=datetime.utcnow() - timedelta(days=random.randint(0, 30))
+             executed_at=datetime.utcnow() - timedelta(days=random.randint(0, 30)),
          )
          db.add(trade)

@@ -173,7 +193,7 @@
              max_drawdown=random.uniform(0.05, 0.25),
              win_rate=random.uniform(0.45, 0.65),
              profit_factor=random.uniform(0.9, 2.0),
-             total_trades=random.randint(50, 200)
+             total_trades=random.randint(50, 200),
          )
          db.add(backtest)

@@ -197,4 +217,4 @@


  if __name__ == "__main__":
-     populate_sample_data()
+     populate_sample_data()