mcli-framework 7.2.0-py3-none-any.whl → 7.3.1-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registries, and is provided for informational purposes only.
Potentially problematic release: this version of mcli-framework might be problematic, so review the changes below before upgrading.
- mcli/__init__.py +160 -0
- mcli/__main__.py +14 -0
- mcli/app/__init__.py +23 -0
- mcli/app/model/__init__.py +0 -0
- mcli/app/video/__init__.py +5 -0
- mcli/chat/__init__.py +34 -0
- mcli/lib/__init__.py +0 -0
- mcli/lib/api/__init__.py +0 -0
- mcli/lib/auth/__init__.py +1 -0
- mcli/lib/config/__init__.py +1 -0
- mcli/lib/erd/__init__.py +25 -0
- mcli/lib/files/__init__.py +0 -0
- mcli/lib/fs/__init__.py +1 -0
- mcli/lib/logger/__init__.py +3 -0
- mcli/lib/performance/__init__.py +17 -0
- mcli/lib/pickles/__init__.py +1 -0
- mcli/lib/shell/__init__.py +0 -0
- mcli/lib/toml/__init__.py +1 -0
- mcli/lib/watcher/__init__.py +0 -0
- mcli/ml/__init__.py +16 -0
- mcli/ml/api/__init__.py +30 -0
- mcli/ml/api/routers/__init__.py +27 -0
- mcli/ml/api/schemas.py +2 -2
- mcli/ml/auth/__init__.py +45 -0
- mcli/ml/auth/models.py +2 -2
- mcli/ml/backtesting/__init__.py +39 -0
- mcli/ml/cli/__init__.py +5 -0
- mcli/ml/cli/main.py +1 -1
- mcli/ml/config/__init__.py +33 -0
- mcli/ml/configs/__init__.py +16 -0
- mcli/ml/dashboard/__init__.py +12 -0
- mcli/ml/dashboard/app_integrated.py +23 -6
- mcli/ml/dashboard/components/__init__.py +7 -0
- mcli/ml/dashboard/pages/__init__.py +6 -0
- mcli/ml/dashboard/pages/predictions_enhanced.py +20 -6
- mcli/ml/dashboard/pages/test_portfolio.py +373 -0
- mcli/ml/dashboard/pages/trading.py +714 -0
- mcli/ml/dashboard/utils.py +154 -0
- mcli/ml/data_ingestion/__init__.py +39 -0
- mcli/ml/database/__init__.py +47 -0
- mcli/ml/experimentation/__init__.py +29 -0
- mcli/ml/features/__init__.py +39 -0
- mcli/ml/mlops/__init__.py +33 -0
- mcli/ml/models/__init__.py +94 -0
- mcli/ml/monitoring/__init__.py +25 -0
- mcli/ml/optimization/__init__.py +27 -0
- mcli/ml/predictions/__init__.py +5 -0
- mcli/ml/preprocessing/__init__.py +28 -0
- mcli/ml/scripts/__init__.py +1 -0
- mcli/ml/trading/__init__.py +60 -0
- mcli/ml/trading/alpaca_client.py +353 -0
- mcli/ml/trading/migrations.py +164 -0
- mcli/ml/trading/models.py +418 -0
- mcli/ml/trading/paper_trading.py +326 -0
- mcli/ml/trading/risk_management.py +370 -0
- mcli/ml/trading/trading_service.py +480 -0
- mcli/ml/training/__init__.py +10 -0
- mcli/mygroup/__init__.py +3 -0
- mcli/public/__init__.py +1 -0
- mcli/public/commands/__init__.py +2 -0
- mcli/self/__init__.py +3 -0
- mcli/self/self_cmd.py +260 -0
- mcli/workflow/__init__.py +0 -0
- mcli/workflow/daemon/__init__.py +15 -0
- mcli/workflow/daemon/daemon.py +21 -3
- mcli/workflow/dashboard/__init__.py +5 -0
- mcli/workflow/docker/__init__.py +0 -0
- mcli/workflow/file/__init__.py +0 -0
- mcli/workflow/gcloud/__init__.py +1 -0
- mcli/workflow/git_commit/__init__.py +0 -0
- mcli/workflow/interview/__init__.py +0 -0
- mcli/workflow/politician_trading/__init__.py +4 -0
- mcli/workflow/registry/__init__.py +0 -0
- mcli/workflow/repo/__init__.py +0 -0
- mcli/workflow/scheduler/__init__.py +25 -0
- mcli/workflow/search/__init__.py +0 -0
- mcli/workflow/sync/__init__.py +5 -0
- mcli/workflow/videos/__init__.py +1 -0
- mcli/workflow/wakatime/__init__.py +80 -0
- {mcli_framework-7.2.0.dist-info → mcli_framework-7.3.1.dist-info}/METADATA +3 -1
- {mcli_framework-7.2.0.dist-info → mcli_framework-7.3.1.dist-info}/RECORD +85 -13
- {mcli_framework-7.2.0.dist-info → mcli_framework-7.3.1.dist-info}/WHEEL +0 -0
- {mcli_framework-7.2.0.dist-info → mcli_framework-7.3.1.dist-info}/entry_points.txt +0 -0
- {mcli_framework-7.2.0.dist-info → mcli_framework-7.3.1.dist-info}/licenses/LICENSE +0 -0
- {mcli_framework-7.2.0.dist-info → mcli_framework-7.3.1.dist-info}/top_level.txt +0 -0
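The listing above can be verified independently by downloading both wheels and diffing them locally. A minimal sketch of that workflow in Python, using only standard tooling (pip, zipfile, difflib); the package name, versions, and RECORD paths come from this diff, everything else is illustrative:

# Sketch only: fetch both wheels named in this diff and compare their RECORD files.
import difflib
import pathlib
import subprocess
import sys
import zipfile


def fetch_wheel(version: str, dest: str = "wheels") -> pathlib.Path:
    """Download a single wheel (no dependencies) and return its path."""
    subprocess.run(
        [sys.executable, "-m", "pip", "download", f"mcli-framework=={version}",
         "--no-deps", "--only-binary", ":all:", "-d", dest],
        check=True,
    )
    return next(pathlib.Path(dest).glob(f"mcli_framework-{version}-*.whl"))


def read_record(wheel: pathlib.Path, version: str) -> list:
    """Read the dist-info RECORD listing from inside the wheel."""
    with zipfile.ZipFile(wheel) as zf:
        return zf.read(f"mcli_framework-{version}.dist-info/RECORD").decode().splitlines(keepends=True)


old_whl, new_whl = fetch_wheel("7.2.0"), fetch_wheel("7.3.1")
diff = difflib.unified_diff(
    read_record(old_whl, "7.2.0"), read_record(new_whl, "7.3.1"),
    fromfile="7.2.0/RECORD", tofile="7.3.1/RECORD",
)
print("".join(diff))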
mcli/ml/dashboard/utils.py
@@ -0,0 +1,154 @@
+"""Shared utility functions for dashboard pages"""
+
+import os
+import logging
+from typing import List, Optional
+import pandas as pd
+import streamlit as st
+from supabase import Client, create_client
+
+logger = logging.getLogger(__name__)
+
+
+def get_supabase_client() -> Optional[Client]:
+    """Get Supabase client with Streamlit Cloud secrets support"""
+    # Try Streamlit secrets first (for Streamlit Cloud), then fall back to environment variables (for local dev)
+    try:
+        url = st.secrets.get("SUPABASE_URL", "")
+        key = st.secrets.get("SUPABASE_KEY", "") or st.secrets.get("SUPABASE_SERVICE_ROLE_KEY", "")
+    except (AttributeError, FileNotFoundError):
+        # Secrets not available, try environment variables
+        url = os.getenv("SUPABASE_URL", "")
+        key = os.getenv("SUPABASE_KEY", "") or os.getenv("SUPABASE_SERVICE_ROLE_KEY", "")
+
+    if not url or not key:
+        logger.warning("Supabase credentials not found")
+        return None
+
+    try:
+        client = create_client(url, key)
+        # Test connection with a simple query
+        try:
+            test_result = client.table("politicians").select("id").limit(1).execute()
+            logger.info(f"✅ Supabase connection successful (URL: {url[:30]}...)")
+            return client
+        except Exception as conn_error:
+            st.error(f"❌ Supabase connection failed: {conn_error}")
+            return None
+    except Exception as e:
+        logger.error(f"Failed to create Supabase client: {e}")
+        return None
+
+
+def get_politician_names() -> List[str]:
+    """Get all politician names from database for searchable dropdown"""
+    try:
+        client = get_supabase_client()
+        if not client:
+            return ["Nancy Pelosi", "Paul Pelosi", "Dan Crenshaw", "Josh Gottheimer"]  # Fallback
+
+        result = client.table("politicians").select("first_name, last_name").execute()
+        names = [f"{row['first_name']} {row['last_name']}" for row in result.data]
+        return names if names else ["Nancy Pelosi", "Paul Pelosi", "Dan Crenshaw", "Josh Gottheimer"]
+    except Exception as e:
+        logger.error(f"Failed to get politician names: {e}")
+        return ["Nancy Pelosi", "Paul Pelosi", "Dan Crenshaw", "Josh Gottheimer"]
+
+
+def get_disclosures_data() -> pd.DataFrame:
+    """Get trading disclosures from Supabase with proper schema mapping"""
+    client = get_supabase_client()
+    if not client:
+        return _generate_demo_disclosures()
+
+    try:
+        # First, get total count
+        count_response = (
+            client.table("trading_disclosures")
+            .select("*", count="exact")
+            .execute()
+        )
+        total_count = count_response.count
+
+        if total_count == 0:
+            return _generate_demo_disclosures()
+
+        # Get the data
+        response = (
+            client.table("trading_disclosures")
+            .select("*")
+            .order("disclosure_date", desc=True)
+            .limit(1000)
+            .execute()
+        )
+
+        if not response.data:
+            return _generate_demo_disclosures()
+
+        df = pd.DataFrame(response.data)
+        return df
+
+    except Exception as e:
+        logger.error(f"Failed to fetch disclosures: {e}")
+        return _generate_demo_disclosures()
+
+
+def _generate_demo_disclosures() -> pd.DataFrame:
+    """Generate demo trading disclosure data for testing"""
+    st.info("🔵 Using demo trading data (Supabase unavailable)")
+
+    import random
+    from datetime import datetime, timedelta
+
+    politicians = ["Nancy Pelosi", "Paul Pelosi", "Dan Crenshaw", "Josh Gottheimer"]
+    tickers = ["AAPL", "MSFT", "GOOGL", "AMZN", "NVDA", "TSLA", "META", "AMD"]
+    transaction_types = ["Purchase", "Sale"]
+
+    data = []
+    for _ in range(50):
+        data.append({
+            "politician_name": random.choice(politicians),
+            "ticker_symbol": random.choice(tickers),
+            "transaction_type": random.choice(transaction_types),
+            "amount_min": random.randint(1000, 100000),
+            "amount_max": random.randint(100000, 1000000),
+            "disclosure_date": (datetime.now() - timedelta(days=random.randint(1, 365))).strftime("%Y-%m-%d"),
+            "asset_description": f"{random.choice(tickers)} Stock",
+        })
+
+    return pd.DataFrame(data)
+
+
+def get_politician_trading_history(politician_name: str) -> pd.DataFrame:
+    """Get trading history for a specific politician"""
+    try:
+        client = get_supabase_client()
+        if not client:
+            return pd.DataFrame()  # Return empty if no client
+
+        # Split name into first and last
+        name_parts = politician_name.split()
+        if len(name_parts) < 2:
+            return pd.DataFrame()
+
+        first_name = name_parts[0]
+        last_name = " ".join(name_parts[1:])
+
+        # Get trading disclosures for this politician
+        response = (
+            client.table("trading_disclosures")
+            .select("*")
+            .eq("politician_name", politician_name)
+            .order("disclosure_date", desc=True)
+            .limit(100)
+            .execute()
+        )
+
+        if response.data:
+            return pd.DataFrame(response.data)
+        else:
+            return pd.DataFrame()
+
+    except Exception as e:
+        logger.warning(f"Failed to fetch trading history for {politician_name}: {e}")
+        return pd.DataFrame()
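These helpers are plain module-level functions, so a dashboard page can compose them directly. Below is a hypothetical usage sketch for a Streamlit page; it assumes the installed package exposes the module as mcli.ml.dashboard.utils (consistent with the wheel path above), while the page title and layout are illustrative only:

# Hypothetical Streamlit page built only on the helpers shown above; the
# absolute import matches the wheel path mcli/ml/dashboard/utils.py.
import streamlit as st

from mcli.ml.dashboard.utils import (
    get_disclosures_data,
    get_politician_names,
    get_politician_trading_history,
)

st.title("Politician Trading Disclosures")  # illustrative title

# Recent disclosures; falls back to demo data when Supabase is unreachable.
st.dataframe(get_disclosures_data())

# Drill into a single politician via the searchable dropdown.
name = st.selectbox("Politician", get_politician_names())
history = get_politician_trading_history(name)
if history.empty:
    st.warning(f"No trading history found for {name}")
else:
    st.dataframe(history)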
mcli/ml/data_ingestion/__init__.py
@@ -0,0 +1,39 @@
+"""Real-time data ingestion pipeline"""
+
+from .api_connectors import (
+    AlphaVantageConnector,
+    CongressionalDataAPI,
+    PolygonIOConnector,
+    QuiverQuantConnector,
+    StockMarketAPI,
+    YahooFinanceConnector,
+)
+from .data_pipeline import (
+    DataLoader,
+    DataTransformer,
+    DataValidator,
+    IngestionPipeline,
+)
+from .stream_processor import (
+    DataStream,
+    KafkaConsumer,
+    StreamProcessor,
+    WebSocketConsumer,
+)
+
+__all__ = [
+    "StreamProcessor",
+    "DataStream",
+    "KafkaConsumer",
+    "WebSocketConsumer",
+    "CongressionalDataAPI",
+    "StockMarketAPI",
+    "AlphaVantageConnector",
+    "YahooFinanceConnector",
+    "PolygonIOConnector",
+    "QuiverQuantConnector",
+    "IngestionPipeline",
+    "DataValidator",
+    "DataTransformer",
+    "DataLoader",
+]
mcli/ml/database/__init__.py
@@ -0,0 +1,47 @@
+"""Database models and utilities"""
+
+from .models import (
+    Alert,
+    BacktestResult,
+    Base,
+    DataVersion,
+    Experiment,
+    FeatureSet,
+    Model,
+    Politician,
+    Portfolio,
+    Prediction,
+    StockData,
+    Trade,
+    User,
+)
+from .session import (
+    AsyncSessionLocal,
+    SessionLocal,
+    async_engine,
+    engine,
+    get_async_db,
+    get_db,
+)
+
+__all__ = [
+    "Base",
+    "User",
+    "Trade",
+    "Politician",
+    "StockData",
+    "Prediction",
+    "Portfolio",
+    "Alert",
+    "BacktestResult",
+    "Experiment",
+    "Model",
+    "FeatureSet",
+    "DataVersion",
+    "get_db",
+    "get_async_db",
+    "SessionLocal",
+    "AsyncSessionLocal",
+    "engine",
+    "async_engine",
+]
mcli/ml/experimentation/__init__.py
@@ -0,0 +1,29 @@
+"""ML Experimentation and A/B Testing Framework"""
+
+from .ab_testing import (
+    ABTestingFramework,
+    ExperimentConfig,
+    ExperimentResult,
+    ExperimentStatus,
+    Metric,
+    MetricsCollector,
+    StatisticalAnalyzer,
+    TrafficSplitter,
+    UserAssignment,
+    Variant,
+    VariantType,
+)
+
+__all__ = [
+    "ABTestingFramework",
+    "ExperimentConfig",
+    "Variant",
+    "VariantType",
+    "Metric",
+    "ExperimentStatus",
+    "ExperimentResult",
+    "UserAssignment",
+    "TrafficSplitter",
+    "MetricsCollector",
+    "StatisticalAnalyzer",
+]
mcli/ml/features/__init__.py
@@ -0,0 +1,39 @@
+"""Feature Engineering Module for Stock Recommendation Models"""
+
+from .ensemble_features import (
+    DynamicFeatureSelector,
+    EnsembleFeatureBuilder,
+    FeatureInteractionEngine,
+)
+from .political_features import (
+    CongressionalTrackingFeatures,
+    PolicyImpactFeatures,
+    PoliticalInfluenceFeatures,
+)
+from .recommendation_engine import (
+    RecommendationConfig,
+    RecommendationResult,
+    StockRecommendationEngine,
+)
+from .stock_features import (
+    CrossAssetFeatures,
+    MarketRegimeFeatures,
+    StockRecommendationFeatures,
+    TechnicalIndicatorFeatures,
+)
+
+__all__ = [
+    "StockRecommendationFeatures",
+    "TechnicalIndicatorFeatures",
+    "MarketRegimeFeatures",
+    "CrossAssetFeatures",
+    "PoliticalInfluenceFeatures",
+    "CongressionalTrackingFeatures",
+    "PolicyImpactFeatures",
+    "EnsembleFeatureBuilder",
+    "FeatureInteractionEngine",
+    "DynamicFeatureSelector",
+    "StockRecommendationEngine",
+    "RecommendationConfig",
+    "RecommendationResult",
+]
mcli/ml/mlops/__init__.py
@@ -0,0 +1,33 @@
+"""MLOps components for ML pipeline management"""
+
+from .experiment_tracker import (
+    ExperimentRun,
+    ExperimentTracker,
+    MLflowConfig,
+    ModelRegistry,
+)
+from .model_serving import (
+    ModelEndpoint,
+    ModelServer,
+    PredictionService,
+)
+from .pipeline_orchestrator import (
+    MLPipeline,
+    PipelineConfig,
+    PipelineExecutor,
+    PipelineStep,
+)
+
+__all__ = [
+    "ExperimentTracker",
+    "ModelRegistry",
+    "MLflowConfig",
+    "ExperimentRun",
+    "ModelServer",
+    "PredictionService",
+    "ModelEndpoint",
+    "MLPipeline",
+    "PipelineStep",
+    "PipelineConfig",
+    "PipelineExecutor",
+]
mcli/ml/models/__init__.py
@@ -0,0 +1,94 @@
+"""ML Models for Stock Recommendation System"""
+
+from pathlib import Path
+from typing import Any, Dict, Optional
+
+import torch
+
+from .base_models import (
+    BaseStockModel,
+    ModelMetrics,
+    ValidationResult,
+)
+from .ensemble_models import (
+    AttentionStockPredictor,
+    CNNFeatureExtractor,
+    DeepEnsembleModel,
+    EnsembleConfig,
+    EnsembleTrainer,
+    LSTMStockPredictor,
+    ModelConfig,
+    TransformerStockModel,
+)
+from .recommendation_models import (
+    RecommendationConfig,
+    RecommendationTrainer,
+    StockRecommendationModel,
+)
+
+# Model registry
+_loaded_models: Dict[str, Any] = {}
+
+
+async def load_production_models():
+    """Load production models into memory"""
+    from mcli.ml.config import settings
+    from mcli.ml.logging import get_logger
+
+    logger = get_logger(__name__)
+    model_dir = settings.model.model_dir
+
+    if not model_dir.exists():
+        model_dir.mkdir(parents=True, exist_ok=True)
+        return
+
+    for model_path in model_dir.glob("*.pt"):
+        try:
+            model_id = model_path.stem
+            model = torch.load(model_path, map_location=settings.model.device)
+            _loaded_models[model_id] = model
+            logger.info(f"Loaded model: {model_id}")
+        except Exception as e:
+            logger.error(f"Failed to load model {model_path}: {e}")
+
+
+async def get_model_by_id(model_id: str):
+    """Get loaded model by ID"""
+    from mcli.ml.config import settings
+
+    if model_id not in _loaded_models:
+        # Try to load from disk
+        model_path = settings.model.model_dir / f"{model_id}.pt"
+        if model_path.exists():
+            _loaded_models[model_id] = torch.load(model_path, map_location=settings.model.device)
+
+    return _loaded_models.get(model_id)
+
+
+def initialize_models():
+    """Initialize models on startup"""
+    from mcli.ml.logging import get_logger
+
+    logger = get_logger(__name__)
+    logger.info("Initializing ML models...")
+
+
+__all__ = [
+    "DeepEnsembleModel",
+    "AttentionStockPredictor",
+    "TransformerStockModel",
+    "LSTMStockPredictor",
+    "CNNFeatureExtractor",
+    "EnsembleTrainer",
+    "ModelConfig",
+    "EnsembleConfig",
+    "BaseStockModel",
+    "ModelMetrics",
+    "ValidationResult",
+    "StockRecommendationModel",
+    "RecommendationTrainer",
+    "RecommendationConfig",
+    "load_production_models",
+    "get_model_by_id",
+    "initialize_models",
+]
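The module keeps a process-level cache (_loaded_models) that load_production_models and get_model_by_id fill lazily from settings.model.model_dir. A rough usage sketch, assuming the package is installed and at least one *.pt checkpoint exists; the checkpoint name below is made up:

# Sketch only: exercises the loader helpers exported above. "ensemble_v1" is a
# hypothetical checkpoint name, not one shipped in this release.
import asyncio

from mcli.ml.models import get_model_by_id, initialize_models, load_production_models


async def main() -> None:
    initialize_models()             # logs startup, per the code above
    await load_production_models()  # loads every *.pt in the model directory
    model = await get_model_by_id("ensemble_v1")
    print(type(model) if model is not None else "no such checkpoint on disk")


asyncio.run(main())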
mcli/ml/monitoring/__init__.py
@@ -0,0 +1,25 @@
+"""ML Model Monitoring and Drift Detection"""
+
+from .drift_detection import (
+    AlertSeverity,
+    ConceptDriftDetector,
+    DataProfile,
+    DriftAlert,
+    DriftType,
+    ModelMetrics,
+    ModelMonitor,
+    OutlierDetector,
+    StatisticalDriftDetector,
+)
+
+__all__ = [
+    "ModelMonitor",
+    "StatisticalDriftDetector",
+    "ConceptDriftDetector",
+    "OutlierDetector",
+    "DriftAlert",
+    "DriftType",
+    "AlertSeverity",
+    "ModelMetrics",
+    "DataProfile",
+]
mcli/ml/optimization/__init__.py
@@ -0,0 +1,27 @@
+"""Advanced Portfolio Optimization"""
+
+from .portfolio_optimizer import (
+    AdvancedPortfolioOptimizer,
+    BaseOptimizer,
+    BlackLittermanOptimizer,
+    CVaROptimizer,
+    KellyCriterionOptimizer,
+    MeanVarianceOptimizer,
+    OptimizationConstraints,
+    OptimizationObjective,
+    PortfolioAllocation,
+    RiskParityOptimizer,
+)
+
+__all__ = [
+    "AdvancedPortfolioOptimizer",
+    "OptimizationObjective",
+    "OptimizationConstraints",
+    "PortfolioAllocation",
+    "MeanVarianceOptimizer",
+    "RiskParityOptimizer",
+    "BlackLittermanOptimizer",
+    "CVaROptimizer",
+    "KellyCriterionOptimizer",
+    "BaseOptimizer",
+]
mcli/ml/preprocessing/__init__.py
@@ -0,0 +1,28 @@
+"""ML Data Preprocessing Module"""
+
+from .data_cleaners import (
+    MissingValueHandler,
+    OutlierDetector,
+    TradingDataCleaner,
+)
+from .feature_extractors import (
+    MarketFeatureExtractor,
+    PoliticianFeatureExtractor,
+    SentimentFeatureExtractor,
+    TemporalFeatureExtractor,
+)
+from .ml_pipeline import MLDataPipeline, MLDataPipelineConfig
+from .politician_trading_preprocessor import PoliticianTradingPreprocessor
+
+__all__ = [
+    "PoliticianTradingPreprocessor",
+    "PoliticianFeatureExtractor",
+    "MarketFeatureExtractor",
+    "TemporalFeatureExtractor",
+    "SentimentFeatureExtractor",
+    "TradingDataCleaner",
+    "OutlierDetector",
+    "MissingValueHandler",
+    "MLDataPipeline",
+    "MLDataPipelineConfig",
+]
mcli/ml/scripts/__init__.py
@@ -0,0 +1 @@
+"""ML scripts module."""
mcli/ml/trading/__init__.py
@@ -0,0 +1,60 @@
+"""Trading module for portfolio management and trade execution"""
+
+from mcli.ml.trading.models import (
+    # Enums
+    OrderStatus,
+    OrderType,
+    OrderSide,
+    PositionSide,
+    PortfolioType,
+    RiskLevel,
+    # Database models
+    TradingAccount,
+    Portfolio,
+    Position,
+    TradingOrder,
+    PortfolioPerformanceSnapshot,
+    TradingSignal,
+    # Pydantic models
+    TradingAccountCreate,
+    PortfolioCreate,
+    OrderCreate,
+    PositionResponse,
+    OrderResponse,
+    PortfolioResponse,
+    TradingSignalResponse,
+)
+from mcli.ml.trading.trading_service import TradingService
+from mcli.ml.trading.alpaca_client import AlpacaTradingClient
+from mcli.ml.trading.risk_management import RiskManager
+from mcli.ml.trading.paper_trading import PaperTradingEngine
+
+__all__ = [
+    # Enums
+    "OrderStatus",
+    "OrderType",
+    "OrderSide",
+    "PositionSide",
+    "PortfolioType",
+    "RiskLevel",
+    # Database models
+    "TradingAccount",
+    "Portfolio",
+    "Position",
+    "TradingOrder",
+    "PortfolioPerformanceSnapshot",
+    "TradingSignal",
+    # Pydantic models
+    "TradingAccountCreate",
+    "PortfolioCreate",
+    "OrderCreate",
+    "PositionResponse",
+    "OrderResponse",
+    "PortfolioResponse",
+    "TradingSignalResponse",
+    # Services
+    "TradingService",
+    "AlpacaTradingClient",
+    "RiskManager",
+    "PaperTradingEngine",
+]
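Everything the new trading package exposes is re-exported here, so downstream code only needs the top-level namespace. A sketch of that import surface; constructor arguments and method signatures are defined in the files added above (models.py, trading_service.py, alpaca_client.py, risk_management.py, paper_trading.py) and are not assumed here:

# Import-surface sketch only; how each class is instantiated lives in the new
# trading/*.py modules listed in this release, not in this __init__.
from mcli.ml.trading import (
    AlpacaTradingClient,
    OrderCreate,
    OrderSide,
    OrderType,
    PaperTradingEngine,
    RiskManager,
    TradingService,
)

# Sanity check that the public names resolve after installing 7.3.1.
for exported in (TradingService, AlpacaTradingClient, RiskManager, PaperTradingEngine, OrderCreate, OrderSide, OrderType):
    print(exported.__name__)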