mcli-framework 7.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcli-framework might be problematic. Click here for more details.
- mcli/app/chat_cmd.py +42 -0
- mcli/app/commands_cmd.py +226 -0
- mcli/app/completion_cmd.py +216 -0
- mcli/app/completion_helpers.py +288 -0
- mcli/app/cron_test_cmd.py +697 -0
- mcli/app/logs_cmd.py +419 -0
- mcli/app/main.py +492 -0
- mcli/app/model/model.py +1060 -0
- mcli/app/model_cmd.py +227 -0
- mcli/app/redis_cmd.py +269 -0
- mcli/app/video/video.py +1114 -0
- mcli/app/visual_cmd.py +303 -0
- mcli/chat/chat.py +2409 -0
- mcli/chat/command_rag.py +514 -0
- mcli/chat/enhanced_chat.py +652 -0
- mcli/chat/system_controller.py +1010 -0
- mcli/chat/system_integration.py +1016 -0
- mcli/cli.py +25 -0
- mcli/config.toml +20 -0
- mcli/lib/api/api.py +586 -0
- mcli/lib/api/daemon_client.py +203 -0
- mcli/lib/api/daemon_client_local.py +44 -0
- mcli/lib/api/daemon_decorator.py +217 -0
- mcli/lib/api/mcli_decorators.py +1032 -0
- mcli/lib/auth/auth.py +85 -0
- mcli/lib/auth/aws_manager.py +85 -0
- mcli/lib/auth/azure_manager.py +91 -0
- mcli/lib/auth/credential_manager.py +192 -0
- mcli/lib/auth/gcp_manager.py +93 -0
- mcli/lib/auth/key_manager.py +117 -0
- mcli/lib/auth/mcli_manager.py +93 -0
- mcli/lib/auth/token_manager.py +75 -0
- mcli/lib/auth/token_util.py +1011 -0
- mcli/lib/config/config.py +47 -0
- mcli/lib/discovery/__init__.py +1 -0
- mcli/lib/discovery/command_discovery.py +274 -0
- mcli/lib/erd/erd.py +1345 -0
- mcli/lib/erd/generate_graph.py +453 -0
- mcli/lib/files/files.py +76 -0
- mcli/lib/fs/fs.py +109 -0
- mcli/lib/lib.py +29 -0
- mcli/lib/logger/logger.py +611 -0
- mcli/lib/performance/optimizer.py +409 -0
- mcli/lib/performance/rust_bridge.py +502 -0
- mcli/lib/performance/uvloop_config.py +154 -0
- mcli/lib/pickles/pickles.py +50 -0
- mcli/lib/search/cached_vectorizer.py +479 -0
- mcli/lib/services/data_pipeline.py +460 -0
- mcli/lib/services/lsh_client.py +441 -0
- mcli/lib/services/redis_service.py +387 -0
- mcli/lib/shell/shell.py +137 -0
- mcli/lib/toml/toml.py +33 -0
- mcli/lib/ui/styling.py +47 -0
- mcli/lib/ui/visual_effects.py +634 -0
- mcli/lib/watcher/watcher.py +185 -0
- mcli/ml/api/app.py +215 -0
- mcli/ml/api/middleware.py +224 -0
- mcli/ml/api/routers/admin_router.py +12 -0
- mcli/ml/api/routers/auth_router.py +244 -0
- mcli/ml/api/routers/backtest_router.py +12 -0
- mcli/ml/api/routers/data_router.py +12 -0
- mcli/ml/api/routers/model_router.py +302 -0
- mcli/ml/api/routers/monitoring_router.py +12 -0
- mcli/ml/api/routers/portfolio_router.py +12 -0
- mcli/ml/api/routers/prediction_router.py +267 -0
- mcli/ml/api/routers/trade_router.py +12 -0
- mcli/ml/api/routers/websocket_router.py +76 -0
- mcli/ml/api/schemas.py +64 -0
- mcli/ml/auth/auth_manager.py +425 -0
- mcli/ml/auth/models.py +154 -0
- mcli/ml/auth/permissions.py +302 -0
- mcli/ml/backtesting/backtest_engine.py +502 -0
- mcli/ml/backtesting/performance_metrics.py +393 -0
- mcli/ml/cache.py +400 -0
- mcli/ml/cli/main.py +398 -0
- mcli/ml/config/settings.py +394 -0
- mcli/ml/configs/dvc_config.py +230 -0
- mcli/ml/configs/mlflow_config.py +131 -0
- mcli/ml/configs/mlops_manager.py +293 -0
- mcli/ml/dashboard/app.py +532 -0
- mcli/ml/dashboard/app_integrated.py +738 -0
- mcli/ml/dashboard/app_supabase.py +560 -0
- mcli/ml/dashboard/app_training.py +615 -0
- mcli/ml/dashboard/cli.py +51 -0
- mcli/ml/data_ingestion/api_connectors.py +501 -0
- mcli/ml/data_ingestion/data_pipeline.py +567 -0
- mcli/ml/data_ingestion/stream_processor.py +512 -0
- mcli/ml/database/migrations/env.py +94 -0
- mcli/ml/database/models.py +667 -0
- mcli/ml/database/session.py +200 -0
- mcli/ml/experimentation/ab_testing.py +845 -0
- mcli/ml/features/ensemble_features.py +607 -0
- mcli/ml/features/political_features.py +676 -0
- mcli/ml/features/recommendation_engine.py +809 -0
- mcli/ml/features/stock_features.py +573 -0
- mcli/ml/features/test_feature_engineering.py +346 -0
- mcli/ml/logging.py +85 -0
- mcli/ml/mlops/data_versioning.py +518 -0
- mcli/ml/mlops/experiment_tracker.py +377 -0
- mcli/ml/mlops/model_serving.py +481 -0
- mcli/ml/mlops/pipeline_orchestrator.py +614 -0
- mcli/ml/models/base_models.py +324 -0
- mcli/ml/models/ensemble_models.py +675 -0
- mcli/ml/models/recommendation_models.py +474 -0
- mcli/ml/models/test_models.py +487 -0
- mcli/ml/monitoring/drift_detection.py +676 -0
- mcli/ml/monitoring/metrics.py +45 -0
- mcli/ml/optimization/portfolio_optimizer.py +834 -0
- mcli/ml/preprocessing/data_cleaners.py +451 -0
- mcli/ml/preprocessing/feature_extractors.py +491 -0
- mcli/ml/preprocessing/ml_pipeline.py +382 -0
- mcli/ml/preprocessing/politician_trading_preprocessor.py +569 -0
- mcli/ml/preprocessing/test_preprocessing.py +294 -0
- mcli/ml/scripts/populate_sample_data.py +200 -0
- mcli/ml/tasks.py +400 -0
- mcli/ml/tests/test_integration.py +429 -0
- mcli/ml/tests/test_training_dashboard.py +387 -0
- mcli/public/oi/oi.py +15 -0
- mcli/public/public.py +4 -0
- mcli/self/self_cmd.py +1246 -0
- mcli/workflow/daemon/api_daemon.py +800 -0
- mcli/workflow/daemon/async_command_database.py +681 -0
- mcli/workflow/daemon/async_process_manager.py +591 -0
- mcli/workflow/daemon/client.py +530 -0
- mcli/workflow/daemon/commands.py +1196 -0
- mcli/workflow/daemon/daemon.py +905 -0
- mcli/workflow/daemon/daemon_api.py +59 -0
- mcli/workflow/daemon/enhanced_daemon.py +571 -0
- mcli/workflow/daemon/process_cli.py +244 -0
- mcli/workflow/daemon/process_manager.py +439 -0
- mcli/workflow/daemon/test_daemon.py +275 -0
- mcli/workflow/dashboard/dashboard_cmd.py +113 -0
- mcli/workflow/docker/docker.py +0 -0
- mcli/workflow/file/file.py +100 -0
- mcli/workflow/gcloud/config.toml +21 -0
- mcli/workflow/gcloud/gcloud.py +58 -0
- mcli/workflow/git_commit/ai_service.py +328 -0
- mcli/workflow/git_commit/commands.py +430 -0
- mcli/workflow/lsh_integration.py +355 -0
- mcli/workflow/model_service/client.py +594 -0
- mcli/workflow/model_service/download_and_run_efficient_models.py +288 -0
- mcli/workflow/model_service/lightweight_embedder.py +397 -0
- mcli/workflow/model_service/lightweight_model_server.py +714 -0
- mcli/workflow/model_service/lightweight_test.py +241 -0
- mcli/workflow/model_service/model_service.py +1955 -0
- mcli/workflow/model_service/ollama_efficient_runner.py +425 -0
- mcli/workflow/model_service/pdf_processor.py +386 -0
- mcli/workflow/model_service/test_efficient_runner.py +234 -0
- mcli/workflow/model_service/test_example.py +315 -0
- mcli/workflow/model_service/test_integration.py +131 -0
- mcli/workflow/model_service/test_new_features.py +149 -0
- mcli/workflow/openai/openai.py +99 -0
- mcli/workflow/politician_trading/commands.py +1790 -0
- mcli/workflow/politician_trading/config.py +134 -0
- mcli/workflow/politician_trading/connectivity.py +490 -0
- mcli/workflow/politician_trading/data_sources.py +395 -0
- mcli/workflow/politician_trading/database.py +410 -0
- mcli/workflow/politician_trading/demo.py +248 -0
- mcli/workflow/politician_trading/models.py +165 -0
- mcli/workflow/politician_trading/monitoring.py +413 -0
- mcli/workflow/politician_trading/scrapers.py +966 -0
- mcli/workflow/politician_trading/scrapers_california.py +412 -0
- mcli/workflow/politician_trading/scrapers_eu.py +377 -0
- mcli/workflow/politician_trading/scrapers_uk.py +350 -0
- mcli/workflow/politician_trading/scrapers_us_states.py +438 -0
- mcli/workflow/politician_trading/supabase_functions.py +354 -0
- mcli/workflow/politician_trading/workflow.py +852 -0
- mcli/workflow/registry/registry.py +180 -0
- mcli/workflow/repo/repo.py +223 -0
- mcli/workflow/scheduler/commands.py +493 -0
- mcli/workflow/scheduler/cron_parser.py +238 -0
- mcli/workflow/scheduler/job.py +182 -0
- mcli/workflow/scheduler/monitor.py +139 -0
- mcli/workflow/scheduler/persistence.py +324 -0
- mcli/workflow/scheduler/scheduler.py +679 -0
- mcli/workflow/sync/sync_cmd.py +437 -0
- mcli/workflow/sync/test_cmd.py +314 -0
- mcli/workflow/videos/videos.py +242 -0
- mcli/workflow/wakatime/wakatime.py +11 -0
- mcli/workflow/workflow.py +37 -0
- mcli_framework-7.0.0.dist-info/METADATA +479 -0
- mcli_framework-7.0.0.dist-info/RECORD +186 -0
- mcli_framework-7.0.0.dist-info/WHEEL +5 -0
- mcli_framework-7.0.0.dist-info/entry_points.txt +7 -0
- mcli_framework-7.0.0.dist-info/licenses/LICENSE +21 -0
- mcli_framework-7.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,560 @@
|
|
|
1
|
+
"""Streamlit dashboard for ML system monitoring - Supabase version"""
|
|
2
|
+
|
|
3
|
+
import streamlit as st
|
|
4
|
+
import pandas as pd
|
|
5
|
+
import plotly.express as px
|
|
6
|
+
import plotly.graph_objects as go
|
|
7
|
+
from plotly.subplots import make_subplots
|
|
8
|
+
import asyncio
|
|
9
|
+
from datetime import datetime, timedelta
|
|
10
|
+
import numpy as np
|
|
11
|
+
from supabase import create_client, Client
|
|
12
|
+
import os
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
from dotenv import load_dotenv
|
|
15
|
+
|
|
16
|
+
# Page config must come first
|
|
17
|
+
st.set_page_config(
|
|
18
|
+
page_title="MCLI ML Dashboard",
|
|
19
|
+
page_icon="📊",
|
|
20
|
+
layout="wide",
|
|
21
|
+
initial_sidebar_state="expanded"
|
|
22
|
+
)
|
|
23
|
+
|
|
24
|
+
# Load environment variables from supabase/.env.local
|
|
25
|
+
env_path = Path(__file__).parent.parent.parent.parent.parent / "supabase" / ".env.local"
|
|
26
|
+
if env_path.exists():
|
|
27
|
+
load_dotenv(env_path)
|
|
28
|
+
|
|
29
|
+
# Custom CSS
|
|
30
|
+
st.markdown("""
|
|
31
|
+
<style>
|
|
32
|
+
.metric-card {
|
|
33
|
+
background-color: #f0f2f6;
|
|
34
|
+
padding: 1rem;
|
|
35
|
+
border-radius: 0.5rem;
|
|
36
|
+
border-left: 4px solid #1f77b4;
|
|
37
|
+
}
|
|
38
|
+
</style>
|
|
39
|
+
""", unsafe_allow_html=True)
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
@st.cache_resource
def get_supabase_client() -> Client:
    """Return a cached Supabase client, or None when credentials are missing.

    Reads SUPABASE_URL plus either SUPABASE_KEY or SUPABASE_ANON_KEY from the
    environment. When either piece is absent, shows a UI warning and returns
    None instead of raising.
    """
    supabase_url = os.getenv("SUPABASE_URL", "")
    # Accept either env var name for the key.
    supabase_key = os.getenv("SUPABASE_KEY", "") or os.getenv("SUPABASE_ANON_KEY", "")

    if supabase_url and supabase_key:
        return create_client(supabase_url, supabase_key)

    st.warning("⚠️ Supabase credentials not found. Set SUPABASE_URL and SUPABASE_ANON_KEY environment variables.")
    return None
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@st.cache_data(ttl=30)
def get_politicians_data():
    """Fetch every row of the `politicians` table as a DataFrame.

    Cached for 30 seconds. Returns an empty DataFrame (with a UI warning or
    error) when the client is unconfigured or the query fails.
    """
    supabase = get_supabase_client()
    if not supabase:
        st.warning("No Supabase client available")
        return pd.DataFrame()

    try:
        result = supabase.table("politicians").select("*").execute()
        frame = pd.DataFrame(result.data)
        print(f"Fetched {len(frame)} politicians")  # Debug output
        return frame
    except Exception as e:
        st.error(f"Error fetching politicians: {e}")
        print(f"Error fetching politicians: {e}")  # Debug output
        return pd.DataFrame()
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
@st.cache_data(ttl=30)
def get_disclosures_data():
    """Fetch the 500 most recent trading disclosures as a DataFrame.

    Ordered by disclosure_date descending; cached for 30 seconds. Returns an
    empty DataFrame when the client is unconfigured or the query fails.
    """
    supabase = get_supabase_client()
    if not supabase:
        return pd.DataFrame()

    try:
        # Newest disclosures first, capped at 500 rows.
        result = (
            supabase.table("trading_disclosures")
            .select("*")
            .order("disclosure_date", desc=True)
            .limit(500)
            .execute()
        )
        return pd.DataFrame(result.data)
    except Exception as e:
        st.error(f"Error fetching disclosures: {e}")
        return pd.DataFrame()
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
@st.cache_data(ttl=30)
def get_predictions_data():
    """Get ML predictions from Supabase.

    Returns the 100 most recent rows of `ml_predictions` (by created_at) as a
    DataFrame, or an empty DataFrame when the client is unconfigured or the
    table does not exist yet. Cached for 30 seconds.
    """
    client = get_supabase_client()
    if not client:
        return pd.DataFrame()

    try:
        # Try to get predictions if table exists
        response = client.table("ml_predictions").select("*").order("created_at", desc=True).limit(100).execute()
        return pd.DataFrame(response.data)
    except Exception:
        # Table might not exist yet. Narrowed from a bare `except:` so
        # KeyboardInterrupt/SystemExit are no longer swallowed here.
        return pd.DataFrame()
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
@st.cache_data(ttl=30)
def get_portfolios_data():
    """Get portfolio data from Supabase.

    Returns all rows of `portfolios` as a DataFrame, or an empty DataFrame
    when the client is unconfigured or the table does not exist yet. Cached
    for 30 seconds.
    """
    client = get_supabase_client()
    if not client:
        return pd.DataFrame()

    try:
        # Try to get portfolios if table exists
        response = client.table("portfolios").select("*").execute()
        return pd.DataFrame(response.data)
    except Exception:
        # Table might not exist yet. Narrowed from a bare `except:` so
        # KeyboardInterrupt/SystemExit are no longer swallowed here.
        return pd.DataFrame()
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
@st.cache_data(ttl=30)
def get_jobs_data():
    """Fetch the 50 most recent data-pull jobs as a DataFrame.

    Ordered by created_at descending; cached for 30 seconds. Returns an empty
    DataFrame (with a UI error) when the client is unconfigured or the query
    fails.
    """
    supabase = get_supabase_client()
    if not supabase:
        return pd.DataFrame()

    try:
        # Newest jobs first, capped at 50 rows.
        result = (
            supabase.table("data_pull_jobs")
            .select("*")
            .order("created_at", desc=True)
            .limit(50)
            .execute()
        )
        return pd.DataFrame(result.data)
    except Exception as e:
        st.error(f"Error fetching jobs: {e}")
        return pd.DataFrame()
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
def main():
    """Main dashboard function.

    Builds the sidebar (navigation links, debug expander, page selector,
    refresh controls) and dispatches to the selected page renderer.

    Fix: the auto-refresh sleep/rerun now runs AFTER the page content has
    rendered. Previously `time.sleep(30); st.rerun()` executed before the
    page dispatch, so with the checkbox on (its default) the script blocked
    for 30 seconds and reran without ever reaching the `show_*()` call —
    the selected page's content was never rendered.
    """
    # Title and header
    st.title("🤖 MCLI ML System Dashboard")
    st.markdown("Real-time monitoring of politician trading ML system")

    # Show connection status in sidebar
    st.sidebar.title("Navigation")

    # External Dashboard Links
    st.sidebar.markdown("---")
    st.sidebar.subheader("🔗 Navigation Hub")

    # NOTE(review): this hub banner renders in the MAIN area even though it
    # follows a sidebar subheader — confirm whether st.sidebar.markdown was
    # intended; behavior kept as-is here.
    st.markdown(
        '<a href="file:///Users/lefv/repos/lsh/dashboard-hub.html" target="_blank" style="text-decoration: none;">'
        '<div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 10px; border-radius: 8px; text-align: center; margin-bottom: 15px; font-weight: 600;">'
        '🚀 Dashboard Hub - View All'
        '</div></a>',
        unsafe_allow_html=True
    )

    st.sidebar.subheader("🔗 Direct Links")

    col1, col2 = st.sidebar.columns([1, 1])
    with col1:
        st.markdown(
            '<a href="http://localhost:3034/dashboard/" target="_blank" style="text-decoration: none;">'
            '<div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 8px; border-radius: 6px; text-align: center; margin-bottom: 8px;">'
            '📊 Pipeline Jobs'
            '</div></a>',
            unsafe_allow_html=True
        )
        st.markdown(
            '<a href="http://localhost:3034/dashboard/workflow.html" target="_blank" style="text-decoration: none;">'
            '<div style="background: linear-gradient(135deg, #48bb78 0%, #38a169 100%); color: white; padding: 8px; border-radius: 6px; text-align: center; margin-bottom: 8px;">'
            '🔄 Workflows'
            '</div></a>',
            unsafe_allow_html=True
        )

    with col2:
        st.markdown(
            '<a href="http://localhost:3033/dashboard/" target="_blank" style="text-decoration: none;">'
            '<div style="background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%); color: white; padding: 8px; border-radius: 6px; text-align: center; margin-bottom: 8px;">'
            '🏗️ CI/CD'
            '</div></a>',
            unsafe_allow_html=True
        )
        st.markdown(
            '<a href="http://localhost:3035/api/health" target="_blank" style="text-decoration: none;">'
            '<div style="background: linear-gradient(135deg, #4facfe 0%, #00f2fe 100%); color: white; padding: 8px; border-radius: 6px; text-align: center; margin-bottom: 8px;">'
            '🔍 Monitoring'
            '</div></a>',
            unsafe_allow_html=True
        )

    st.sidebar.markdown("---")

    # Debug info
    with st.sidebar.expander("🔧 Debug Info"):
        st.write(f"URL: {os.getenv('SUPABASE_URL', 'Not set')}")
        st.write(f"Key exists: {bool(os.getenv('SUPABASE_ANON_KEY'))}")
        client = get_supabase_client()
        if client:
            st.success("✅ Connected to Supabase")
        else:
            st.error("❌ Not connected to Supabase")
    page = st.sidebar.selectbox(
        "Choose a page",
        ["Overview", "Politicians", "Trading Disclosures", "ML Predictions", "Data Pull Jobs", "System Health"]
    )

    # Auto-refresh toggle (acted on AFTER rendering, see below)
    auto_refresh = st.sidebar.checkbox("Auto-refresh (30s)", value=True)

    # Manual refresh button: drop all cached data and rerun immediately
    if st.sidebar.button("🔄 Refresh Now"):
        st.cache_data.clear()
        st.rerun()

    # Main content based on selected page
    if page == "Overview":
        show_overview()
    elif page == "Politicians":
        show_politicians()
    elif page == "Trading Disclosures":
        show_disclosures()
    elif page == "ML Predictions":
        show_predictions()
    elif page == "Data Pull Jobs":
        show_jobs()
    elif page == "System Health":
        show_system_health()

    # Auto-refresh last: the rendered page stays visible during the wait,
    # then the script reruns (cached getters expire via their 30s TTL).
    if auto_refresh:
        import time
        time.sleep(30)
        st.rerun()
|
|
236
|
+
|
|
237
|
+
|
|
238
|
+
def show_overview():
    """Show overview dashboard.

    Renders four headline metrics (politicians tracked, total disclosures,
    ML predictions, job success rate) followed by two charts: a pie of
    transaction types and a bar chart of the ten most-traded tickers. Raw
    row counts are also written to the sidebar for debugging.
    """
    st.header("System Overview")

    # Get data (each getter is cached with a 30s TTL, so re-runs are cheap)
    politicians = get_politicians_data()
    disclosures = get_disclosures_data()
    predictions = get_predictions_data()
    jobs = get_jobs_data()

    # Debug: Show raw data counts
    st.sidebar.write(f"Debug: {len(politicians)} politicians loaded")
    st.sidebar.write(f"Debug: {len(disclosures)} disclosures loaded")
    st.sidebar.write(f"Debug: {len(jobs)} jobs loaded")

    # Also show sample data for debugging
    if not politicians.empty:
        st.sidebar.write("Sample politician:", politicians.iloc[0]['full_name'] if 'full_name' in politicians.columns else "No name")

    # Display key metrics
    col1, col2, col3, col4 = st.columns(4)

    with col1:
        st.metric(
            label="Politicians Tracked",
            value=len(politicians) if not politicians.empty else 0,
            delta=None  # Simplified to avoid errors
        )

    with col2:
        # Delta counts disclosures from the last 7 days. NOTE(review): the
        # comparison uses naive datetime.now() against parsed disclosure_date
        # values — if that column is timezone-aware this raises; confirm the
        # stored format. (`'disclosure_date' in disclosures` tests columns.)
        st.metric(
            label="Total Disclosures",
            value=len(disclosures),
            delta=f"{len(disclosures[pd.to_datetime(disclosures['disclosure_date']) > datetime.now() - timedelta(days=7)])} this week" if not disclosures.empty and 'disclosure_date' in disclosures else None
        )

    with col3:
        st.metric(
            label="ML Predictions",
            value=len(predictions) if not predictions.empty else "0"
        )

    with col4:
        # Success rate over all fetched jobs (up to the getter's 50-row cap).
        successful_jobs = len(jobs[jobs['status'] == 'completed']) if not jobs.empty and 'status' in jobs else 0
        total_jobs = len(jobs) if not jobs.empty else 0
        st.metric(
            label="Job Success Rate",
            value=f"{(successful_jobs/total_jobs*100):.1f}%" if total_jobs > 0 else "N/A"
        )

    # Charts
    col1, col2 = st.columns(2)

    with col1:
        st.subheader("Disclosure Types")
        if not disclosures.empty and 'transaction_type' in disclosures:
            type_counts = disclosures['transaction_type'].value_counts()
            fig = px.pie(values=type_counts.values, names=type_counts.index, title="Transaction Types")
            st.plotly_chart(fig, use_container_width=True)
        else:
            st.info("No disclosure data available")

    with col2:
        st.subheader("Top Traded Tickers")
        if not disclosures.empty and 'ticker_symbol' in disclosures:
            # Horizontal bar of the 10 most frequent tickers.
            ticker_counts = disclosures['ticker_symbol'].value_counts().head(10)
            fig = px.bar(x=ticker_counts.values, y=ticker_counts.index, orientation='h', title="Most Traded Stocks")
            st.plotly_chart(fig, use_container_width=True)
        else:
            st.info("No ticker data available")
|
|
308
|
+
|
|
309
|
+
|
|
310
|
+
def show_politicians():
    """Show politicians dashboard.

    Offers party/state multiselect filters plus an "active only" toggle,
    renders the filtered table, and shows party-distribution and top-state
    charts. Warns when no politician rows are available.
    """
    st.header("Politicians")

    politicians = get_politicians_data()

    if not politicians.empty:
        # Filters — `'party' in politicians` tests DataFrame COLUMNS, so each
        # widget degrades gracefully when a column is missing.
        col1, col2, col3 = st.columns(3)
        with col1:
            party_filter = st.multiselect(
                "Party",
                options=politicians['party'].dropna().unique() if 'party' in politicians else [],
                default=[]
            )
        with col2:
            state_filter = st.multiselect(
                "State",
                options=politicians['state'].dropna().unique() if 'state' in politicians else [],
                default=[]
            )
        with col3:
            active_only = st.checkbox("Active Only", value=True)

        # Apply filters on a copy so the cached source frame is untouched.
        filtered = politicians.copy()
        if party_filter and 'party' in filtered:
            filtered = filtered[filtered['party'].isin(party_filter)]
        if state_filter and 'state' in filtered:
            filtered = filtered[filtered['state'].isin(state_filter)]
        if active_only and 'is_active' in filtered:
            filtered = filtered[filtered['is_active'] == True]

        # Display data
        st.dataframe(filtered, use_container_width=True)

        # Stats
        col1, col2 = st.columns(2)
        with col1:
            if 'party' in filtered:
                party_dist = filtered['party'].value_counts()
                fig = px.pie(values=party_dist.values, names=party_dist.index, title="Party Distribution")
                st.plotly_chart(fig, use_container_width=True)
        with col2:
            if 'state' in filtered:
                # Top 10 states by row count, horizontal bars.
                state_dist = filtered['state'].value_counts().head(10)
                fig = px.bar(x=state_dist.values, y=state_dist.index, orientation='h', title="Top States")
                st.plotly_chart(fig, use_container_width=True)
    else:
        st.warning("No politician data available")
|
|
360
|
+
|
|
361
|
+
|
|
362
|
+
def show_disclosures():
    """Show trading disclosures dashboard.

    Offers ticker / transaction-type / date-range filters, renders the
    filtered table, then (when rows remain) plots daily trading volume and
    the ten most active traders.
    """
    st.header("Trading Disclosures")

    disclosures = get_disclosures_data()

    if not disclosures.empty:
        # Convert dates once up front so the range filter below can compare
        # against pd.Timestamp values.
        if 'disclosure_date' in disclosures:
            disclosures['disclosure_date'] = pd.to_datetime(disclosures['disclosure_date'])

        # Filters
        col1, col2, col3 = st.columns(3)
        with col1:
            # Upper-cased so matching is case-insensitive for user input.
            ticker_filter = st.text_input("Ticker Symbol", "").upper()
        with col2:
            transaction_types = disclosures['transaction_type'].dropna().unique() if 'transaction_type' in disclosures else []
            transaction_filter = st.selectbox("Transaction Type", ["All"] + list(transaction_types))
        with col3:
            date_range = st.date_input(
                "Date Range",
                value=(datetime.now() - timedelta(days=30), datetime.now()),
                max_value=datetime.now()
            )

        # Apply filters
        filtered = disclosures.copy()
        if ticker_filter and 'ticker_symbol' in filtered:
            # Substring match; na=False treats missing tickers as non-matches.
            filtered = filtered[filtered['ticker_symbol'].str.contains(ticker_filter, na=False)]
        if transaction_filter != "All" and 'transaction_type' in filtered:
            filtered = filtered[filtered['transaction_type'] == transaction_filter]
        # date_input can return a single date mid-selection; the len check
        # skips the range filter until both endpoints are picked.
        if len(date_range) == 2 and 'disclosure_date' in filtered:
            filtered = filtered[(filtered['disclosure_date'] >= pd.Timestamp(date_range[0])) &
                              (filtered['disclosure_date'] <= pd.Timestamp(date_range[1]))]

        # Display data
        st.dataframe(filtered, use_container_width=True)

        # Analysis
        if not filtered.empty:
            col1, col2 = st.columns(2)
            with col1:
                # Volume over time: daily sum of the `amount` column.
                if 'disclosure_date' in filtered and 'amount' in filtered:
                    daily_volume = filtered.groupby(filtered['disclosure_date'].dt.date)['amount'].sum()
                    fig = px.line(x=daily_volume.index, y=daily_volume.values, title="Trading Volume Over Time")
                    st.plotly_chart(fig, use_container_width=True)

            with col2:
                # Top politicians by number of disclosures.
                if 'politician_name' in filtered:
                    top_traders = filtered['politician_name'].value_counts().head(10)
                    fig = px.bar(x=top_traders.values, y=top_traders.index, orientation='h',
                               title="Most Active Traders")
                    st.plotly_chart(fig, use_container_width=True)
    else:
        st.warning("No disclosure data available")
|
|
419
|
+
|
|
420
|
+
|
|
421
|
+
def show_predictions():
    """Render the ML Predictions page.

    Shows the raw predictions table and, when a `confidence` column exists,
    a histogram of prediction confidence. Displays an informational note
    when no predictions are available yet.
    """
    st.header("ML Predictions")

    predictions = get_predictions_data()

    # Guard clause: nothing to plot until the pipeline produces rows.
    if predictions.empty:
        st.info("No ML predictions available yet. The ML pipeline will generate predictions once sufficient data is collected.")
        return

    st.dataframe(predictions, use_container_width=True)

    # Confidence distribution, only when the column is present.
    if 'confidence' in predictions:
        confidence_chart = px.histogram(predictions, x='confidence', title="Prediction Confidence Distribution")
        st.plotly_chart(confidence_chart, use_container_width=True)
|
|
436
|
+
|
|
437
|
+
|
|
438
|
+
def show_jobs():
    """Show data pull jobs dashboard.

    Renders completed/running/failed counters, the raw jobs table, and a
    stacked time-series of daily job counts per status.
    """
    st.header("Data Pull Jobs")

    jobs = get_jobs_data()

    if not jobs.empty:
        # Status overview
        col1, col2, col3 = st.columns(3)

        status_counts = jobs['status'].value_counts() if 'status' in jobs else pd.Series()

        with col1:
            st.metric("Completed", status_counts.get('completed', 0))
        with col2:
            st.metric("Running", status_counts.get('running', 0))
        with col3:
            st.metric("Failed", status_counts.get('failed', 0))

        # Jobs table
        st.dataframe(jobs, use_container_width=True)

        # Success rate over time: pivot daily counts per status into columns
        # and draw one stacked trace per status.
        if 'created_at' in jobs:
            jobs['created_at'] = pd.to_datetime(jobs['created_at'])
            jobs['date'] = jobs['created_at'].dt.date

            daily_stats = jobs.groupby(['date', 'status']).size().unstack(fill_value=0)
            fig = go.Figure()

            for status in daily_stats.columns:
                # stackgroup='one' stacks all traces into a cumulative area.
                fig.add_trace(go.Scatter(
                    x=daily_stats.index,
                    y=daily_stats[status],
                    mode='lines+markers',
                    name=status,
                    stackgroup='one'
                ))

            fig.update_layout(title="Job Status Over Time", xaxis_title="Date", yaxis_title="Count")
            st.plotly_chart(fig, use_container_width=True)
    else:
        st.warning("No job data available")
|
|
481
|
+
|
|
482
|
+
|
|
483
|
+
def show_system_health():
    """Show system health dashboard.

    Three status tiles — Supabase connectivity, data freshness (age of the
    newest disclosure), and recent job success rate — followed by a table
    and bar chart of row counts per entity.
    """
    st.header("System Health")

    client = get_supabase_client()

    # Check Supabase connection
    col1, col2, col3 = st.columns(3)

    with col1:
        if client:
            try:
                # Try a simple query to test connection
                client.table("politicians").select("id").limit(1).execute()
                st.success("✅ Supabase: Connected")
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed by the health probe.
                st.error("❌ Supabase: Connection Error")
        else:
            st.warning("⚠️ Supabase: Not Configured")

    with col2:
        # Check data freshness via the newest created_at timestamp.
        disclosures = get_disclosures_data()
        if not disclosures.empty and 'created_at' in disclosures:
            latest = pd.to_datetime(disclosures['created_at']).max()
            # NOTE(review): naive datetime.now() vs parsed timestamps — if
            # created_at is timezone-aware this subtraction raises; confirm
            # the stored format.
            hours_ago = (datetime.now() - latest).total_seconds() / 3600
            if hours_ago < 24:
                st.success(f"✅ Data: Fresh ({hours_ago:.1f}h old)")
            else:
                st.warning(f"⚠️ Data: Stale ({hours_ago:.1f}h old)")
        else:
            st.info("ℹ️ Data: No data yet")

    with col3:
        # Check job health over the 10 most recent jobs.
        jobs = get_jobs_data()
        if not jobs.empty and 'status' in jobs:
            recent_jobs = jobs.head(10)
            success_rate = (recent_jobs['status'] == 'completed').mean() * 100
            if success_rate > 80:
                st.success(f"✅ Jobs: {success_rate:.0f}% success")
            elif success_rate > 50:
                st.warning(f"⚠️ Jobs: {success_rate:.0f}% success")
            else:
                st.error(f"❌ Jobs: {success_rate:.0f}% success")
        else:
            st.info("ℹ️ Jobs: No jobs yet")

    # Data statistics
    st.subheader("Data Statistics")

    politicians = get_politicians_data()
    disclosures = get_disclosures_data()
    predictions = get_predictions_data()

    stats_data = {
        "Entity": ["Politicians", "Disclosures", "Predictions", "Data Jobs"],
        "Count": [
            len(politicians),
            len(disclosures),
            len(predictions),
            # `jobs` was bound in the col3 branch above; `with` blocks do not
            # introduce a new scope, so it is still visible here.
            len(jobs) if not jobs.empty else 0
        ]
    }

    stats_df = pd.DataFrame(stats_data)

    col1, col2 = st.columns(2)
    with col1:
        st.dataframe(stats_df, use_container_width=True)

    with col2:
        fig = px.bar(stats_df, x="Entity", y="Count", title="Database Records")
        st.plotly_chart(fig, use_container_width=True)
|
|
557
|
+
|
|
558
|
+
|
|
559
|
+
if __name__ == "__main__":
|
|
560
|
+
main()
|