mcli-framework 7.2.0__py3-none-any.whl → 7.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcli-framework might be problematic. Click here for more details.
- mcli/__init__.py +160 -0
- mcli/__main__.py +14 -0
- mcli/app/__init__.py +23 -0
- mcli/app/commands_cmd.py +741 -0
- mcli/app/model/__init__.py +0 -0
- mcli/app/video/__init__.py +5 -0
- mcli/chat/__init__.py +34 -0
- mcli/lib/__init__.py +0 -0
- mcli/lib/api/__init__.py +0 -0
- mcli/lib/auth/__init__.py +1 -0
- mcli/lib/config/__init__.py +1 -0
- mcli/lib/erd/__init__.py +25 -0
- mcli/lib/files/__init__.py +0 -0
- mcli/lib/fs/__init__.py +1 -0
- mcli/lib/logger/__init__.py +3 -0
- mcli/lib/performance/__init__.py +17 -0
- mcli/lib/pickles/__init__.py +1 -0
- mcli/lib/shell/__init__.py +0 -0
- mcli/lib/toml/__init__.py +1 -0
- mcli/lib/watcher/__init__.py +0 -0
- mcli/ml/__init__.py +16 -0
- mcli/ml/api/__init__.py +30 -0
- mcli/ml/api/routers/__init__.py +27 -0
- mcli/ml/api/schemas.py +2 -2
- mcli/ml/auth/__init__.py +45 -0
- mcli/ml/auth/models.py +2 -2
- mcli/ml/backtesting/__init__.py +39 -0
- mcli/ml/cli/__init__.py +5 -0
- mcli/ml/cli/main.py +1 -1
- mcli/ml/config/__init__.py +33 -0
- mcli/ml/configs/__init__.py +16 -0
- mcli/ml/dashboard/__init__.py +12 -0
- mcli/ml/dashboard/app_integrated.py +296 -30
- mcli/ml/dashboard/app_training.py +1 -1
- mcli/ml/dashboard/components/__init__.py +7 -0
- mcli/ml/dashboard/pages/__init__.py +6 -0
- mcli/ml/dashboard/pages/cicd.py +1 -1
- mcli/ml/dashboard/pages/debug_dependencies.py +364 -0
- mcli/ml/dashboard/pages/gravity_viz.py +565 -0
- mcli/ml/dashboard/pages/monte_carlo_predictions.py +555 -0
- mcli/ml/dashboard/pages/overview.py +378 -0
- mcli/ml/dashboard/pages/predictions_enhanced.py +20 -6
- mcli/ml/dashboard/pages/scrapers_and_logs.py +22 -6
- mcli/ml/dashboard/pages/test_portfolio.py +423 -0
- mcli/ml/dashboard/pages/trading.py +768 -0
- mcli/ml/dashboard/streamlit_extras_utils.py +297 -0
- mcli/ml/dashboard/utils.py +161 -0
- mcli/ml/dashboard/warning_suppression.py +34 -0
- mcli/ml/data_ingestion/__init__.py +39 -0
- mcli/ml/database/__init__.py +47 -0
- mcli/ml/database/session.py +169 -16
- mcli/ml/experimentation/__init__.py +29 -0
- mcli/ml/features/__init__.py +39 -0
- mcli/ml/mlops/__init__.py +33 -0
- mcli/ml/models/__init__.py +94 -0
- mcli/ml/monitoring/__init__.py +25 -0
- mcli/ml/optimization/__init__.py +27 -0
- mcli/ml/predictions/__init__.py +5 -0
- mcli/ml/predictions/monte_carlo.py +428 -0
- mcli/ml/preprocessing/__init__.py +28 -0
- mcli/ml/scripts/__init__.py +1 -0
- mcli/ml/trading/__init__.py +66 -0
- mcli/ml/trading/alpaca_client.py +417 -0
- mcli/ml/trading/migrations.py +164 -0
- mcli/ml/trading/models.py +418 -0
- mcli/ml/trading/paper_trading.py +326 -0
- mcli/ml/trading/risk_management.py +370 -0
- mcli/ml/trading/trading_service.py +480 -0
- mcli/ml/training/__init__.py +10 -0
- mcli/mygroup/__init__.py +3 -0
- mcli/public/__init__.py +1 -0
- mcli/public/commands/__init__.py +2 -0
- mcli/self/__init__.py +3 -0
- mcli/self/self_cmd.py +514 -15
- mcli/workflow/__init__.py +0 -0
- mcli/workflow/daemon/__init__.py +15 -0
- mcli/workflow/daemon/daemon.py +21 -3
- mcli/workflow/dashboard/__init__.py +5 -0
- mcli/workflow/docker/__init__.py +0 -0
- mcli/workflow/file/__init__.py +0 -0
- mcli/workflow/gcloud/__init__.py +1 -0
- mcli/workflow/git_commit/__init__.py +0 -0
- mcli/workflow/interview/__init__.py +0 -0
- mcli/workflow/politician_trading/__init__.py +4 -0
- mcli/workflow/registry/__init__.py +0 -0
- mcli/workflow/repo/__init__.py +0 -0
- mcli/workflow/scheduler/__init__.py +25 -0
- mcli/workflow/search/__init__.py +0 -0
- mcli/workflow/sync/__init__.py +5 -0
- mcli/workflow/videos/__init__.py +1 -0
- mcli/workflow/wakatime/__init__.py +80 -0
- {mcli_framework-7.2.0.dist-info → mcli_framework-7.4.0.dist-info}/METADATA +4 -1
- {mcli_framework-7.2.0.dist-info → mcli_framework-7.4.0.dist-info}/RECORD +97 -18
- {mcli_framework-7.2.0.dist-info → mcli_framework-7.4.0.dist-info}/WHEEL +0 -0
- {mcli_framework-7.2.0.dist-info → mcli_framework-7.4.0.dist-info}/entry_points.txt +0 -0
- {mcli_framework-7.2.0.dist-info → mcli_framework-7.4.0.dist-info}/licenses/LICENSE +0 -0
- {mcli_framework-7.2.0.dist-info → mcli_framework-7.4.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,297 @@
|
|
|
1
|
+
"""Utility functions using streamlit-extras for enhanced dashboard UI"""

import streamlit as st

# Try to import streamlit-extras components. When the optional package is
# missing, HAS_EXTRAS is flipped to False and every helper in this module
# falls back to plain-Streamlit rendering.
HAS_EXTRAS = True
try:
    from streamlit_extras.metric_cards import style_metric_cards
    from streamlit_extras.badges import badge
    from streamlit_extras.colored_header import colored_header
    from streamlit_extras.card import card
    from streamlit_extras.stoggle import stoggle
    from streamlit_extras.grid import grid
    from streamlit_extras.add_vertical_space import add_vertical_space
    from streamlit_extras.stylable_container import stylable_container
except ImportError:
    HAS_EXTRAS = False
    # Placeholders so the module-level names always exist regardless of
    # whether streamlit-extras is installed.
    style_metric_cards = None
    badge = None
    colored_header = None
    card = None
    stoggle = None
    grid = None
    add_vertical_space = None
    stylable_container = None
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def enhanced_metrics(metrics_data: list, use_container_width: bool = True):
    """
    Display metric cards, styled via streamlit-extras when available.

    Args:
        metrics_data: List of dicts with keys: label, value, delta (optional)
        use_container_width: Accepted for API compatibility; currently unused
            (each metric renders inside its own column)

    Example:
        enhanced_metrics([
            {"label": "Total Transactions", "value": "1,234", "delta": "+12%"},
            {"label": "Portfolio Value", "value": "$50,000", "delta": "-2.3%"},
        ])
    """
    # st.columns(0) raises, so render nothing for an empty list.
    if not metrics_data:
        return

    # Single rendering loop — the original duplicated this block verbatim
    # in both the fallback and the streamlit-extras branch.
    cols = st.columns(len(metrics_data))
    for i, metric in enumerate(metrics_data):
        with cols[i]:
            st.metric(
                label=metric["label"],
                value=metric["value"],
                delta=metric.get("delta"),
            )

    # Card styling is purely additive; apply it only when extras exist.
    if HAS_EXTRAS:
        style_metric_cards()
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def status_badge(label: str, url: str = None):
    """
    Render a status badge (e.g. "Live", "Production", "Beta").

    Args:
        label: Badge text
        url: Optional URL to link to
    """
    if HAS_EXTRAS:
        badge(type="success", name=label, url=url)
    else:
        # Plain-Streamlit fallback: bold text, no link.
        st.markdown(f"**{label}**")
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def section_header(label: str, description: str = None, divider: str = "rainbow"):
    """
    Render a colored section header with an optional description.

    Args:
        label: Header text
        description: Optional description text
        divider: Color of divider line (ignored by the fallback)
    """
    if HAS_EXTRAS:
        colored_header(
            label=label,
            description=description or "",
            color_name=divider,
        )
        return

    # Plain-Streamlit fallback.
    st.header(label)
    if description:
        st.markdown(description)
    st.divider()
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def info_card(title: str, text: str, image: str = None, url: str = None,
              has_button: bool = False, button_text: str = "Learn More"):
    """
    Render an information card.

    Args:
        title: Card title
        text: Card content
        image: Optional image URL
        url: Optional URL to link to
        has_button: Whether to show a button (fallback rendering only)
        button_text: Button text if has_button is True
    """
    if HAS_EXTRAS:
        # Only pass optional keys that were actually provided.
        card_kwargs = {"title": title, "text": text}
        if image:
            card_kwargs["image"] = image
        if url:
            card_kwargs["url"] = url
        card(**card_kwargs)
        return

    # Plain-Streamlit fallback.
    with st.container():
        st.subheader(title)
        if image:
            st.image(image)
        st.markdown(text)
        if url and has_button:
            st.link_button(button_text, url)
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
def collapsible_section(label: str, content_fn, default_open: bool = False):
    """
    Create a collapsible section that runs content_fn() when expanded.

    Args:
        label: Section label
        content_fn: Zero-argument callable that renders the section content
            via Streamlit calls
        default_open: Whether section starts open

    Note:
        streamlit-extras' ``stoggle`` accepts only static string content, so
        the original ``stoggle(label, content_fn)`` call could never execute
        the callable (and silently dropped ``default_open``). A native
        ``st.expander`` is used unconditionally, which renders correctly.
    """
    with st.expander(label, expanded=default_open):
        content_fn()
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def dashboard_grid(num_cols: int = 3, gap: str = "medium"):
    """
    Create a responsive grid layout.

    Args:
        num_cols: Number of columns
        gap: Gap size between columns (ignored by the fallback)

    Returns:
        A streamlit-extras grid when available, otherwise a plain list of
        st.columns; treat either as a sequence of column containers.
    """
    if not HAS_EXTRAS:
        return st.columns(num_cols)
    return grid(num_cols, gap=gap)
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
def vertical_space(lines: int = 1):
    """
    Insert vertical whitespace into the page.

    Args:
        lines: Number of blank lines to add
    """
    if HAS_EXTRAS:
        add_vertical_space(lines)
    else:
        # Fallback: one empty write per requested line.
        for _ in range(lines):
            st.write("")
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def styled_container(key: str, css_styles: str):
    """
    Create a container with custom CSS styling.

    Args:
        key: Unique key for the container
        css_styles: CSS styles to apply (ignored by the fallback)

    Returns:
        Context manager for the container (styled only when extras exist)
    """
    if HAS_EXTRAS:
        return stylable_container(key=key, css_styles=css_styles)
    return st.container()
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
def trading_status_card(status: str, portfolio_value: float, daily_pnl: float,
                        positions: int, cash: float):
    """
    Display a trading status summary card.

    Args:
        status: Trading status (e.g., "Active", "Paused")
        portfolio_value: Current portfolio value
        daily_pnl: Daily profit/loss (signed)
        positions: Number of open positions
        cash: Available cash
    """
    status_color = "🟢" if status == "Active" else "🔴"
    # "+" prefix for gains; negative values already carry their own sign.
    # (The original also computed an unused pnl_emoji local — removed.)
    pnl_sign = "+" if daily_pnl >= 0 else ""

    section_header(
        f"{status_color} Trading Status: {status}",
        "Real-time portfolio monitoring and execution",
        divider="blue",
    )

    enhanced_metrics([
        {
            "label": "Portfolio Value",
            "value": f"${portfolio_value:,.2f}",
            "delta": f"{pnl_sign}${daily_pnl:,.2f}",
        },
        {
            "label": "Open Positions",
            "value": str(positions),
        },
        {
            "label": "Available Cash",
            "value": f"${cash:,.2f}",
        },
    ])
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
def data_quality_indicators(total_records: int, clean_records: int,
                            errors: int, last_update: str):
    """
    Display data quality indicators.

    Args:
        total_records: Total number of records
        clean_records: Number of clean records
        errors: Number of errors
        last_update: Last update timestamp
    """
    has_records = total_records > 0
    # Guard against division by zero when the table is empty.
    quality_pct = clean_records / total_records * 100 if has_records else 0
    error_delta = f"{(errors / total_records * 100):.2f}%" if has_records else "0%"

    section_header(
        "📊 Data Quality Metrics",
        f"Last updated: {last_update}",
        divider="green",
    )

    enhanced_metrics([
        {
            "label": "Total Records",
            "value": f"{total_records:,}",
        },
        {
            "label": "Data Quality",
            "value": f"{quality_pct:.1f}%",
            "delta": f"{clean_records:,} clean",
        },
        {
            "label": "Errors",
            "value": str(errors),
            "delta": error_delta,
        },
    ])
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
# Public API of this module; HAS_EXTRAS lets callers branch on whether
# streamlit-extras styling is active.
__all__ = [
    'HAS_EXTRAS',
    'enhanced_metrics',
    'status_badge',
    'section_header',
    'info_card',
    'collapsible_section',
    'dashboard_grid',
    'vertical_space',
    'styled_container',
    'trading_status_card',
    'data_quality_indicators',
]
|
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
"""Shared utility functions for dashboard pages"""

import os
import logging
import warnings
from typing import List, Optional
import pandas as pd
import streamlit as st
from supabase import Client, create_client

# Suppress Streamlit warnings when used outside runtime context
# (these fire when the module is imported from scripts/tests rather than
# a running Streamlit app).
warnings.filterwarnings("ignore", message=".*missing ScriptRunContext.*")
warnings.filterwarnings("ignore", message=".*No runtime found.*")
warnings.filterwarnings("ignore", message=".*Session state does not function.*")
warnings.filterwarnings("ignore", message=".*to view this Streamlit app.*")

# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def get_supabase_client() -> Optional[Client]:
    """Get Supabase client with Streamlit Cloud secrets support.

    Credential lookup order: Streamlit secrets (Streamlit Cloud), then
    environment variables (local development).

    Returns:
        A connected Client, or None when credentials are missing, client
        creation fails, or the connection smoke-test fails.
    """
    # Try Streamlit secrets first (for Streamlit Cloud), then fall back to
    # environment variables (for local dev).
    try:
        url = st.secrets.get("SUPABASE_URL", "")
        key = st.secrets.get("SUPABASE_KEY", "") or st.secrets.get("SUPABASE_SERVICE_ROLE_KEY", "")
    except (AttributeError, FileNotFoundError):
        # Secrets not available, try environment variables
        url = os.getenv("SUPABASE_URL", "")
        key = os.getenv("SUPABASE_KEY", "") or os.getenv("SUPABASE_SERVICE_ROLE_KEY", "")

    if not url or not key:
        logger.warning("Supabase credentials not found")
        return None

    try:
        client = create_client(url, key)
        # Smoke-test the connection with a cheap one-row query.
        # (The original bound the result to an unused local.)
        try:
            client.table("politicians").select("id").limit(1).execute()
            logger.info(f"✅ Supabase connection successful (URL: {url[:30]}...)")
            return client
        except Exception as conn_error:
            # Log as well as surfacing in the UI — the original only did
            # st.error, which is invisible outside a Streamlit run.
            logger.error(f"Supabase connection test failed: {conn_error}")
            st.error(f"❌ Supabase connection failed: {conn_error}")
            return None
    except Exception as e:
        logger.error(f"Failed to create Supabase client: {e}")
        return None
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def get_politician_names() -> List[str]:
    """Get all politician names from database for a searchable dropdown.

    Falls back to a small hard-coded list whenever the database is
    unavailable, the query fails, or it returns no rows.
    """
    fallback = ["Nancy Pelosi", "Paul Pelosi", "Dan Crenshaw", "Josh Gottheimer"]
    try:
        client = get_supabase_client()
        if not client:
            return fallback

        result = client.table("politicians").select("first_name, last_name").execute()
        names = [f"{row['first_name']} {row['last_name']}" for row in result.data]
        return names if names else fallback
    except Exception as e:
        logger.error(f"Failed to get politician names: {e}")
        return fallback
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def get_disclosures_data() -> pd.DataFrame:
    """Get trading disclosures from Supabase with proper schema mapping.

    Returns:
        Up to 1000 most-recent disclosures as a DataFrame; demo data when
        Supabase is unreachable, the query fails, or the table is empty.
    """
    client = get_supabase_client()
    if not client:
        return _generate_demo_disclosures()

    try:
        # Fetch the most recent disclosures directly. The original issued a
        # preliminary `SELECT *` count query, which pulled the entire table
        # over the wire just to learn the row count; the empty-result check
        # below already covers the zero-row case.
        response = (
            client.table("trading_disclosures")
            .select("*")
            .order("disclosure_date", desc=True)
            .limit(1000)
            .execute()
        )

        if not response.data:
            return _generate_demo_disclosures()

        return pd.DataFrame(response.data)

    except Exception as e:
        logger.error(f"Failed to fetch disclosures: {e}")
        return _generate_demo_disclosures()
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def _generate_demo_disclosures() -> pd.DataFrame:
    """Generate demo trading disclosure data for testing."""
    st.info("🔵 Using demo trading data (Supabase unavailable)")

    import random
    from datetime import datetime, timedelta

    politicians = ["Nancy Pelosi", "Paul Pelosi", "Dan Crenshaw", "Josh Gottheimer"]
    tickers = ["AAPL", "MSFT", "GOOGL", "AMZN", "NVDA", "TSLA", "META", "AMD"]
    transaction_types = ["Purchase", "Sale"]

    # 50 synthetic rows with randomized fields dated within the past year.
    rows = [
        {
            "politician_name": random.choice(politicians),
            "ticker_symbol": random.choice(tickers),
            "transaction_type": random.choice(transaction_types),
            "amount_min": random.randint(1000, 100000),
            "amount_max": random.randint(100000, 1000000),
            "disclosure_date": (datetime.now() - timedelta(days=random.randint(1, 365))).strftime("%Y-%m-%d"),
            "asset_description": f"{random.choice(tickers)} Stock",
        }
        for _ in range(50)
    ]
    return pd.DataFrame(rows)
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
def get_politician_trading_history(politician_name: str) -> pd.DataFrame:
    """Get trading history for a specific politician.

    Args:
        politician_name: Full display name, e.g. "Nancy Pelosi".

    Returns:
        Up to 100 most-recent disclosures; empty DataFrame when the client
        is unavailable, the name has fewer than two tokens, or the query
        fails or matches nothing.
    """
    try:
        client = get_supabase_client()
        if not client:
            return pd.DataFrame()  # Return empty if no client

        # Require at least "First Last"; reject single-token names.
        # (The original also split out first/last name here, but those
        # locals were never used — the query filters on the full name.)
        if len(politician_name.split()) < 2:
            return pd.DataFrame()

        # Get trading disclosures for this politician.
        response = (
            client.table("trading_disclosures")
            .select("*")
            .eq("politician_name", politician_name)
            .order("disclosure_date", desc=True)
            .limit(100)
            .execute()
        )

        if response.data:
            return pd.DataFrame(response.data)
        return pd.DataFrame()

    except Exception as e:
        logger.warning(f"Failed to fetch trading history for {politician_name}: {e}")
        return pd.DataFrame()
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
"""Warning suppression utilities for Streamlit components used outside runtime context"""
|
|
2
|
+
|
|
3
|
+
import warnings
|
|
4
|
+
import logging
|
|
5
|
+
from contextlib import contextmanager
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
@contextmanager
def suppress_streamlit_warnings():
    """Context manager that silences Streamlit's out-of-runtime warnings.

    Within the block, the known "no runtime" warning messages are filtered
    and the "streamlit" logger is raised to ERROR; the logger's previous
    level is restored on exit.
    """
    with warnings.catch_warnings():
        # Filter each known noisy message pattern.
        for pattern in (
            ".*missing ScriptRunContext.*",
            ".*No runtime found.*",
            ".*Session state does not function.*",
            ".*to view this Streamlit app.*",
        ):
            warnings.filterwarnings("ignore", message=pattern)

        # Also quiet Streamlit's own logger for the duration of the block.
        st_logger = logging.getLogger("streamlit")
        saved_level = st_logger.level
        st_logger.setLevel(logging.ERROR)
        try:
            yield
        finally:
            st_logger.setLevel(saved_level)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def suppress_streamlit_warnings_decorator(func):
    """Decorator to suppress Streamlit warnings for a function.

    Args:
        func: Callable to wrap.

    Returns:
        A wrapper that runs *func* inside suppress_streamlit_warnings().
    """
    from functools import wraps  # local import: keeps module deps unchanged

    @wraps(func)  # preserve the wrapped function's name/docstring/signature
    def wrapper(*args, **kwargs):
        with suppress_streamlit_warnings():
            return func(*args, **kwargs)

    return wrapper
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
"""Real-time data ingestion pipeline"""

# Re-export the package's public classes from their implementation modules.
from .api_connectors import (
    AlphaVantageConnector,
    CongressionalDataAPI,
    PolygonIOConnector,
    QuiverQuantConnector,
    StockMarketAPI,
    YahooFinanceConnector,
)
from .data_pipeline import (
    DataLoader,
    DataTransformer,
    DataValidator,
    IngestionPipeline,
)
from .stream_processor import (
    DataStream,
    KafkaConsumer,
    StreamProcessor,
    WebSocketConsumer,
)

# Public API of the package.
__all__ = [
    "StreamProcessor",
    "DataStream",
    "KafkaConsumer",
    "WebSocketConsumer",
    "CongressionalDataAPI",
    "StockMarketAPI",
    "AlphaVantageConnector",
    "YahooFinanceConnector",
    "PolygonIOConnector",
    "QuiverQuantConnector",
    "IngestionPipeline",
    "DataValidator",
    "DataTransformer",
    "DataLoader",
]
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
"""Database models and utilities"""

# Re-export ORM models and session helpers from their implementation modules.
from .models import (
    Alert,
    BacktestResult,
    Base,
    DataVersion,
    Experiment,
    FeatureSet,
    Model,
    Politician,
    Portfolio,
    Prediction,
    StockData,
    Trade,
    User,
)
from .session import (
    AsyncSessionLocal,
    SessionLocal,
    async_engine,
    engine,
    get_async_db,
    get_db,
)

# Public API of the package.
__all__ = [
    "Base",
    "User",
    "Trade",
    "Politician",
    "StockData",
    "Prediction",
    "Portfolio",
    "Alert",
    "BacktestResult",
    "Experiment",
    "Model",
    "FeatureSet",
    "DataVersion",
    "get_db",
    "get_async_db",
    "SessionLocal",
    "AsyncSessionLocal",
    "engine",
    "async_engine",
]
|