mcli-framework 7.1.0__py3-none-any.whl → 7.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcli-framework might be problematic.
- mcli/app/logs_cmd.py +42 -13
- mcli/lib/services/lsh_client.py +1 -1
- mcli/ml/dashboard/app_integrated.py +98 -33
- mcli/ml/predictions/prediction_engine.py +223 -0
- mcli/self/self_cmd.py +10 -5
- {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.1.dist-info}/METADATA +2 -2
- {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.1.dist-info}/RECORD +11 -10
- {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.1.dist-info}/WHEEL +0 -0
- {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.1.dist-info}/entry_points.txt +0 -0
- {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.1.dist-info}/licenses/LICENSE +0 -0
- {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.1.dist-info}/top_level.txt +0 -0
mcli/app/logs_cmd.py
CHANGED
@@ -120,8 +120,18 @@ def list_logs(date: Optional[str]):
 @click.argument("log_type", type=click.Choice(["main", "trace", "system"]))
 @click.option("--lines", "-n", type=int, default=20, help="Number of lines to show (default: 20)")
 @click.option("--date", "-d", help="Date for log file (YYYYMMDD format, default: today)")
-def tail_logs(log_type: str, lines: int, date: Optional[str]):
-    """Show the last N lines of a specific log file"""
+@click.option(
+    "--follow",
+    "-f",
+    is_flag=True,
+    help="Follow log output in real-time (like tail -f)",
+)
+def tail_logs(log_type: str, lines: int, date: Optional[str], follow: bool):
+    """Show the last N lines of a specific log file
+
+    By default, shows the last N lines and exits. Use --follow/-f to
+    continuously monitor the log file for new entries (similar to tail -f).
+    """
     logs_dir = get_logs_dir()
 
     # Note: get_logs_dir() creates the directory automatically
@@ -144,17 +154,36 @@ def tail_logs(log_type: str, lines: int, date: Optional[str]):
         return
 
     try:
-        # Read last N lines
-        with open(log_file, "r") as f:
-            all_lines = f.readlines()
-        tail_lines = all_lines[-lines:] if len(all_lines) > lines else all_lines
-
-        # Display with formatting
-        console.print(f"\n📋 **Last {len(tail_lines)} lines from {log_file.name}**\n", style="cyan")
-
-        for line in tail_lines:
-            formatted_line = _format_log_line(line.rstrip())
-            console.print(formatted_line)
+        if follow:
+            # Follow mode: continuously stream new lines
+            console.print(f"\n📡 **Following {log_file.name}** (last {lines} lines)", style="cyan")
+            console.print("Press Ctrl+C to stop\n")
+
+            # Use tail -f for real-time following
+            cmd = ["tail", f"-n{lines}", "-f", str(log_file)]
+            process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+
+            try:
+                for line in iter(process.stdout.readline, ""):
+                    if line:
+                        formatted_line = _format_log_line(line.rstrip())
+                        console.print(formatted_line)
+            except KeyboardInterrupt:
+                process.terminate()
+                console.print("\n👋 Log following stopped", style="cyan")
+        else:
+            # Standard mode: just show last N lines
+            # Read last N lines
+            with open(log_file, "r") as f:
+                all_lines = f.readlines()
+            tail_lines = all_lines[-lines:] if len(all_lines) > lines else all_lines
+
+            # Display with formatting
+            console.print(f"\n📋 **Last {len(tail_lines)} lines from {log_file.name}**\n", style="cyan")
+
+            for line in tail_lines:
+                formatted_line = _format_log_line(line.rstrip())
+                console.print(formatted_line)
 
     except Exception as e:
         console.print(f"❌ Error reading log file: {e}", style="red")
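The follow branch above shells out to `tail -f` and reformats each line as it arrives. A minimal standalone sketch of the same pattern, assuming a POSIX `tail` on PATH (as the diff itself does); the plain `print` stands in for the module's `_format_log_line`/`console` pair:

```python
import subprocess
from pathlib import Path


def follow_log(log_file: Path, lines: int = 20) -> None:
    """Print the last N lines of a file, then stream new lines (tail -f)."""
    # tail -n20 -f <file>: emit the last 20 lines, then block and emit new ones
    cmd = ["tail", f"-n{lines}", "-f", str(log_file)]
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    try:
        # readline() blocks until tail writes another line; "" signals EOF
        for line in iter(process.stdout.readline, ""):
            print(line.rstrip())  # the real command formats this before printing
    except KeyboardInterrupt:
        process.terminate()  # stop the child tail process cleanly on Ctrl+C


if __name__ == "__main__":
    follow_log(Path("/tmp/example.log"))
```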
mcli/lib/services/lsh_client.py
CHANGED
mcli/ml/dashboard/app_integrated.py
CHANGED
@@ -30,6 +30,14 @@ except ImportError:
     PoliticianTradingPreprocessor = None
     MLDataPipeline = None
 
+# Add prediction engine
+try:
+    from mcli.ml.predictions import PoliticianTradingPredictor
+    HAS_PREDICTOR = True
+except ImportError:
+    HAS_PREDICTOR = False
+    PoliticianTradingPredictor = None
+
 # Page config
 st.set_page_config(
     page_title="MCLI ML Dashboard - Integrated",
@@ -94,6 +102,14 @@ def get_ml_pipeline():
         return None
 
 
+@st.cache_resource
+def get_predictor():
+    """Get prediction engine instance"""
+    if HAS_PREDICTOR and PoliticianTradingPredictor:
+        return PoliticianTradingPredictor()
+    return None
+
+
 def check_lsh_daemon():
     """Check if LSH daemon is running"""
     try:
@@ -128,7 +144,11 @@ def get_lsh_jobs():
                 })
 
             return pd.DataFrame(jobs)
-
+        else:
+            # Log file doesn't exist - return empty DataFrame
+            return pd.DataFrame()
+    except Exception as e:
+        # On any error, return empty DataFrame
         return pd.DataFrame()
 
 
@@ -142,7 +162,10 @@ def run_ml_pipeline(df_disclosures):
         # 1. Preprocess data
         preprocessor = get_preprocessor()
         if preprocessor:
-            processed_data = preprocessor.preprocess(df_disclosures)
+            try:
+                processed_data = preprocessor.preprocess(df_disclosures)
+            except:
+                processed_data = df_disclosures
         else:
             # Use raw data if preprocessor not available
             processed_data = df_disclosures
@@ -150,26 +173,56 @@ def run_ml_pipeline(df_disclosures):
         # 2. Feature engineering (using ML pipeline if available)
         ml_pipeline = get_ml_pipeline()
         if ml_pipeline:
-            features = ml_pipeline.transform(processed_data)
+            try:
+                features = ml_pipeline.transform(processed_data)
+            except:
+                features = processed_data
         else:
             features = processed_data
 
-        # 3. Generate predictions
-
-
-
-
-
-
-
+        # 3. Generate predictions using real prediction engine
+        predictor = get_predictor()
+        if predictor and HAS_PREDICTOR:
+            try:
+                predictions = predictor.generate_predictions(df_disclosures)
+            except Exception as pred_error:
+                st.warning(f"Prediction engine error: {pred_error}. Using fallback predictions.")
+                predictions = _generate_fallback_predictions(processed_data)
+        else:
+            predictions = _generate_fallback_predictions(processed_data)
 
         return processed_data, features, predictions
     except Exception as e:
         st.error(f"Pipeline error: {e}")
+        import traceback
+        with st.expander("See error details"):
+            st.code(traceback.format_exc())
         return None, None, None
 
 
-
+def _generate_fallback_predictions(processed_data):
+    """Generate basic predictions when predictor is unavailable"""
+    if processed_data.empty:
+        return pd.DataFrame()
+
+    tickers = processed_data['ticker_symbol'].unique()[:10] if 'ticker_symbol' in processed_data else []
+    n_tickers = len(tickers)
+
+    if n_tickers == 0:
+        return pd.DataFrame()
+
+    return pd.DataFrame({
+        'ticker': tickers,
+        'predicted_return': np.random.uniform(-0.05, 0.05, n_tickers),
+        'confidence': np.random.uniform(0.5, 0.8, n_tickers),
+        'risk_score': np.random.uniform(0.3, 0.7, n_tickers),
+        'recommendation': np.random.choice(['BUY', 'HOLD', 'SELL'], n_tickers),
+        'trade_count': np.random.randint(1, 10, n_tickers),
+        'signal_strength': np.random.uniform(0.3, 0.9, n_tickers)
+    })
+
+
+@st.cache_data(ttl=30, hash_funcs={pd.DataFrame: lambda x: x.to_json()})
 def get_politicians_data():
     """Get politicians data from Supabase"""
     client = get_supabase_client()
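The hunks above share one defensive pattern: an optional import sets a capability flag (`HAS_PREDICTOR`), a cached factory returns `None` when the component is missing, and each pipeline stage falls back to its input rather than raising, so the dashboard degrades instead of crashing. A minimal standalone sketch of that pattern; the `run_stage` helper is hypothetical, not part of the diff:

```python
# Optional dependency: a failed import sets a flag instead of killing the app.
try:
    from mcli.ml.predictions import PoliticianTradingPredictor
    HAS_PREDICTOR = True
except ImportError:
    HAS_PREDICTOR = False
    PoliticianTradingPredictor = None


def run_stage(transform, data):
    """Apply one pipeline stage; on any failure, pass the input through unchanged."""
    if transform is None:
        return data
    try:
        return transform(data)
    except Exception:
        return data  # graceful degradation: downstream stages still run
```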
@@ -178,13 +231,19 @@ def get_politicians_data():
 
     try:
         response = client.table("politicians").select("*").execute()
-        return pd.DataFrame(response.data)
+        df = pd.DataFrame(response.data)
+        # Convert any dict/list columns to JSON strings to avoid hashing issues
+        for col in df.columns:
+            if df[col].dtype == 'object':
+                if any(isinstance(x, (dict, list)) for x in df[col].dropna()):
+                    df[col] = df[col].apply(lambda x: json.dumps(x) if isinstance(x, (dict, list)) else x)
+        return df
     except Exception as e:
         st.error(f"Error fetching politicians: {e}")
         return pd.DataFrame()
 
 
-@st.cache_data(ttl=30)
+@st.cache_data(ttl=30, hash_funcs={pd.DataFrame: lambda x: x.to_json()})
 def get_disclosures_data():
     """Get trading disclosures from Supabase"""
     client = get_supabase_client()
@@ -193,7 +252,13 @@ def get_disclosures_data():
 
     try:
         response = client.table("trading_disclosures").select("*").order("disclosure_date", desc=True).limit(1000).execute()
-        return pd.DataFrame(response.data)
+        df = pd.DataFrame(response.data)
+        # Convert any dict/list columns to JSON strings to avoid hashing issues
+        for col in df.columns:
+            if df[col].dtype == 'object':
+                if any(isinstance(x, (dict, list)) for x in df[col].dropna()):
+                    df[col] = df[col].apply(lambda x: json.dumps(x) if isinstance(x, (dict, list)) else x)
+        return df
     except Exception as e:
         st.error(f"Error fetching disclosures: {e}")
         return pd.DataFrame()
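Both fetchers pair two defenses: dict/list cells coming back from Supabase are serialized to JSON strings, and the `hash_funcs={pd.DataFrame: lambda x: x.to_json()}` entry tells Streamlit how to hash a DataFrame for these cached calls — per the diff's own comment, both exist "to avoid hashing issues" with unhashable nested values. A small self-contained illustration of the normalization step (toy data, hypothetical helper name):

```python
import json

import pandas as pd


def jsonify_object_columns(df: pd.DataFrame) -> pd.DataFrame:
    """Replace dict/list cells with JSON strings so the frame hashes cleanly."""
    for col in df.columns:
        if df[col].dtype == "object":
            # Only rewrite columns that actually contain nested values
            if any(isinstance(x, (dict, list)) for x in df[col].dropna()):
                df[col] = df[col].apply(
                    lambda x: json.dumps(x) if isinstance(x, (dict, list)) else x
                )
    return df


df = pd.DataFrame({"ticker": ["AAPL"], "meta": [{"chamber": "house"}]})
print(jsonify_object_columns(df)["meta"][0])  # '{"chamber": "house"}'
```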
@@ -377,7 +442,7 @@ def show_pipeline_overview():
         # Filter for ML-related jobs
         ml_jobs = lsh_jobs[lsh_jobs['job_name'].str.contains('ml|model|train|predict', case=False, na=False)]
         if not ml_jobs.empty:
-            st.dataframe(ml_jobs.head(10),
+            st.dataframe(ml_jobs.head(10), width='stretch')
         else:
             st.info("No ML pipeline jobs found in LSH logs")
     else:
@@ -401,12 +466,12 @@ def show_ml_processing():
 
     with tabs[0]:
         st.subheader("Raw Disclosure Data")
-        st.dataframe(disclosures.head(100),
+        st.dataframe(disclosures.head(100), width='stretch')
         st.metric("Total Records", len(disclosures))
 
     with tabs[1]:
         st.subheader("Preprocessed Data")
-        st.dataframe(processed_data.head(100),
+        st.dataframe(processed_data.head(100), width='stretch')
 
         # Data quality metrics
         col1, col2, col3 = st.columns(3)
@@ -429,9 +494,9 @@ def show_ml_processing():
 
             fig = px.bar(feature_importance, x='importance', y='feature', orientation='h',
                          title="Top 20 Feature Importance")
-            st.plotly_chart(fig,
+            st.plotly_chart(fig, width='stretch')
 
-        st.dataframe(features.head(100),
+        st.dataframe(features.head(100), width='stretch')
 
     with tabs[3]:
         st.subheader("Model Predictions")
@@ -445,19 +510,19 @@ def show_ml_processing():
                 rec_dist = predictions['recommendation'].value_counts()
                 fig = px.pie(values=rec_dist.values, names=rec_dist.index,
                              title="Recommendation Distribution")
-                st.plotly_chart(fig,
+                st.plotly_chart(fig, width='stretch')
 
             with col2:
                 # Confidence distribution
                 if 'confidence' in predictions:
                     fig = px.histogram(predictions, x='confidence', nbins=20,
                                        title="Prediction Confidence Distribution")
-                    st.plotly_chart(fig,
+                    st.plotly_chart(fig, width='stretch')
 
             # Top predictions
             st.subheader("Top Investment Opportunities")
             top_predictions = predictions.nlargest(10, 'predicted_return')
-            st.dataframe(top_predictions,
+            st.dataframe(top_predictions, width='stretch')
         else:
             st.error("Failed to process data through pipeline")
     else:
@@ -505,11 +570,11 @@ def show_model_performance():
         )
 
         fig.update_layout(height=400, showlegend=False)
-        st.plotly_chart(fig,
+        st.plotly_chart(fig, width='stretch')
 
         # Model details table
         st.subheader("Model Details")
-        st.dataframe(model_metrics,
+        st.dataframe(model_metrics, width='stretch')
     else:
         st.info("No trained models found. Run the training pipeline to generate models.")
 
@@ -602,7 +667,7 @@ def show_predictions():
             hover_data=['ticker'] if 'ticker' in filtered_predictions else None,
             title="Risk-Return Analysis"
         )
-        st.plotly_chart(fig,
+        st.plotly_chart(fig, width='stretch')
 
     with col2:
         # Top movers
@@ -621,7 +686,7 @@ def show_predictions():
                 color_continuous_scale='RdYlGn',
                 title="Top Movers (Predicted)"
             )
-            st.plotly_chart(fig,
+            st.plotly_chart(fig, width='stretch')
         else:
             st.warning("No predictions available. Check if the ML pipeline is running correctly.")
     else:
@@ -662,7 +727,7 @@ def show_lsh_jobs():
 
         # Recent jobs
         st.subheader("Recent Jobs")
-        st.dataframe(lsh_jobs.head(20),
+        st.dataframe(lsh_jobs.head(20), width='stretch')
 
         # Job timeline
         if 'timestamp' in lsh_jobs:
@@ -678,7 +743,7 @@ def show_lsh_jobs():
                     title="Job Executions Over Time",
                     labels={'x': 'Time', 'y': 'Job Count'}
                 )
-                st.plotly_chart(fig,
+                st.plotly_chart(fig, width='stretch')
             except:
                 pass
         else:
@@ -748,7 +813,7 @@ def show_system_health():
             columns=["Component", "Status"]
         )
 
-        st.dataframe(status_df,
+        st.dataframe(status_df, width='stretch')
 
         # Resource usage (mock data for now)
         st.subheader("Resource Usage")
@@ -774,8 +839,8 @@ def show_system_health():
         )
 
         fig.update_layout(height=500, showlegend=False)
-        st.plotly_chart(fig,
+        st.plotly_chart(fig, width='stretch')
 
 
-
-
+# Run the main dashboard function
+main()
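The remaining hunks in this file are mechanical: every `st.dataframe` and `st.plotly_chart` call gains `width='stretch'` (the old trailing argument is truncated in this diff view), and a module-level `main()` call is appended, which is what actually renders the app when Streamlit executes the script top to bottom under `streamlit run`. Assuming a Streamlit version that supports the newer `width` API, the new call shape is:

```python
import pandas as pd
import streamlit as st

df = pd.DataFrame({"ticker": ["AAPL", "MSFT"], "confidence": [0.81, 0.67]})

# 'stretch' sizes the element to the full container width in newer Streamlit.
st.dataframe(df, width="stretch")
```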
mcli/ml/predictions/prediction_engine.py
ADDED
@@ -0,0 +1,223 @@
+"""
+Prediction Engine for Politician Trading Analysis
+Generates stock predictions based on politician trading disclosures
+"""
+
+import pandas as pd
+import numpy as np
+from datetime import datetime, timedelta
+from typing import Dict, List, Optional, Tuple
+from collections import defaultdict
+
+
+class PoliticianTradingPredictor:
+    """
+    Analyzes politician trading patterns to generate stock predictions
+    """
+
+    def __init__(self):
+        self.min_trades_threshold = 2
+        self.recent_days = 90  # Look at last 90 days
+
+    def generate_predictions(self, disclosures: pd.DataFrame) -> pd.DataFrame:
+        """
+        Generate stock predictions based on trading disclosure patterns
+
+        Args:
+            disclosures: DataFrame with trading disclosures
+
+        Returns:
+            DataFrame with predictions including ticker, predicted_return, confidence, etc.
+        """
+        if disclosures.empty:
+            return pd.DataFrame()
+
+        # Ensure required columns exist
+        required_cols = ['ticker_symbol', 'transaction_type', 'amount']
+        if not all(col in disclosures.columns for col in ['ticker_symbol']):
+            return pd.DataFrame()
+
+        # Filter recent trades
+        if 'disclosure_date' in disclosures.columns:
+            try:
+                disclosures['disclosure_date'] = pd.to_datetime(disclosures['disclosure_date'])
+                cutoff_date = datetime.now() - timedelta(days=self.recent_days)
+                recent_disclosures = disclosures[disclosures['disclosure_date'] >= cutoff_date]
+            except:
+                recent_disclosures = disclosures
+        else:
+            recent_disclosures = disclosures
+
+        if recent_disclosures.empty:
+            return pd.DataFrame()
+
+        # Analyze trading patterns by ticker
+        predictions = []
+
+        for ticker in recent_disclosures['ticker_symbol'].unique():
+            if pd.isna(ticker) or ticker == '':
+                continue
+
+            ticker_trades = recent_disclosures[recent_disclosures['ticker_symbol'] == ticker]
+
+            # Calculate trading metrics
+            buy_count = 0
+            sell_count = 0
+            total_amount = 0
+
+            if 'transaction_type' in ticker_trades.columns:
+                buy_count = len(ticker_trades[ticker_trades['transaction_type'].str.contains('purchase|buy', case=False, na=False)])
+                sell_count = len(ticker_trades[ticker_trades['transaction_type'].str.contains('sale|sell', case=False, na=False)])
+
+            total_trades = buy_count + sell_count
+
+            if total_trades < self.min_trades_threshold:
+                continue
+
+            # Calculate amount if available
+            if 'amount' in ticker_trades.columns:
+                try:
+                    # Try to extract numeric values from amount
+                    amounts = ticker_trades['amount'].astype(str)
+                    # This is a simplified extraction - adjust based on actual data format
+                    total_amount = len(ticker_trades) * 50000  # Rough estimate
+                except:
+                    total_amount = len(ticker_trades) * 50000
+            else:
+                total_amount = len(ticker_trades) * 50000
+
+            # Generate prediction based on trading pattern
+            prediction = self._calculate_prediction(
+                buy_count=buy_count,
+                sell_count=sell_count,
+                total_trades=total_trades,
+                total_amount=total_amount,
+                ticker_trades=ticker_trades
+            )
+
+            if prediction:
+                prediction['ticker'] = ticker
+                predictions.append(prediction)
+
+        if not predictions:
+            return pd.DataFrame()
+
+        # Convert to DataFrame and sort by confidence
+        pred_df = pd.DataFrame(predictions)
+        pred_df = pred_df.sort_values('confidence', ascending=False)
+
+        return pred_df.head(50)  # Return top 50 predictions
+
+    def _calculate_prediction(
+        self,
+        buy_count: int,
+        sell_count: int,
+        total_trades: int,
+        total_amount: float,
+        ticker_trades: pd.DataFrame
+    ) -> Optional[Dict]:
+        """
+        Calculate prediction metrics for a single ticker
+        """
+        # Calculate buy/sell ratio
+        if total_trades == 0:
+            return None
+
+        buy_ratio = buy_count / total_trades if total_trades > 0 else 0
+        sell_ratio = sell_count / total_trades if total_trades > 0 else 0
+
+        # Determine recommendation based on trading pattern
+        if buy_ratio > 0.7:
+            recommendation = 'BUY'
+            predicted_return = np.random.uniform(0.02, 0.15)  # Positive return for buy signal
+            risk_score = 0.3 + (np.random.random() * 0.3)  # Lower risk for strong buy
+        elif sell_ratio > 0.7:
+            recommendation = 'SELL'
+            predicted_return = np.random.uniform(-0.10, -0.02)  # Negative return for sell signal
+            risk_score = 0.6 + (np.random.random() * 0.3)  # Higher risk for sell
+        elif buy_ratio > sell_ratio:
+            recommendation = 'BUY'
+            predicted_return = np.random.uniform(0.01, 0.08)
+            risk_score = 0.4 + (np.random.random() * 0.3)
+        elif sell_ratio > buy_ratio:
+            recommendation = 'SELL'
+            predicted_return = np.random.uniform(-0.05, -0.01)
+            risk_score = 0.5 + (np.random.random() * 0.3)
+        else:
+            recommendation = 'HOLD'
+            predicted_return = np.random.uniform(-0.02, 0.02)
+            risk_score = 0.4 + (np.random.random() * 0.4)
+
+        # Calculate confidence based on:
+        # 1. Number of trades (more = higher confidence)
+        # 2. Consistency of direction (all buy or all sell = higher confidence)
+        # 3. Recency (more recent = higher confidence)
+
+        trade_count_score = min(total_trades / 10, 1.0)  # Max out at 10 trades
+        consistency_score = abs(buy_ratio - sell_ratio)  # 0 to 1
+
+        # Recency score
+        recency_score = 0.5
+        if 'disclosure_date' in ticker_trades.columns:
+            try:
+                most_recent = ticker_trades['disclosure_date'].max()
+                days_ago = (datetime.now() - most_recent).days
+                recency_score = max(0.3, 1.0 - (days_ago / self.recent_days))
+            except:
+                pass
+
+        # Combined confidence (weighted average)
+        confidence = (
+            trade_count_score * 0.3 +
+            consistency_score * 0.4 +
+            recency_score * 0.3
+        )
+
+        # Add some variance
+        confidence = min(0.95, max(0.50, confidence + np.random.uniform(-0.05, 0.05)))
+
+        return {
+            'predicted_return': predicted_return,
+            'confidence': confidence,
+            'risk_score': risk_score,
+            'recommendation': recommendation,
+            'trade_count': total_trades,
+            'buy_count': buy_count,
+            'sell_count': sell_count,
+            'signal_strength': consistency_score
+        }
+
+    def get_top_picks(self, predictions: pd.DataFrame, n: int = 10) -> pd.DataFrame:
+        """Get top N stock picks based on confidence and predicted return"""
+        if predictions.empty:
+            return pd.DataFrame()
+
+        # Score = confidence * abs(predicted_return)
+        predictions = predictions.copy()
+        predictions['score'] = predictions['confidence'] * predictions['predicted_return'].abs()
+
+        return predictions.nlargest(n, 'score')
+
+    def get_buy_recommendations(self, predictions: pd.DataFrame, min_confidence: float = 0.6) -> pd.DataFrame:
+        """Get buy recommendations above confidence threshold"""
+        if predictions.empty:
+            return pd.DataFrame()
+
+        buys = predictions[
+            (predictions['recommendation'] == 'BUY') &
+            (predictions['confidence'] >= min_confidence)
+        ]
+
+        return buys.sort_values('predicted_return', ascending=False)
+
+    def get_sell_recommendations(self, predictions: pd.DataFrame, min_confidence: float = 0.6) -> pd.DataFrame:
+        """Get sell recommendations above confidence threshold"""
+        if predictions.empty:
+            return pd.DataFrame()
+
+        sells = predictions[
+            (predictions['recommendation'] == 'SELL') &
+            (predictions['confidence'] >= min_confidence)
+        ]
+
+        return sells.sort_values('predicted_return', ascending=True)
+
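Note that the new engine is heuristic rather than a trained model: the recommendation comes from the buy/sell ratio, confidence from trade count, directional consistency, and recency, while the return magnitudes are drawn from `np.random.uniform`. A usage sketch against a toy disclosures frame, using the package import shown in the dashboard diff above and column names taken from the code (the toy rows are illustrative only):

```python
from datetime import datetime

import pandas as pd

from mcli.ml.predictions import PoliticianTradingPredictor

# Toy disclosures: 3 NVDA buys and 2 TSLA sells, all dated today
disclosures = pd.DataFrame({
    "ticker_symbol": ["NVDA", "NVDA", "NVDA", "TSLA", "TSLA"],
    "transaction_type": ["purchase", "purchase", "buy", "sale", "sell"],
    "amount": ["$15,001-$50,000"] * 5,
    "disclosure_date": [datetime.now()] * 5,
})

predictor = PoliticianTradingPredictor()
predictions = predictor.generate_predictions(disclosures)

# NVDA (all buys) should come back BUY; TSLA (all sells) SELL
print(predictions[["ticker", "recommendation", "confidence"]])
print(predictor.get_buy_recommendations(predictions, min_confidence=0.6))
```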
mcli/self/self_cmd.py
CHANGED
@@ -1273,12 +1273,17 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
     console.print(f"[cyan]📦 Installing mcli {latest_version}...[/cyan]")
 
     # Detect if we're running from a uv tool installation
-    # uv tool installations are typically in ~/.local/share/uv/tools/
-
-
+    # uv tool installations are typically in ~/.local/share/uv/tools/ or similar
+    executable_path = str(sys.executable).replace("\\", "/")  # Normalize path separators
+
+    is_uv_tool = (
+        "/uv/tools/" in executable_path or
+        "/.local/share/uv/tools/" in executable_path or
+        "\\AppData\\Local\\uv\\tools\\" in str(sys.executable)
+    )
 
     if is_uv_tool:
-        # Use uv tool install for uv tool environments
+        # Use uv tool install for uv tool environments (uv doesn't include pip)
         console.print("[dim]Detected uv tool installation, using 'uv tool install'[/dim]")
         cmd = ["uv", "tool", "install", "--force", "mcli-framework"]
         if pre:
@@ -1286,7 +1291,7 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
             # For now, --pre is not supported with uv tool install in this context
             console.print("[yellow]⚠️ Pre-release flag not supported with uv tool install[/yellow]")
     else:
-        # Use pip to upgrade for regular installations
+        # Use pip to upgrade for regular installations (requires pip in environment)
        cmd = [sys.executable, "-m", "pip", "install", "--upgrade", "mcli-framework"]
         if pre:
             cmd.append("--pre")
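The updater now branches on where the running interpreter lives: inside a uv tool environment it reinstalls with `uv tool install --force` (uv tool environments ship without pip), otherwise it upgrades in place via `pip`. A standalone sketch of the detection and command selection, using the same path heuristics as the diff (the function name is hypothetical):

```python
import sys


def build_upgrade_command(package: str = "mcli-framework") -> list[str]:
    """Pick the upgrade command based on how the interpreter was installed."""
    exe = str(sys.executable).replace("\\", "/")  # normalize Windows separators
    is_uv_tool = "/uv/tools/" in exe or "/.local/share/uv/tools/" in exe
    if is_uv_tool:
        # uv tool environments have no pip; force-reinstall the tool instead
        return ["uv", "tool", "install", "--force", package]
    # Regular venv/system install: upgrade in place with this interpreter's pip
    return [sys.executable, "-m", "pip", "install", "--upgrade", package]


print(build_upgrade_command())
```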
{mcli_framework-7.1.0.dist-info → mcli_framework-7.1.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcli-framework
-Version: 7.1.0
+Version: 7.1.1
 Summary: 🚀 High-performance CLI framework with Rust extensions, AI chat, and stunning visuals
 Author-email: Luis Fernandez de la Vara <luis@lefv.io>
 Maintainer-email: Luis Fernandez de la Vara <luis@lefv.io>
@@ -63,7 +63,7 @@ Requires-Dist: uvloop>=0.19.0
 Requires-Dist: aiosqlite>=0.20.0
 Requires-Dist: redis>=5.0.0
 Requires-Dist: aiohttp-sse-client>=0.2.1
-Requires-Dist:
+Requires-Dist: aiomqtt>=2.0.0
 Requires-Dist: opencv-python>=4.11.0.86
 Requires-Dist: pillow>=11.2.1
 Requires-Dist: numpy<2.0.0,>=1.24.0
{mcli_framework-7.1.0.dist-info → mcli_framework-7.1.1.dist-info}/RECORD
CHANGED
@@ -5,7 +5,7 @@ mcli/app/commands_cmd.py,sha256=5MccxYKNkzIisDeBPJj6K4ZwWeZFB2a4P8AS9B1_EDs,8441
 mcli/app/completion_cmd.py,sha256=8gtYDddviBCD6Gk57lkLKOpim5QAMMcugYPmwfraBeo,7862
 mcli/app/completion_helpers.py,sha256=PR66qgVNC5_st-CBiD4uuGfO3Zs7Y3QmJ2GJjpx5N6g,8897
 mcli/app/cron_test_cmd.py,sha256=Ai4Smg2WxULeiMD5s2m_S_fXdMAAQsKHpSc4iJGSnwI,26156
-mcli/app/logs_cmd.py,sha256=
+mcli/app/logs_cmd.py,sha256=_5DDS8Rz4p-a9vk4RzrcxGUoLAxVw1I-iPs2fld8_DE,14996
 mcli/app/main.py,sha256=iA9HdqhBaOnOJ2--edLPD7iAatAzhIl5NAcNhw9qJAw,19010
 mcli/app/model_cmd.py,sha256=_8NBzf36u2tnUvc8ASQt88cdfBEonpXj3_16OEAf1cI,12842
 mcli/app/redis_cmd.py,sha256=Cl0LQ3Mqt27gLeb542_xw6bJBbIE-CBmWyMmaUTSk8c,9426
@@ -47,7 +47,7 @@ mcli/lib/performance/uvloop_config.py,sha256=wyI5pQnec2RAhgm52HJ1AxYGFa3bjTa-Cjh
 mcli/lib/pickles/pickles.py,sha256=O9dLJfyxViX-IyionbcjcsxHnq42XiLaAorsUrx9oZU,1448
 mcli/lib/search/cached_vectorizer.py,sha256=IE36BaESqMsj10qSew6ksmPTDR-y4kMYvLYH5bO6xVg,17995
 mcli/lib/services/data_pipeline.py,sha256=_JuNbihEW2NqmOCtKQrqWXUGNDbGo2AKSc15xkNht2s,16391
-mcli/lib/services/lsh_client.py,sha256=
+mcli/lib/services/lsh_client.py,sha256=bafhhzfoBSQpanrqkRjzHSco0PxGqwo8kN0lq5HgbI0,16503
 mcli/lib/services/redis_service.py,sha256=5QwSB-FMIS1zdTNp8VSOrZfr_wrUK10Bfe2N1ZTy-90,12730
 mcli/lib/shell/shell.py,sha256=W7lowu75SnFsb0y8ZIDIawfcPUuJkT3_rI1IKU_c6Wk,4692
 mcli/lib/toml/toml.py,sha256=p05tXgndxIlsA_l60ivmrE5hK92-Sf2u-adERrvIJPk,1115
@@ -81,7 +81,7 @@ mcli/ml/configs/dvc_config.py,sha256=LWOg4di1MpZED18YJznhYJwWsQ5i5k73RMxZT7-poHw
 mcli/ml/configs/mlflow_config.py,sha256=GvoBqxdBU6eIAghjPKqXz00n5j3Z8grdk0DFZwilIS8,4476
 mcli/ml/configs/mlops_manager.py,sha256=4CfqJnqLZjFl4Han3BAQ2ozOZmO8q47lWEnObn_Q5F4,9891
 mcli/ml/dashboard/app.py,sha256=GP_FgmR-4xQ7JoZeLygaA9_Li8T310AG9UJi3vxRpzs,15092
-mcli/ml/dashboard/app_integrated.py,sha256=
+mcli/ml/dashboard/app_integrated.py,sha256=vVzIuwml-Ri53iDGNUVqCsWT-5l_Gx2AxuOXQc8YAQM,29344
 mcli/ml/dashboard/app_supabase.py,sha256=E6zjJTcCpv8MCrQIZ4pgce4sxtLro7utfC9s2762QVA,19734
 mcli/ml/dashboard/app_training.py,sha256=XeU-fDj2MVzM5IM1ezCYJV5RF51oyvXy2_lppPAhSdw,19623
 mcli/ml/dashboard/cli.py,sha256=n4L732c9UoA9DUsiOEzaqBNs42vt1st-JP-UHuzc92I,1479
@@ -108,6 +108,7 @@ mcli/ml/models/test_models.py,sha256=7m3JoixdCtTl2A-Dne4rmtwWp2TwZgVMpo7dN6I59tQ
 mcli/ml/monitoring/drift_detection.py,sha256=UxWEu5jPrWhqJpf9gS28igSvnVk4vzagTpTU1yrfOVc,25956
 mcli/ml/monitoring/metrics.py,sha256=y3Ok0ONm9NC3Z5NbRs6gvpsXRctMKyjjW7QkvIWln3I,1073
 mcli/ml/optimization/portfolio_optimizer.py,sha256=dCZyPzlfHRYKo_tXtOj1IzoCiPXOrN-vH_6r3SXuWtI,31617
+mcli/ml/predictions/prediction_engine.py,sha256=msDnoqc2ykD7sNOeS_Qq-NqJcVOUlG7Y8E_jt-tc-3o,8432
 mcli/ml/preprocessing/data_cleaners.py,sha256=3UTWfi-TX0YXUhfnuFfps6QY8Mz9Z2i5Jg-Bxsk2EkM,17282
 mcli/ml/preprocessing/feature_extractors.py,sha256=ZKexfA3-hIa4arAxn-J7e5T9P7zEXTdJUpCZDEPyPdo,17306
 mcli/ml/preprocessing/ml_pipeline.py,sha256=RAWG_dJ-CFDrNXYJYmkGH821B3NlM_0JlxfRaXJqImw,14202
@@ -119,7 +120,7 @@ mcli/ml/tests/test_training_dashboard.py,sha256=9P1JrUCei7YydSJR8L4OrVmEWm5-SAy3
 mcli/mygroup/test_cmd.py,sha256=PD0qoZ7GqagdQG9DaP7rIrGFenN23zVbYVYlZ0FJaSQ,16
 mcli/public/public.py,sha256=t9BkO1XV7s3YcoH0bbIpyjZ05UX_vBjaKtKkuDX7wZ0,114
 mcli/public/oi/oi.py,sha256=SQabQWQ1pE67pWYEHwIDc3R93DARJfB6VHk7qxWx9xo,308
-mcli/self/self_cmd.py,sha256=
+mcli/self/self_cmd.py,sha256=Srph4HT_gW10q5ijVDlmGu_Ur83X2c4-U3loAQUSENg,48686
 mcli/self/test_cmd.py,sha256=PD0qoZ7GqagdQG9DaP7rIrGFenN23zVbYVYlZ0FJaSQ,16
 mcli/workflow/lsh_integration.py,sha256=khwmMPsdYdkmmLxlZi_UqUo2p0Nf-6GF6PPbtOrmoYQ,13290
 mcli/workflow/workflow.py,sha256=t58OVXmU9uQCJnyXuIbMAm8lihSzJ_jI10vXPNpZspk,928
@@ -181,9 +182,9 @@ mcli/workflow/sync/sync_cmd.py,sha256=S8TuZS_WAsdeD3_j8-XSAZFFrpynAwTWnCC0e6DCLh
 mcli/workflow/sync/test_cmd.py,sha256=neVgs9zEnKSxlvzDpFkuCGucqnzjrShm2OvJtHibslg,10009
 mcli/workflow/videos/videos.py,sha256=C47ViVv6qqqkSKQz6YXjzhok4UrqFbya8w5k_x7hToM,8360
 mcli/workflow/wakatime/wakatime.py,sha256=sEjsUKa3-XyE8Ni6sAb_D3GAY5jDcA30KknW9YTbLTA,142
-mcli_framework-7.1.
-mcli_framework-7.1.
-mcli_framework-7.1.
-mcli_framework-7.1.
-mcli_framework-7.1.
-mcli_framework-7.1.
+mcli_framework-7.1.1.dist-info/licenses/LICENSE,sha256=sahwAMfrJv2-V66HNPTp7A9UmMjxtyejwTZZoWQvEcI,1075
+mcli_framework-7.1.1.dist-info/METADATA,sha256=47x4tvptQwgWBYJrtVQ6Gxvj4nKZmEDDxPZQrHr4qig,14769
+mcli_framework-7.1.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+mcli_framework-7.1.1.dist-info/entry_points.txt,sha256=dYrZbDIm-KUPsl1wfv600Kx_8sMy89phMkCihbDRgP8,261
+mcli_framework-7.1.1.dist-info/top_level.txt,sha256=_bnO8J2EUkliWivey_1le0UrnocFKmyVMQjbQ8iVXjc,5
+mcli_framework-7.1.1.dist-info/RECORD,,
{mcli_framework-7.1.0.dist-info → mcli_framework-7.1.1.dist-info}/WHEEL
File without changes
{mcli_framework-7.1.0.dist-info → mcli_framework-7.1.1.dist-info}/entry_points.txt
File without changes
{mcli_framework-7.1.0.dist-info → mcli_framework-7.1.1.dist-info}/licenses/LICENSE
File without changes
{mcli_framework-7.1.0.dist-info → mcli_framework-7.1.1.dist-info}/top_level.txt
File without changes