mcli-framework 7.8.2__py3-none-any.whl → 7.8.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mcli-framework might be problematic.
- mcli/app/commands_cmd.py +942 -199
- mcli/ml/dashboard/app_supabase.py +58 -12
- mcli/ml/dashboard/pages/predictions_enhanced.py +82 -38
- mcli/ml/dashboard/utils.py +39 -11
- mcli/self/self_cmd.py +5 -246
- mcli/self/store_cmd.py +424 -0
- mcli/test/test_cmd.py +0 -10
- mcli/workflow/dashboard/dashboard_cmd.py +180 -0
- {mcli_framework-7.8.2.dist-info → mcli_framework-7.8.4.dist-info}/METADATA +1 -1
- {mcli_framework-7.8.2.dist-info → mcli_framework-7.8.4.dist-info}/RECORD +14 -77
- mcli/__init__.py +0 -160
- mcli/__main__.py +0 -14
- mcli/app/__init__.py +0 -23
- mcli/app/model/__init__.py +0 -0
- mcli/app/video/__init__.py +0 -5
- mcli/chat/__init__.py +0 -34
- mcli/lib/__init__.py +0 -0
- mcli/lib/api/__init__.py +0 -0
- mcli/lib/auth/__init__.py +0 -1
- mcli/lib/config/__init__.py +0 -1
- mcli/lib/erd/__init__.py +0 -25
- mcli/lib/files/__init__.py +0 -0
- mcli/lib/fs/__init__.py +0 -1
- mcli/lib/logger/__init__.py +0 -3
- mcli/lib/performance/__init__.py +0 -17
- mcli/lib/pickles/__init__.py +0 -1
- mcli/lib/shell/__init__.py +0 -0
- mcli/lib/toml/__init__.py +0 -1
- mcli/lib/watcher/__init__.py +0 -0
- mcli/ml/__init__.py +0 -16
- mcli/ml/api/__init__.py +0 -30
- mcli/ml/api/routers/__init__.py +0 -27
- mcli/ml/auth/__init__.py +0 -41
- mcli/ml/backtesting/__init__.py +0 -33
- mcli/ml/cli/__init__.py +0 -5
- mcli/ml/config/__init__.py +0 -33
- mcli/ml/configs/__init__.py +0 -16
- mcli/ml/dashboard/__init__.py +0 -12
- mcli/ml/dashboard/components/__init__.py +0 -7
- mcli/ml/dashboard/pages/__init__.py +0 -6
- mcli/ml/data_ingestion/__init__.py +0 -29
- mcli/ml/database/__init__.py +0 -40
- mcli/ml/experimentation/__init__.py +0 -29
- mcli/ml/features/__init__.py +0 -39
- mcli/ml/mlops/__init__.py +0 -19
- mcli/ml/models/__init__.py +0 -90
- mcli/ml/monitoring/__init__.py +0 -25
- mcli/ml/optimization/__init__.py +0 -27
- mcli/ml/predictions/__init__.py +0 -5
- mcli/ml/preprocessing/__init__.py +0 -24
- mcli/ml/scripts/__init__.py +0 -1
- mcli/ml/trading/__init__.py +0 -63
- mcli/ml/training/__init__.py +0 -7
- mcli/mygroup/__init__.py +0 -3
- mcli/public/__init__.py +0 -1
- mcli/public/commands/__init__.py +0 -2
- mcli/self/__init__.py +0 -3
- mcli/test/__init__.py +0 -1
- mcli/test/cron_test_cmd.py +0 -697
- mcli/workflow/__init__.py +0 -0
- mcli/workflow/daemon/__init__.py +0 -15
- mcli/workflow/dashboard/__init__.py +0 -5
- mcli/workflow/docker/__init__.py +0 -0
- mcli/workflow/file/__init__.py +0 -0
- mcli/workflow/gcloud/__init__.py +0 -1
- mcli/workflow/git_commit/__init__.py +0 -0
- mcli/workflow/interview/__init__.py +0 -0
- mcli/workflow/politician_trading/__init__.py +0 -4
- mcli/workflow/registry/__init__.py +0 -0
- mcli/workflow/repo/__init__.py +0 -0
- mcli/workflow/scheduler/__init__.py +0 -25
- mcli/workflow/search/__init__.py +0 -0
- mcli/workflow/sync/__init__.py +0 -5
- mcli/workflow/videos/__init__.py +0 -1
- mcli/workflow/wakatime/__init__.py +0 -80
- {mcli_framework-7.8.2.dist-info → mcli_framework-7.8.4.dist-info}/WHEEL +0 -0
- {mcli_framework-7.8.2.dist-info → mcli_framework-7.8.4.dist-info}/entry_points.txt +0 -0
- {mcli_framework-7.8.2.dist-info → mcli_framework-7.8.4.dist-info}/licenses/LICENSE +0 -0
- {mcli_framework-7.8.2.dist-info → mcli_framework-7.8.4.dist-info}/top_level.txt +0 -0
mcli/ml/dashboard/app_supabase.py
CHANGED

@@ -1,6 +1,7 @@
 """Streamlit dashboard for ML system monitoring - Supabase version"""
 
 import asyncio
+import os
 from datetime import datetime, timedelta
 
 import numpy as np

@@ -48,7 +49,7 @@ def get_politicians_data():
 
 @st.cache_data(ttl=30)
 def get_disclosures_data():
-    """Get trading disclosures from Supabase"""
+    """Get trading disclosures from Supabase with politician details"""
     client = get_supabase_client()
     if not client:
         return pd.DataFrame()

@@ -64,6 +65,40 @@ def get_disclosures_data():
     )
     df = pd.DataFrame(response.data)
 
+    if df.empty:
+        return df
+
+    # Get all unique politician IDs
+    politician_ids = df["politician_id"].dropna().unique()
+
+    # Fetch politician details
+    politicians = {}
+    if len(politician_ids) > 0:
+        pol_response = (
+            client.table("politicians")
+            .select("id, full_name, party, state_or_country")
+            .in_("id", list(politician_ids))
+            .execute()
+        )
+        politicians = {p["id"]: p for p in pol_response.data}
+
+    # Add politician details to disclosures
+    df["politician_name"] = df["politician_id"].map(
+        lambda x: politicians.get(x, {}).get("full_name", "Unknown")
+    )
+    df["politician_party"] = df["politician_id"].map(
+        lambda x: politicians.get(x, {}).get("party", "Unknown")
+    )
+    df["politician_state"] = df["politician_id"].map(
+        lambda x: politicians.get(x, {}).get("state_or_country", "Unknown")
+    )
+
+    # Rename columns for compatibility
+    df["ticker_symbol"] = df["asset_ticker"]
+    df["amount"] = df["amount_exact"].fillna(
+        (df["amount_range_min"] + df["amount_range_max"]) / 2
+    )
+
     # Convert datetime columns to proper datetime format
     date_columns = ["transaction_date", "disclosure_date", "created_at", "updated_at"]
     for col in date_columns:

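For context, a minimal standalone sketch (not code from this package) of the lookup-and-map pattern the new block uses to attach politician details and derive an amount column; the sample data below is made up:

    import pandas as pd

    # Hypothetical rows standing in for Supabase query results
    disclosures = pd.DataFrame({
        "politician_id": ["p1", "p2", None],
        "asset_ticker": ["AAPL", "MSFT", "TSLA"],
        "amount_exact": [15000.0, None, None],
        "amount_range_min": [None, 1001.0, 15001.0],
        "amount_range_max": [None, 15000.0, 50000.0],
    })
    politicians = {
        "p1": {"full_name": "Jane Doe", "party": "D", "state_or_country": "CA"},
        "p2": {"full_name": "John Roe", "party": "R", "state_or_country": "TX"},
    }

    # Map politician details onto each disclosure, defaulting to "Unknown"
    disclosures["politician_name"] = disclosures["politician_id"].map(
        lambda x: politicians.get(x, {}).get("full_name", "Unknown")
    )

    # Fall back to the midpoint of the disclosed range when no exact amount is given
    disclosures["amount"] = disclosures["amount_exact"].fillna(
        (disclosures["amount_range_min"] + disclosures["amount_range_max"]) / 2
    )
    print(disclosures[["politician_name", "asset_ticker", "amount"]])
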
@@ -346,21 +381,29 @@ def show_politicians():
         )
     with col2:
         state_filter = st.multiselect(
-            "State",
-            options=
+            "State/Country",
+            options=(
+                politicians["state_or_country"].dropna().unique()
+                if "state_or_country" in politicians
+                else []
+            ),
             default=[],
         )
     with col3:
-        active_only = st.checkbox("Active Only", value=
+        active_only = st.checkbox("Active Only", value=False)
 
     # Apply filters
     filtered = politicians.copy()
     if party_filter and "party" in filtered:
         filtered = filtered[filtered["party"].isin(party_filter)]
-    if state_filter and "
-        filtered = filtered[filtered["
-    if active_only and "
-
+    if state_filter and "state_or_country" in filtered:
+        filtered = filtered[filtered["state_or_country"].isin(state_filter)]
+    if active_only and "term_end" in filtered:
+        # Filter for active (term_end is in the future or null)
+        filtered = filtered[
+            (filtered["term_end"].isna())
+            | (pd.to_datetime(filtered["term_end"]) > pd.Timestamp.now())
+        ]
 
     # Display data
     st.dataframe(filtered, width="stretch")

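The new "Active Only" behavior above treats a politician as active when term_end is missing or lies in the future; a small illustrative sketch of that filter on synthetic data (not code from the package):

    import pandas as pd

    politicians = pd.DataFrame({
        "full_name": ["Jane Doe", "John Roe", "Alex Poe"],
        "term_end": ["2031-01-03", "2019-01-03", None],  # ISO dates, or missing
    })

    # Active means: no recorded term_end, or a term_end later than now
    term_end = pd.to_datetime(politicians["term_end"])
    active = politicians[term_end.isna() | (term_end > pd.Timestamp.now())]
    print(active["full_name"].tolist())  # ['Jane Doe', 'Alex Poe']
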
@@ -368,17 +411,20 @@ def show_politicians():
     # Stats
     col1, col2 = st.columns(2)
     with col1:
-        if "party" in filtered:
+        if "party" in filtered and not filtered["party"].dropna().empty:
             party_dist = filtered["party"].value_counts()
             fig = px.pie(
                 values=party_dist.values, names=party_dist.index, title="Party Distribution"
             )
             st.plotly_chart(fig, width="stretch", config={"responsive": True})
     with col2:
-        if "
-            state_dist = filtered["
+        if "state_or_country" in filtered and not filtered["state_or_country"].dropna().empty:
+            state_dist = filtered["state_or_country"].value_counts().head(10)
             fig = px.bar(
-                x=state_dist.values,
+                x=state_dist.values,
+                y=state_dist.index,
+                orientation="h",
+                title="Top States/Countries",
             )
             st.plotly_chart(fig, width="stretch", config={"responsive": True})
         else:

mcli/ml/dashboard/pages/predictions_enhanced.py
CHANGED

@@ -158,54 +158,83 @@ def generate_mock_historical_performance() -> pd.DataFrame:
 
 
 def get_real_predictions() -> pd.DataFrame:
-    """Get real predictions from ML pipeline"""
+    """Get real predictions from ML pipeline - REQUIRES SUPABASE CONNECTION"""
     if not HAS_REAL_DATA:
-        st.
-
+        st.error("❌ **CONFIGURATION ERROR**: Real data functions not available!")
+        st.error(
+            "Cannot import Supabase utilities. Check that `src/mcli/ml/dashboard/utils.py` exists."
+        )
+        st.stop()
 
     try:
         # Get real disclosure data
        disclosures = get_disclosures_data()
 
         if disclosures.empty:
-            st.
-
+            st.error("❌ **DATABASE ERROR**: No trading disclosure data available!")
+            st.error("Supabase connection may not be configured. Check secrets configuration.")
+            st.code(
+                """
+                # Required Streamlit Secrets:
+                SUPABASE_URL = "your_supabase_url"
+                SUPABASE_KEY = "your_supabase_key"
+                SUPABASE_SERVICE_ROLE_KEY = "your_service_role_key"
+                """,
+                language="toml",
+            )
+            st.stop()
+
+        # Check if we have enough data for ML
+        if len(disclosures) < 10:
+            st.error(
+                f"❌ **INSUFFICIENT DATA**: Found only {len(disclosures)} disclosures. "
+                f"Need at least 10 for ML predictions."
             )
-
+            st.info("Please run data collection workflows to populate the database.")
+            st.stop()
 
         # Run ML pipeline to generate predictions
  [27 removed lines (old 177-203) are not shown in the source diff]
+        st.success(f"✅ Loaded {len(disclosures)} real trading disclosures from database!")
+
+        try:
+            _, _, predictions = run_ml_pipeline(disclosures)
+
+            if predictions is not None and not predictions.empty:
+                # Ensure all required columns exist
+                required_cols = [
+                    "ticker",
+                    "predicted_return",
+                    "confidence",
+                    "risk_score",
+                    "recommendation",
+                    "sector",
+                    "politician",
+                ]
+
+                for col in required_cols:
+                    if col not in predictions.columns:
+                        if col == "sector":
+                            predictions[col] = "Technology"  # Default
+                        elif col == "politician":
+                            predictions[col] = "Unknown"
+                        elif col == "ticker":
+                            predictions[col] = "UNK"
+
+                st.success("✅ Generated ML predictions from real data!")
+                return predictions
+            else:
+                st.error("❌ **ML PIPELINE ERROR**: Predictions returned empty!")
+                st.error("ML pipeline ran but produced no predictions.")
+                st.stop()
+        except Exception as ml_error:
+            st.error(f"❌ **ML PIPELINE ERROR**: {ml_error}")
+            st.exception(ml_error)
+            st.stop()
 
     except Exception as e:
-        st.error(f"
-        st.
-
+        st.error(f"❌ **FATAL ERROR**: {e}")
+        st.exception(e)
+        st.stop()
 
 
 def show_predictions_enhanced():

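The error path above instructs operators to provide SUPABASE_URL and SUPABASE_KEY via Streamlit secrets. As a rough illustration only (assuming the standard streamlit and supabase-py client APIs, not code from this package), a client can be built from secrets with an environment-variable fallback like this:

    import os
    from typing import Optional

    import streamlit as st
    from supabase import Client, create_client

    def build_supabase_client() -> Optional[Client]:
        """Return a Supabase client from Streamlit secrets or env vars, or None if unconfigured."""
        url = st.secrets.get("SUPABASE_URL", os.environ.get("SUPABASE_URL"))
        key = st.secrets.get("SUPABASE_KEY", os.environ.get("SUPABASE_KEY"))
        if not url or not key:
            return None  # callers surface an error and st.stop(), as in the diff above
        return create_client(url, key)
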
@@ -659,12 +688,27 @@ def show_prediction_generator():
 
 
 def show_performance_tracker():
-    """Show prediction performance over time"""
+    """Show prediction performance over time - REQUIRES REAL ML PREDICTION HISTORY"""
 
     st.subheader("📈 Prediction Performance Tracker")
     st.markdown("Track the accuracy and ROI of our ML predictions over time")
 
-    #
+    # TODO: Implement real performance tracking from database
+    st.error(
+        "❌ **FEATURE NOT IMPLEMENTED**: Performance tracking requires ML prediction history database."
+    )
+    st.info(
+        """
+        This feature requires:
+        1. A prediction_history table in Supabase
+        2. Automated prediction tracking and validation
+        3. Historical performance metrics calculation
+
+        Currently showing mock data for demonstration only.
+        """
+    )
+
+    # Generate historical data (mock for now)
     performance_df = generate_mock_historical_performance()
 
     # KPIs

mcli/ml/dashboard/utils.py
CHANGED

@@ -69,17 +69,11 @@ def get_disclosures_data() -> pd.DataFrame:
     """Get trading disclosures from Supabase with proper schema mapping"""
     client = get_supabase_client()
     if not client:
-
+        st.warning("⚠️ Supabase connection not available. Configure SUPABASE_URL and SUPABASE_KEY.")
+        return pd.DataFrame()  # Return empty instead of demo data
 
     try:
-        #
-        count_response = client.table("trading_disclosures").select("*", count="exact").execute()
-        total_count = count_response.count
-
-        if total_count == 0:
-            return _generate_demo_disclosures()
-
-        # Get the data
+        # Get the data with politician details joined
         response = (
             client.table("trading_disclosures")
             .select("*")

@@ -89,14 +83,48 @@ def get_disclosures_data() -> pd.DataFrame:
         )
 
         if not response.data:
-
+            st.info(
+                "📊 No trading disclosures found in database. Data collection may be in progress."
+            )
+            return pd.DataFrame()
 
         df = pd.DataFrame(response.data)
+
+        # Get politician details and join
+        if not df.empty and "politician_id" in df.columns:
+            politician_ids = df["politician_id"].dropna().unique()
+            if len(politician_ids) > 0:
+                pol_response = (
+                    client.table("politicians")
+                    .select("id, full_name, party, state_or_country")
+                    .in_("id", list(politician_ids))
+                    .execute()
+                )
+                politicians = {p["id"]: p for p in pol_response.data}
+
+                # Add politician details
+                df["politician_name"] = df["politician_id"].map(
+                    lambda x: politicians.get(x, {}).get("full_name", "Unknown")
+                )
+                df["politician_party"] = df["politician_id"].map(
+                    lambda x: politicians.get(x, {}).get("party", "Unknown")
+                )
+                df["politician_state"] = df["politician_id"].map(
+                    lambda x: politicians.get(x, {}).get("state_or_country", "Unknown")
+                )
+
+            # Map column names for compatibility
+            df["ticker_symbol"] = df["asset_ticker"]
+            df["amount"] = df["amount_exact"].fillna(
+                (df["amount_range_min"] + df["amount_range_max"]) / 2
+            )
+
         return df
 
     except Exception as e:
+        st.error(f"❌ Error fetching disclosures: {e}")
         logger.error(f"Failed to fetch disclosures: {e}")
-        return
+        return pd.DataFrame()
 
 
 def _generate_demo_disclosures() -> pd.DataFrame:

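Since get_disclosures_data() now returns an empty DataFrame instead of demo data when Supabase is unavailable, callers are expected to guard before rendering; a minimal illustrative pattern (hypothetical usage, not code from the package):

    import streamlit as st

    from mcli.ml.dashboard.utils import get_disclosures_data

    disclosures = get_disclosures_data()
    if disclosures.empty:
        st.info("No disclosure data yet; configure Supabase or run data collection workflows.")
        st.stop()  # halt the page rather than plotting empty data

    st.dataframe(disclosures.head(50))
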
mcli/self/self_cmd.py
CHANGED

@@ -102,79 +102,8 @@ def restore_command_state(hash_value):
     return True
 
 
-# Create a Click group for all command management
-@self_app.group("commands")
-def commands_group():
-    """Manage CLI commands and command state."""
-    pass
-
-
-# Move the command-state group under commands_group
-@commands_group.group("state")
-def command_state():
-    """Manage command state lockfile and history."""
-    pass
-
-
-@command_state.command("list")
-def list_states():
-    """List all saved command states (hash, timestamp, #commands)."""
-    states = load_lockfile()
-    if not states:
-        click.echo("No command states found.")
-        return
-    table = Table(title="Command States")
-    table.add_column("Hash", style="cyan")
-    table.add_column("Timestamp", style="green")
-    table.add_column("# Commands", style="yellow")
-    for state in states:
-        table.add_row(state["hash"][:8], state["timestamp"], str(len(state["commands"])))
-    console.print(table)
-
-
-@command_state.command("restore")
-@click.argument("hash_value")
-def restore_state(hash_value):
-    """Restore to a previous command state by hash."""
-    if restore_command_state(hash_value):
-        click.echo(f"Restored to state {hash_value[:8]}")
-    else:
-        click.echo(f"State {hash_value[:8]} not found.", err=True)
-
-
-@command_state.command("write")
-@click.argument("json_file", required=False, type=click.Path(exists=False))
-def write_state(json_file):
-    """Write a new command state to the lockfile from a JSON file or the current app state."""
-    import traceback
-
-    print("[DEBUG] write_state called")
-    print(f"[DEBUG] LOCKFILE_PATH: {LOCKFILE_PATH}")
-    try:
-        if json_file:
-            print(f"[DEBUG] Loading command state from file: {json_file}")
-            with open(json_file, "r") as f:
-                commands = json.load(f)
-            click.echo(f"Loaded command state from {json_file}.")
-        else:
-            print("[DEBUG] Snapshotting current command state.")
-            commands = get_current_command_state()
-        state_hash = hash_command_state(commands)
-        new_state = {
-            "hash": state_hash,
-            "timestamp": datetime.utcnow().isoformat() + "Z",
-            "commands": commands,
-        }
-        append_lockfile(new_state)
-        print(f"[DEBUG] Wrote new command state {state_hash[:8]} to lockfile at {LOCKFILE_PATH}")
-        click.echo(f"Wrote new command state {state_hash[:8]} to lockfile.")
-    except Exception as e:
-        print(f"[ERROR] Exception in write_state: {e}")
-        print(traceback.format_exc())
-        click.echo(f"[ERROR] Failed to write command state: {e}", err=True)
-
-
 # On CLI startup, check and update lockfile if needed
+# NOTE: The commands group has been moved to mcli.app.commands_cmd for better organization
 
 
 def check_and_update_command_lockfile():

@@ -250,76 +179,7 @@ def {name}_command(name: str = "World"):
     return template
 
 
-
-@click.argument("query", required=False)
-@click.option("--full", "-f", is_flag=True, help="Show full command paths and descriptions")
-def search(query, full):
-    """
-    Search for available commands using fuzzy matching.
-
-    Similar to telescope in neovim, this allows quick fuzzy searching
-    through all available commands in mcli.
-
-    If no query is provided, lists all commands.
-    """
-    # Collect all commands from the application
-    commands = collect_commands()
-
-    # Display the commands in a table
-    table = Table(title="mcli Commands")
-    table.add_column("Command", style="green")
-    table.add_column("Group", style="blue")
-    if full:
-        table.add_column("Path", style="dim")
-        table.add_column("Description", style="yellow")
-
-    if query:
-        filtered_commands = []
-
-        # Try to use fuzzywuzzy for better matching if available
-        if process:
-            # Extract command names for matching
-            command_names = [
-                f"{cmd['group']}.{cmd['name']}" if cmd["group"] else cmd["name"] for cmd in commands
-            ]
-            matches = process.extract(query, command_names, limit=10)
-
-            # Filter to matched commands
-            match_indices = [command_names.index(match[0]) for match in matches if match[1] > 50]
-            filtered_commands = [commands[i] for i in match_indices]
-        else:
-            # Fallback to simple substring matching
-            filtered_commands = [
-                cmd
-                for cmd in commands
-                if query.lower() in cmd["name"].lower()
-                or (cmd["group"] and query.lower() in cmd["group"].lower())
-            ]
-
-        commands = filtered_commands
-
-    # Sort commands by group then name
-    commands.sort(key=lambda c: (c["group"] if c["group"] else "", c["name"]))
-
-    # Add rows to the table
-    for cmd in commands:
-        if full:
-            table.add_row(
-                cmd["name"],
-                cmd["group"] if cmd["group"] else "-",
-                cmd["path"],
-                cmd["help"] if cmd["help"] else "",
-            )
-        else:
-            table.add_row(cmd["name"], cmd["group"] if cmd["group"] else "-")
-
-    console.print(table)
-
-    if not commands:
-        logger.info("No commands found matching the search query")
-        click.echo("No commands found matching the search query")
-
-    return 0
+# NOTE: search command has been moved to mcli.app.commands_cmd for better organization
 
 
 def collect_commands() -> List[Dict[str, Any]]:

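The removed search command (now living in mcli.app.commands_cmd) leaned on fuzzywuzzy's process.extract with a score cutoff of 50; a standalone sketch of just that matching step, assuming fuzzywuzzy is installed:

    from fuzzywuzzy import process

    def fuzzy_filter(query: str, names: list, score_cutoff: int = 50) -> list:
        """Return candidate names scoring above the cutoff, best matches first."""
        matches = process.extract(query, names, limit=10)  # [(name, score), ...]
        return [name for name, score in matches if score > score_cutoff]

    command_names = ["workflow.daemon", "workflow.sync", "self.plugin", "chat"]
    print(fuzzy_filter("wrkflw", command_names))
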
@@ -575,110 +435,7 @@ logger = get_logger()
     pass
 
 
-
-@click.option(
-    "--output", "-o", type=click.Path(), help="Output file (default: workflow-commands.json)"
-)
-def extract_workflow_commands(output):
-    """
-    Extract workflow commands from Python modules to JSON format.
-
-    This command helps migrate existing workflow commands to portable JSON format.
-    """
-    import inspect
-    from pathlib import Path
-
-    output_file = Path(output) if output else Path("workflow-commands.json")
-
-    workflow_commands = []
-
-    # Try to get workflow from the main app
-    try:
-        from mcli.app.main import create_app
-
-        app = create_app()
-
-        # Check if workflow group exists
-        if "workflow" in app.commands:
-            workflow_group = app.commands["workflow"]
-
-            # Force load lazy group if needed
-            if hasattr(workflow_group, "_load_group"):
-                workflow_group = workflow_group._load_group()
-
-            if hasattr(workflow_group, "commands"):
-                for cmd_name, cmd_obj in workflow_group.commands.items():
-                    # Extract command information
-                    command_info = {
-                        "name": cmd_name,
-                        "group": "workflow",
-                        "description": cmd_obj.help or "Workflow command",
-                        "version": "1.0",
-                        "metadata": {"source": "workflow", "migrated": True},
-                    }
-
-                    # Create a template based on command type
-                    # Replace hyphens with underscores for valid Python function names
-                    safe_name = cmd_name.replace("-", "_")
-
-                    if isinstance(cmd_obj, click.Group):
-                        # For groups, create a template
-                        command_info[
-                            "code"
-                        ] = f'''"""
-{cmd_name} workflow command.
-"""
-import click
-
-@click.group(name="{cmd_name}")
-def app():
-    """{cmd_obj.help or 'Workflow command group'}"""
-    pass
-
-# Add your subcommands here
-'''
-                    else:
-                        # For regular commands, create a template
-                        command_info[
-                            "code"
-                        ] = f'''"""
-{cmd_name} workflow command.
-"""
-import click
-
-@click.command(name="{cmd_name}")
-def app():
-    """{cmd_obj.help or 'Workflow command'}"""
-    click.echo("Workflow command: {cmd_name}")
-    # Add your implementation here
-'''
-
-                    workflow_commands.append(command_info)
-
-        if workflow_commands:
-            import json
-
-            with open(output_file, "w") as f:
-                json.dump(workflow_commands, f, indent=2)
-
-            click.echo(f"✅ Extracted {len(workflow_commands)} workflow commands")
-            click.echo(f"📁 Saved to: {output_file}")
-            click.echo(
-                f"\n💡 These are templates. Import with: mcli self import-commands {output_file}"
-            )
-            click.echo(" Then customize the code in ~/.mcli/commands/<command>.json")
-            return 0
-        else:
-            click.echo("⚠️ No workflow commands found to extract")
-            return 1
-
-    except Exception as e:
-        logger.error(f"Failed to extract workflow commands: {e}")
-        click.echo(f"❌ Failed to extract workflow commands: {e}", err=True)
-        import traceback
-
-        click.echo(traceback.format_exc(), err=True)
-        return 1
+# NOTE: extract-workflow-commands has been moved to mcli.app.commands_cmd for better organization
 
 
 @click.group("plugin")

@@ -1280,6 +1037,8 @@ try:
 except ImportError as e:
     logger.debug(f"Could not load visual command: {e}")
 
+# NOTE: store command has been moved to mcli.app.commands_cmd for better organization
+
 # This part is important to make the command available to the CLI
 if __name__ == "__main__":
     self_app()