mcli-framework 7.8.3__py3-none-any.whl → 7.8.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: the registry flags this release of mcli-framework as potentially problematic.

@@ -49,7 +49,7 @@ def get_politicians_data():
 
 @st.cache_data(ttl=30)
 def get_disclosures_data():
-    """Get trading disclosures from Supabase"""
+    """Get trading disclosures from Supabase with politician details"""
     client = get_supabase_client()
     if not client:
         return pd.DataFrame()
@@ -65,6 +65,40 @@ def get_disclosures_data():
     )
     df = pd.DataFrame(response.data)
 
+    if df.empty:
+        return df
+
+    # Get all unique politician IDs
+    politician_ids = df["politician_id"].dropna().unique()
+
+    # Fetch politician details
+    politicians = {}
+    if len(politician_ids) > 0:
+        pol_response = (
+            client.table("politicians")
+            .select("id, full_name, party, state_or_country")
+            .in_("id", list(politician_ids))
+            .execute()
+        )
+        politicians = {p["id"]: p for p in pol_response.data}
+
+    # Add politician details to disclosures
+    df["politician_name"] = df["politician_id"].map(
+        lambda x: politicians.get(x, {}).get("full_name", "Unknown")
+    )
+    df["politician_party"] = df["politician_id"].map(
+        lambda x: politicians.get(x, {}).get("party", "Unknown")
+    )
+    df["politician_state"] = df["politician_id"].map(
+        lambda x: politicians.get(x, {}).get("state_or_country", "Unknown")
+    )
+
+    # Rename columns for compatibility
+    df["ticker_symbol"] = df["asset_ticker"]
+    df["amount"] = df["amount_exact"].fillna(
+        (df["amount_range_min"] + df["amount_range_max"]) / 2
+    )
+
     # Convert datetime columns to proper datetime format
     date_columns = ["transaction_date", "disclosure_date", "created_at", "updated_at"]
     for col in date_columns:
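
For readers skimming the hunk above: the politician join is done client-side. The added code fetches the matching politician rows once, keys them by id, maps the details onto the disclosures frame, and falls back to the midpoint of the disclosed range when no exact amount is given. A minimal standalone sketch of that pattern follows; the names and numbers are toy values, not data from the package.

import pandas as pd

# Toy stand-ins for the two Supabase query results (hypothetical values).
disclosures = pd.DataFrame(
    {
        "politician_id": [1, 2, None],
        "asset_ticker": ["AAPL", "MSFT", "TSLA"],
        "amount_exact": [15000.0, None, None],
        "amount_range_min": [None, 1001.0, 15001.0],
        "amount_range_max": [None, 15000.0, 50000.0],
    }
)
politicians = {
    1: {"full_name": "Jane Doe", "party": "D", "state_or_country": "CA"},
    2: {"full_name": "John Roe", "party": "R", "state_or_country": "TX"},
}

# Same lookup pattern as the hunk: a dict of dicts keyed by id, with "Unknown" as fallback.
disclosures["politician_name"] = disclosures["politician_id"].map(
    lambda x: politicians.get(x, {}).get("full_name", "Unknown")
)

# Midpoint fallback when only a disclosure range is available.
disclosures["amount"] = disclosures["amount_exact"].fillna(
    (disclosures["amount_range_min"] + disclosures["amount_range_max"]) / 2
)
print(disclosures[["politician_name", "asset_ticker", "amount"]])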
@@ -347,21 +381,29 @@ def show_politicians():
         )
     with col2:
         state_filter = st.multiselect(
-            "State",
-            options=politicians["state"].dropna().unique() if "state" in politicians else [],
+            "State/Country",
+            options=(
+                politicians["state_or_country"].dropna().unique()
+                if "state_or_country" in politicians
+                else []
+            ),
             default=[],
         )
     with col3:
-        active_only = st.checkbox("Active Only", value=True)
+        active_only = st.checkbox("Active Only", value=False)
 
     # Apply filters
     filtered = politicians.copy()
     if party_filter and "party" in filtered:
         filtered = filtered[filtered["party"].isin(party_filter)]
-    if state_filter and "state" in filtered:
-        filtered = filtered[filtered["state"].isin(state_filter)]
-    if active_only and "is_active" in filtered:
-        filtered = filtered[filtered["is_active"] == True]
+    if state_filter and "state_or_country" in filtered:
+        filtered = filtered[filtered["state_or_country"].isin(state_filter)]
+    if active_only and "term_end" in filtered:
+        # Filter for active (term_end is in the future or null)
+        filtered = filtered[
+            (filtered["term_end"].isna())
+            | (pd.to_datetime(filtered["term_end"]) > pd.Timestamp.now())
+        ]
 
     # Display data
     st.dataframe(filtered, width="stretch")
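
The "Active Only" switch above no longer reads an is_active flag; a politician is treated as active when term_end is null or lies in the future. A small pandas sketch of that predicate, using hypothetical rows:

import pandas as pd

# Hypothetical politicians table: term_end is null for sitting members.
politicians = pd.DataFrame(
    {
        "full_name": ["Jane Doe", "John Roe", "Alex Poe"],
        "term_end": [None, "2020-01-03", "2031-01-03"],
    }
)

# Same predicate as the hunk: active means term_end is missing or in the future.
active = politicians[
    politicians["term_end"].isna()
    | (pd.to_datetime(politicians["term_end"]) > pd.Timestamp.now())
]
print(active["full_name"].tolist())  # ['Jane Doe', 'Alex Poe'] at the time of writing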
@@ -369,17 +411,20 @@ def show_politicians():
     # Stats
     col1, col2 = st.columns(2)
     with col1:
-        if "party" in filtered:
+        if "party" in filtered and not filtered["party"].dropna().empty:
             party_dist = filtered["party"].value_counts()
             fig = px.pie(
                 values=party_dist.values, names=party_dist.index, title="Party Distribution"
             )
             st.plotly_chart(fig, width="stretch", config={"responsive": True})
     with col2:
-        if "state" in filtered:
-            state_dist = filtered["state"].value_counts().head(10)
+        if "state_or_country" in filtered and not filtered["state_or_country"].dropna().empty:
+            state_dist = filtered["state_or_country"].value_counts().head(10)
             fig = px.bar(
-                x=state_dist.values, y=state_dist.index, orientation="h", title="Top States"
+                x=state_dist.values,
+                y=state_dist.index,
+                orientation="h",
+                title="Top States/Countries",
             )
             st.plotly_chart(fig, width="stretch", config={"responsive": True})
     else:
@@ -158,54 +158,83 @@ def generate_mock_historical_performance() -> pd.DataFrame:
 
 
 def get_real_predictions() -> pd.DataFrame:
-    """Get real predictions from ML pipeline"""
+    """Get real predictions from ML pipeline - REQUIRES SUPABASE CONNECTION"""
     if not HAS_REAL_DATA:
-        st.warning("⚠️ Supabase connection not configured. Using demo data.")
-        return generate_mock_predictions()
+        st.error("❌ **CONFIGURATION ERROR**: Real data functions not available!")
+        st.error(
+            "Cannot import Supabase utilities. Check that `src/mcli/ml/dashboard/utils.py` exists."
+        )
+        st.stop()
 
     try:
         # Get real disclosure data
        disclosures = get_disclosures_data()
 
         if disclosures.empty:
-            st.info(
-                "No disclosure data available. Click 'Run ML Pipeline' in sidebar to generate predictions."
+            st.error("❌ **DATABASE ERROR**: No trading disclosure data available!")
+            st.error("Supabase connection may not be configured. Check secrets configuration.")
+            st.code(
+                """
+                # Required Streamlit Secrets:
+                SUPABASE_URL = "your_supabase_url"
+                SUPABASE_KEY = "your_supabase_key"
+                SUPABASE_SERVICE_ROLE_KEY = "your_service_role_key"
+                """,
+                language="toml",
+            )
+            st.stop()
+
+        # Check if we have enough data for ML
+        if len(disclosures) < 10:
+            st.error(
+                f"❌ **INSUFFICIENT DATA**: Found only {len(disclosures)} disclosures. "
+                f"Need at least 10 for ML predictions."
             )
-            return generate_mock_predictions()
+            st.info("Please run data collection workflows to populate the database.")
+            st.stop()
 
         # Run ML pipeline to generate predictions
-        _, _, predictions = run_ml_pipeline(disclosures)
-
-        if predictions is not None and not predictions.empty:
-            # Ensure all required columns exist
-            required_cols = [
-                "ticker",
-                "predicted_return",
-                "confidence",
-                "risk_score",
-                "recommendation",
-                "sector",
-                "politician",
-            ]
-
-            for col in required_cols:
-                if col not in predictions.columns:
-                    if col == "sector":
-                        predictions[col] = "Technology"  # Default
-                    elif col == "politician":
-                        predictions[col] = "Unknown"
-                    elif col == "ticker":
-                        predictions[col] = "UNK"
-
-            return predictions
-        else:
-            st.info("ML pipeline did not generate predictions. Using demo data for display.")
-            return generate_mock_predictions()
+        st.success(f"✅ Loaded {len(disclosures)} real trading disclosures from database!")
+
+        try:
+            _, _, predictions = run_ml_pipeline(disclosures)
+
+            if predictions is not None and not predictions.empty:
+                # Ensure all required columns exist
+                required_cols = [
+                    "ticker",
+                    "predicted_return",
+                    "confidence",
+                    "risk_score",
+                    "recommendation",
+                    "sector",
+                    "politician",
+                ]
+
+                for col in required_cols:
+                    if col not in predictions.columns:
+                        if col == "sector":
+                            predictions[col] = "Technology"  # Default
+                        elif col == "politician":
+                            predictions[col] = "Unknown"
+                        elif col == "ticker":
+                            predictions[col] = "UNK"
+
+                st.success("✅ Generated ML predictions from real data!")
+                return predictions
+            else:
+                st.error("❌ **ML PIPELINE ERROR**: Predictions returned empty!")
+                st.error("ML pipeline ran but produced no predictions.")
+                st.stop()
+        except Exception as ml_error:
+            st.error(f"❌ **ML PIPELINE ERROR**: {ml_error}")
+            st.exception(ml_error)
+            st.stop()
 
     except Exception as e:
-        st.error(f"Error loading predictions: {e}")
-        st.info("Falling back to demo data")
-        return generate_mock_predictions()
+        st.error(f"❌ **FATAL ERROR**: {e}")
+        st.exception(e)
+        st.stop()
 
 
 def show_predictions_enhanced():
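
The reworked get_real_predictions above swaps the silent fallback to mock data for st.error(...) followed by st.stop(), so a misconfigured deployment fails visibly instead of showing demo numbers. A minimal sketch of that fail-fast pattern (the secret names are the ones listed in the hunk; the check itself is illustrative, not code from the package):

import streamlit as st

REQUIRED_SECRETS = ("SUPABASE_URL", "SUPABASE_KEY", "SUPABASE_SERVICE_ROLE_KEY")

# Halt the page early if any required secret is absent, mirroring the new error path.
missing = [name for name in REQUIRED_SECRETS if name not in st.secrets]
if missing:
    st.error(f"Missing Streamlit secrets: {', '.join(missing)}")
    st.stop()  # nothing below this line runs for this session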
@@ -659,12 +688,27 @@ def show_prediction_generator():
 
 
 def show_performance_tracker():
-    """Show prediction performance over time"""
+    """Show prediction performance over time - REQUIRES REAL ML PREDICTION HISTORY"""
 
     st.subheader("📈 Prediction Performance Tracker")
     st.markdown("Track the accuracy and ROI of our ML predictions over time")
 
-    # Generate historical data
+    # TODO: Implement real performance tracking from database
+    st.error(
+        "❌ **FEATURE NOT IMPLEMENTED**: Performance tracking requires ML prediction history database."
+    )
+    st.info(
+        """
+        This feature requires:
+        1. A prediction_history table in Supabase
+        2. Automated prediction tracking and validation
+        3. Historical performance metrics calculation
+
+        Currently showing mock data for demonstration only.
+        """
+    )
+
+    # Generate historical data (mock for now)
     performance_df = generate_mock_historical_performance()
 
     # KPIs
@@ -69,17 +69,11 @@ def get_disclosures_data() -> pd.DataFrame:
     """Get trading disclosures from Supabase with proper schema mapping"""
     client = get_supabase_client()
     if not client:
-        return _generate_demo_disclosures()
+        st.warning("⚠️ Supabase connection not available. Configure SUPABASE_URL and SUPABASE_KEY.")
+        return pd.DataFrame()  # Return empty instead of demo data
 
     try:
-        # First, get total count
-        count_response = client.table("trading_disclosures").select("*", count="exact").execute()
-        total_count = count_response.count
-
-        if total_count == 0:
-            return _generate_demo_disclosures()
-
-        # Get the data
+        # Get the data with politician details joined
         response = (
             client.table("trading_disclosures")
             .select("*")
@@ -89,14 +83,48 @@ def get_disclosures_data() -> pd.DataFrame:
         )
 
         if not response.data:
-            return _generate_demo_disclosures()
+            st.info(
+                "📊 No trading disclosures found in database. Data collection may be in progress."
+            )
+            return pd.DataFrame()
 
         df = pd.DataFrame(response.data)
+
+        # Get politician details and join
+        if not df.empty and "politician_id" in df.columns:
+            politician_ids = df["politician_id"].dropna().unique()
+            if len(politician_ids) > 0:
+                pol_response = (
+                    client.table("politicians")
+                    .select("id, full_name, party, state_or_country")
+                    .in_("id", list(politician_ids))
+                    .execute()
+                )
+                politicians = {p["id"]: p for p in pol_response.data}
+
+                # Add politician details
+                df["politician_name"] = df["politician_id"].map(
+                    lambda x: politicians.get(x, {}).get("full_name", "Unknown")
+                )
+                df["politician_party"] = df["politician_id"].map(
+                    lambda x: politicians.get(x, {}).get("party", "Unknown")
+                )
+                df["politician_state"] = df["politician_id"].map(
+                    lambda x: politicians.get(x, {}).get("state_or_country", "Unknown")
+                )
+
+            # Map column names for compatibility
+            df["ticker_symbol"] = df["asset_ticker"]
+            df["amount"] = df["amount_exact"].fillna(
+                (df["amount_range_min"] + df["amount_range_max"]) / 2
+            )
+
         return df
 
     except Exception as e:
+        st.error(f"❌ Error fetching disclosures: {e}")
         logger.error(f"Failed to fetch disclosures: {e}")
-        return _generate_demo_disclosures()
+        return pd.DataFrame()
 
 
 def _generate_demo_disclosures() -> pd.DataFrame:
mcli/self/self_cmd.py CHANGED
@@ -102,79 +102,8 @@ def restore_command_state(hash_value):
     return True
 
 
-# Create a Click group for all command management
-@self_app.group("commands")
-def commands_group():
-    """Manage CLI commands and command state."""
-    pass
-
-
-# Move the command-state group under commands_group
-@commands_group.group("state")
-def command_state():
-    """Manage command state lockfile and history."""
-    pass
-
-
-@command_state.command("list")
-def list_states():
-    """List all saved command states (hash, timestamp, #commands)."""
-    states = load_lockfile()
-    if not states:
-        click.echo("No command states found.")
-        return
-    table = Table(title="Command States")
-    table.add_column("Hash", style="cyan")
-    table.add_column("Timestamp", style="green")
-    table.add_column("# Commands", style="yellow")
-    for state in states:
-        table.add_row(state["hash"][:8], state["timestamp"], str(len(state["commands"])))
-    console.print(table)
-
-
-@command_state.command("restore")
-@click.argument("hash_value")
-def restore_state(hash_value):
-    """Restore to a previous command state by hash."""
-    if restore_command_state(hash_value):
-        click.echo(f"Restored to state {hash_value[:8]}")
-    else:
-        click.echo(f"State {hash_value[:8]} not found.", err=True)
-
-
-@command_state.command("write")
-@click.argument("json_file", required=False, type=click.Path(exists=False))
-def write_state(json_file):
-    """Write a new command state to the lockfile from a JSON file or the current app state."""
-    import traceback
-
-    print("[DEBUG] write_state called")
-    print(f"[DEBUG] LOCKFILE_PATH: {LOCKFILE_PATH}")
-    try:
-        if json_file:
-            print(f"[DEBUG] Loading command state from file: {json_file}")
-            with open(json_file, "r") as f:
-                commands = json.load(f)
-            click.echo(f"Loaded command state from {json_file}.")
-        else:
-            print("[DEBUG] Snapshotting current command state.")
-            commands = get_current_command_state()
-        state_hash = hash_command_state(commands)
-        new_state = {
-            "hash": state_hash,
-            "timestamp": datetime.utcnow().isoformat() + "Z",
-            "commands": commands,
-        }
-        append_lockfile(new_state)
-        print(f"[DEBUG] Wrote new command state {state_hash[:8]} to lockfile at {LOCKFILE_PATH}")
-        click.echo(f"Wrote new command state {state_hash[:8]} to lockfile.")
-    except Exception as e:
-        print(f"[ERROR] Exception in write_state: {e}")
-        print(traceback.format_exc())
-        click.echo(f"[ERROR] Failed to write command state: {e}", err=True)
-
-
 # On CLI startup, check and update lockfile if needed
+# NOTE: The commands group has been moved to mcli.app.commands_cmd for better organization
 
 
 def check_and_update_command_lockfile():
@@ -250,76 +179,7 @@ def {name}_command(name: str = "World"):
     return template
 
 
-@self_app.command("search")
-@click.argument("query", required=False)
-@click.option("--full", "-f", is_flag=True, help="Show full command paths and descriptions")
-def search(query, full):
-    """
-    Search for available commands using fuzzy matching.
-
-    Similar to telescope in neovim, this allows quick fuzzy searching
-    through all available commands in mcli.
-
-    If no query is provided, lists all commands.
-    """
-    # Collect all commands from the application
-    commands = collect_commands()
-
-    # Display the commands in a table
-    table = Table(title="mcli Commands")
-    table.add_column("Command", style="green")
-    table.add_column("Group", style="blue")
-    if full:
-        table.add_column("Path", style="dim")
-        table.add_column("Description", style="yellow")
-
-    if query:
-        filtered_commands = []
-
-        # Try to use fuzzywuzzy for better matching if available
-        if process:
-            # Extract command names for matching
-            command_names = [
-                f"{cmd['group']}.{cmd['name']}" if cmd["group"] else cmd["name"] for cmd in commands
-            ]
-            matches = process.extract(query, command_names, limit=10)
-
-            # Filter to matched commands
-            match_indices = [command_names.index(match[0]) for match in matches if match[1] > 50]
-            filtered_commands = [commands[i] for i in match_indices]
-        else:
-            # Fallback to simple substring matching
-            filtered_commands = [
-                cmd
-                for cmd in commands
-                if query.lower() in cmd["name"].lower()
-                or (cmd["group"] and query.lower() in cmd["group"].lower())
-            ]
-
-        commands = filtered_commands
-
-    # Sort commands by group then name
-    commands.sort(key=lambda c: (c["group"] if c["group"] else "", c["name"]))
-
-    # Add rows to the table
-    for cmd in commands:
-        if full:
-            table.add_row(
-                cmd["name"],
-                cmd["group"] if cmd["group"] else "-",
-                cmd["path"],
-                cmd["help"] if cmd["help"] else "",
-            )
-        else:
-            table.add_row(cmd["name"], cmd["group"] if cmd["group"] else "-")
-
-    console.print(table)
-
-    if not commands:
-        logger.info("No commands found matching the search query")
-        click.echo("No commands found matching the search query")
-
-    return 0
+# NOTE: search command has been moved to mcli.app.commands_cmd for better organization
 
 
 def collect_commands() -> List[Dict[str, Any]]:
@@ -575,110 +435,7 @@ logger = get_logger()
     pass
 
 
-@self_app.command("extract-workflow-commands")
-@click.option(
-    "--output", "-o", type=click.Path(), help="Output file (default: workflow-commands.json)"
-)
-def extract_workflow_commands(output):
-    """
-    Extract workflow commands from Python modules to JSON format.
-
-    This command helps migrate existing workflow commands to portable JSON format.
-    """
-    import inspect
-    from pathlib import Path
-
-    output_file = Path(output) if output else Path("workflow-commands.json")
-
-    workflow_commands = []
-
-    # Try to get workflow from the main app
-    try:
-        from mcli.app.main import create_app
-
-        app = create_app()
-
-        # Check if workflow group exists
-        if "workflow" in app.commands:
-            workflow_group = app.commands["workflow"]
-
-            # Force load lazy group if needed
-            if hasattr(workflow_group, "_load_group"):
-                workflow_group = workflow_group._load_group()
-
-            if hasattr(workflow_group, "commands"):
-                for cmd_name, cmd_obj in workflow_group.commands.items():
-                    # Extract command information
-                    command_info = {
-                        "name": cmd_name,
-                        "group": "workflow",
-                        "description": cmd_obj.help or "Workflow command",
-                        "version": "1.0",
-                        "metadata": {"source": "workflow", "migrated": True},
-                    }
-
-                    # Create a template based on command type
-                    # Replace hyphens with underscores for valid Python function names
-                    safe_name = cmd_name.replace("-", "_")
-
-                    if isinstance(cmd_obj, click.Group):
-                        # For groups, create a template
-                        command_info[
-                            "code"
-                        ] = f'''"""
-{cmd_name} workflow command.
-"""
-import click
-
-@click.group(name="{cmd_name}")
-def app():
-    """{cmd_obj.help or 'Workflow command group'}"""
-    pass
-
-# Add your subcommands here
-'''
-                    else:
-                        # For regular commands, create a template
-                        command_info[
-                            "code"
-                        ] = f'''"""
-{cmd_name} workflow command.
-"""
-import click
-
-@click.command(name="{cmd_name}")
-def app():
-    """{cmd_obj.help or 'Workflow command'}"""
-    click.echo("Workflow command: {cmd_name}")
-    # Add your implementation here
-'''
-
-                    workflow_commands.append(command_info)
-
-        if workflow_commands:
-            import json
-
-            with open(output_file, "w") as f:
-                json.dump(workflow_commands, f, indent=2)
-
-            click.echo(f"✅ Extracted {len(workflow_commands)} workflow commands")
-            click.echo(f"📁 Saved to: {output_file}")
-            click.echo(
-                f"\n💡 These are templates. Import with: mcli self import-commands {output_file}"
-            )
-            click.echo(" Then customize the code in ~/.mcli/commands/<command>.json")
-            return 0
-        else:
-            click.echo("⚠️ No workflow commands found to extract")
-            return 1
-
-    except Exception as e:
-        logger.error(f"Failed to extract workflow commands: {e}")
-        click.echo(f"❌ Failed to extract workflow commands: {e}", err=True)
-        import traceback
-
-        click.echo(traceback.format_exc(), err=True)
-        return 1
+# NOTE: extract-workflow-commands has been moved to mcli.app.commands_cmd for better organization
 
 
 @click.group("plugin")
@@ -1280,13 +1037,7 @@ try:
 except ImportError as e:
     logger.debug(f"Could not load visual command: {e}")
 
-try:
-    from mcli.self.store_cmd import store
-
-    self_app.add_command(store, name="store")
-    logger.debug("Added store command to self group")
-except ImportError as e:
-    logger.debug(f"Could not load store command: {e}")
+# NOTE: store command has been moved to mcli.app.commands_cmd for better organization
 
 # This part is important to make the command available to the CLI
 if __name__ == "__main__":
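
All of the removals in this file follow the NOTE comments: the commands group, search, extract-workflow-commands, and the store registration now live in mcli.app.commands_cmd. For readers unfamiliar with the pattern being relocated, here is a minimal, hypothetical Click sketch (not the package's actual module) of how a group and its subcommands are wired together:

import click


@click.group("commands")
def commands_group():
    """Stand-in for the group now hosted in mcli.app.commands_cmd."""


@commands_group.command("search")
def search():
    """Illustrative subcommand registered through the group decorator."""
    click.echo("searching...")


@click.command("store")
def store():
    """Illustrative subcommand registered explicitly."""
    click.echo("store")


# Explicit registration, the same call the removed try/except blocks used
# (self_app.add_command(store, name="store")).
commands_group.add_command(store, name="store")

if __name__ == "__main__":
    commands_group()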
mcli/self/store_cmd.py CHANGED
@@ -3,14 +3,16 @@ Command Store Management - Sync ~/.mcli/commands/ to git
 Similar to lsh secrets but for workflow commands
 """
 
-import click
 import os
 import shutil
 import subprocess
-from pathlib import Path
 from datetime import datetime
-from mcli.lib.ui.styling import success, error, info, warning
+from pathlib import Path
+
+import click
+
 from mcli.lib.logger.logger import get_logger
+from mcli.lib.ui.styling import error, info, success, warning
 
 logger = get_logger()
 
mcli/test/test_cmd.py CHANGED
@@ -16,15 +16,5 @@ def test_group():
     pass
 
 
-# Import and register subcommands
-try:
-    from mcli.test.cron_test_cmd import cron_test
-
-    test_group.add_command(cron_test, name="cron")
-    logger.debug("Added cron test command to test group")
-except ImportError as e:
-    logger.debug(f"Could not load cron test command: {e}")
-
-
 if __name__ == "__main__":
     test_group()
@@ -139,6 +139,7 @@ def info():
 def test():
     """Test Supabase connection."""
     import os
+
     from mcli.ml.dashboard.common import get_supabase_client
 
     click.echo("🔍 Testing Supabase connection...")
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcli-framework
-Version: 7.8.3
+Version: 7.8.4
 Summary: Portable workflow framework - transform any script into a versioned, schedulable command. Store in ~/.mcli/commands/, version with lockfile, run as daemon or cron job.
 Author-email: Luis Fernandez de la Vara <luis@lefv.io>
 Maintainer-email: Luis Fernandez de la Vara <luis@lefv.io>