mcli-framework 7.1.0__py3-none-any.whl → 7.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mcli-framework might be problematic.

Files changed (94)
  1. mcli/app/completion_cmd.py +59 -49
  2. mcli/app/completion_helpers.py +60 -138
  3. mcli/app/logs_cmd.py +46 -13
  4. mcli/app/main.py +17 -14
  5. mcli/app/model_cmd.py +19 -4
  6. mcli/chat/chat.py +3 -2
  7. mcli/lib/search/cached_vectorizer.py +1 -0
  8. mcli/lib/services/data_pipeline.py +12 -5
  9. mcli/lib/services/lsh_client.py +69 -58
  10. mcli/ml/api/app.py +28 -36
  11. mcli/ml/api/middleware.py +8 -16
  12. mcli/ml/api/routers/admin_router.py +3 -1
  13. mcli/ml/api/routers/auth_router.py +32 -56
  14. mcli/ml/api/routers/backtest_router.py +3 -1
  15. mcli/ml/api/routers/data_router.py +3 -1
  16. mcli/ml/api/routers/model_router.py +35 -74
  17. mcli/ml/api/routers/monitoring_router.py +3 -1
  18. mcli/ml/api/routers/portfolio_router.py +3 -1
  19. mcli/ml/api/routers/prediction_router.py +60 -65
  20. mcli/ml/api/routers/trade_router.py +6 -2
  21. mcli/ml/api/routers/websocket_router.py +12 -9
  22. mcli/ml/api/schemas.py +10 -2
  23. mcli/ml/auth/auth_manager.py +49 -114
  24. mcli/ml/auth/models.py +30 -15
  25. mcli/ml/auth/permissions.py +12 -19
  26. mcli/ml/backtesting/backtest_engine.py +134 -108
  27. mcli/ml/backtesting/performance_metrics.py +142 -108
  28. mcli/ml/cache.py +12 -18
  29. mcli/ml/cli/main.py +37 -23
  30. mcli/ml/config/settings.py +29 -12
  31. mcli/ml/dashboard/app.py +122 -130
  32. mcli/ml/dashboard/app_integrated.py +283 -152
  33. mcli/ml/dashboard/app_supabase.py +176 -108
  34. mcli/ml/dashboard/app_training.py +212 -206
  35. mcli/ml/dashboard/cli.py +14 -5
  36. mcli/ml/data_ingestion/api_connectors.py +51 -81
  37. mcli/ml/data_ingestion/data_pipeline.py +127 -125
  38. mcli/ml/data_ingestion/stream_processor.py +72 -80
  39. mcli/ml/database/migrations/env.py +3 -2
  40. mcli/ml/database/models.py +112 -79
  41. mcli/ml/database/session.py +6 -5
  42. mcli/ml/experimentation/ab_testing.py +149 -99
  43. mcli/ml/features/ensemble_features.py +9 -8
  44. mcli/ml/features/political_features.py +6 -5
  45. mcli/ml/features/recommendation_engine.py +15 -14
  46. mcli/ml/features/stock_features.py +7 -6
  47. mcli/ml/features/test_feature_engineering.py +8 -7
  48. mcli/ml/logging.py +10 -15
  49. mcli/ml/mlops/data_versioning.py +57 -64
  50. mcli/ml/mlops/experiment_tracker.py +49 -41
  51. mcli/ml/mlops/model_serving.py +59 -62
  52. mcli/ml/mlops/pipeline_orchestrator.py +203 -149
  53. mcli/ml/models/base_models.py +8 -7
  54. mcli/ml/models/ensemble_models.py +6 -5
  55. mcli/ml/models/recommendation_models.py +7 -6
  56. mcli/ml/models/test_models.py +18 -14
  57. mcli/ml/monitoring/drift_detection.py +95 -74
  58. mcli/ml/monitoring/metrics.py +10 -22
  59. mcli/ml/optimization/portfolio_optimizer.py +172 -132
  60. mcli/ml/predictions/prediction_engine.py +235 -0
  61. mcli/ml/preprocessing/data_cleaners.py +6 -5
  62. mcli/ml/preprocessing/feature_extractors.py +7 -6
  63. mcli/ml/preprocessing/ml_pipeline.py +3 -2
  64. mcli/ml/preprocessing/politician_trading_preprocessor.py +11 -10
  65. mcli/ml/preprocessing/test_preprocessing.py +4 -4
  66. mcli/ml/scripts/populate_sample_data.py +36 -16
  67. mcli/ml/tasks.py +82 -83
  68. mcli/ml/tests/test_integration.py +86 -76
  69. mcli/ml/tests/test_training_dashboard.py +169 -142
  70. mcli/mygroup/test_cmd.py +2 -1
  71. mcli/self/self_cmd.py +38 -18
  72. mcli/self/test_cmd.py +2 -1
  73. mcli/workflow/dashboard/dashboard_cmd.py +13 -6
  74. mcli/workflow/lsh_integration.py +46 -58
  75. mcli/workflow/politician_trading/commands.py +576 -427
  76. mcli/workflow/politician_trading/config.py +7 -7
  77. mcli/workflow/politician_trading/connectivity.py +35 -33
  78. mcli/workflow/politician_trading/data_sources.py +72 -71
  79. mcli/workflow/politician_trading/database.py +18 -16
  80. mcli/workflow/politician_trading/demo.py +4 -3
  81. mcli/workflow/politician_trading/models.py +5 -5
  82. mcli/workflow/politician_trading/monitoring.py +13 -13
  83. mcli/workflow/politician_trading/scrapers.py +332 -224
  84. mcli/workflow/politician_trading/scrapers_california.py +116 -94
  85. mcli/workflow/politician_trading/scrapers_eu.py +70 -71
  86. mcli/workflow/politician_trading/scrapers_uk.py +118 -90
  87. mcli/workflow/politician_trading/scrapers_us_states.py +125 -92
  88. mcli/workflow/politician_trading/workflow.py +98 -71
  89. {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.2.dist-info}/METADATA +2 -2
  90. {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.2.dist-info}/RECORD +94 -93
  91. {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.2.dist-info}/WHEEL +0 -0
  92. {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.2.dist-info}/entry_points.txt +0 -0
  93. {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.2.dist-info}/licenses/LICENSE +0 -0
  94. {mcli_framework-7.1.0.dist-info → mcli_framework-7.1.2.dist-info}/top_level.txt +0 -0
mcli/ml/tests/test_training_dashboard.py CHANGED
@@ -1,14 +1,15 @@
  """Unit tests for training dashboard functionality"""

- import pytest
- import pandas as pd
- import numpy as np
  from datetime import datetime, timedelta
- from unittest.mock import Mock, patch, MagicMock
+ from unittest.mock import MagicMock, Mock, patch

- from mcli.ml.database.models import Model, ModelStatus, Experiment
+ import numpy as np
+ import pandas as pd
+ import pytest
  from sqlalchemy.orm import Session

+ from mcli.ml.database.models import Experiment, Model, ModelStatus
+

  class TestTrainingDashboard:
  """Test suite for training dashboard functions"""
@@ -27,53 +28,53 @@ class TestTrainingDashboard:
  # Bitcoin-style model comparison data
  model_configs = [
  {
- 'name': 'Random Forest',
- 'type': 'random_forest',
- 'test_rmse': 150.5,
- 'test_mae': 120.3,
- 'test_r2': 0.85,
- 'mape': 5.5
+ "name": "Random Forest",
+ "type": "random_forest",
+ "test_rmse": 150.5,
+ "test_mae": 120.3,
+ "test_r2": 0.85,
+ "mape": 5.5,
  },
  {
- 'name': 'Gradient Boosting',
- 'type': 'gradient_boosting',
- 'test_rmse': 155.2,
- 'test_mae': 125.8,
- 'test_r2': 0.83,
- 'mape': 6.2
+ "name": "Gradient Boosting",
+ "type": "gradient_boosting",
+ "test_rmse": 155.2,
+ "test_mae": 125.8,
+ "test_r2": 0.83,
+ "mape": 6.2,
  },
  {
- 'name': 'Linear Regression',
- 'type': 'linear_regression',
- 'test_rmse': 180.0,
- 'test_mae': 145.0,
- 'test_r2': 0.75,
- 'mape': 8.5
+ "name": "Linear Regression",
+ "type": "linear_regression",
+ "test_rmse": 180.0,
+ "test_mae": 145.0,
+ "test_r2": 0.75,
+ "mape": 8.5,
  },
  {
- 'name': 'Ridge Regression',
- 'type': 'ridge',
- 'test_rmse': 175.5,
- 'test_mae': 140.2,
- 'test_r2': 0.78,
- 'mape': 7.8
+ "name": "Ridge Regression",
+ "type": "ridge",
+ "test_rmse": 175.5,
+ "test_mae": 140.2,
+ "test_r2": 0.78,
+ "mape": 7.8,
  },
  {
- 'name': 'Lasso Regression',
- 'type': 'lasso',
- 'test_rmse': 178.0,
- 'test_mae': 142.5,
- 'test_r2': 0.76,
- 'mape': 8.1
- }
+ "name": "Lasso Regression",
+ "type": "lasso",
+ "test_rmse": 178.0,
+ "test_mae": 142.5,
+ "test_r2": 0.76,
+ "mape": 8.1,
+ },
  ]

  for i, config in enumerate(model_configs):
  model = Mock(spec=Model)
- model.id = f'model-{i}'
- model.name = config['name']
- model.version = '1.0.0'
- model.model_type = config['type']
+ model.id = f"model-{i}"
+ model.name = config["name"]
+ model.version = "1.0.0"
+ model.model_type = config["type"]
  model.status = ModelStatus.DEPLOYED if i < 2 else ModelStatus.TRAINED

  model.train_accuracy = 0.90 + np.random.uniform(-0.05, 0.05)
@@ -85,23 +86,29 @@ class TestTrainingDashboard:
  model.test_loss = 0.20 + np.random.uniform(-0.05, 0.05)

  # Bitcoin-style metrics
- model.test_rmse = config['test_rmse']
- model.test_mae = config['test_mae']
- model.test_r2 = config['test_r2']
+ model.test_rmse = config["test_rmse"]
+ model.test_mae = config["test_mae"]
+ model.test_r2 = config["test_r2"]

  model.metrics = {
- 'rmse': config['test_rmse'],
- 'mae': config['test_mae'],
- 'r2': config['test_r2'],
- 'mape': config['mape']
+ "rmse": config["test_rmse"],
+ "mae": config["test_mae"],
+ "r2": config["test_r2"],
+ "mape": config["mape"],
  }

  # Feature names
  model.feature_names = [
- 'lag_1', 'lag_7', 'lag_30',
- 'ma_7', 'ma_14', 'ma_30',
- 'volatility_7', 'volatility_14',
- 'price_change_1', 'price_change_7'
+ "lag_1",
+ "lag_7",
+ "lag_30",
+ "ma_7",
+ "ma_14",
+ "ma_30",
+ "volatility_7",
+ "volatility_14",
+ "price_change_1",
+ "price_change_7",
  ]

  model.created_at = datetime.utcnow() - timedelta(days=i)
@@ -118,22 +125,22 @@ class TestTrainingDashboard:

  for i in range(10):
  exp = Mock(spec=Experiment)
- exp.id = f'exp-{i}'
- exp.name = f'Experiment {i}'
- exp.status = 'completed' if i < 7 else ('running' if i < 9 else 'failed')
- exp.started_at = datetime.utcnow() - timedelta(hours=i*2)
- exp.completed_at = datetime.utcnow() - timedelta(hours=i*2-1) if exp.status == 'completed' else None
- exp.duration_seconds = 3600 if exp.status == 'completed' else None
-
- exp.hyperparameters = {
- 'learning_rate': 0.01,
- 'n_estimators': 100,
- 'max_depth': 10
- }
-
- exp.train_metrics = {'loss': 0.15, 'accuracy': 0.90}
- exp.val_metrics = {'loss': 0.18, 'accuracy': 0.88}
- exp.test_metrics = {'loss': 0.20, 'accuracy': 0.85}
+ exp.id = f"exp-{i}"
+ exp.name = f"Experiment {i}"
+ exp.status = "completed" if i < 7 else ("running" if i < 9 else "failed")
+ exp.started_at = datetime.utcnow() - timedelta(hours=i * 2)
+ exp.completed_at = (
+ datetime.utcnow() - timedelta(hours=i * 2 - 1)
+ if exp.status == "completed"
+ else None
+ )
+ exp.duration_seconds = 3600 if exp.status == "completed" else None
+
+ exp.hyperparameters = {"learning_rate": 0.01, "n_estimators": 100, "max_depth": 10}
+
+ exp.train_metrics = {"loss": 0.15, "accuracy": 0.90}
+ exp.val_metrics = {"loss": 0.18, "accuracy": 0.88}
+ exp.test_metrics = {"loss": 0.20, "accuracy": 0.85}

  experiments.append(exp)

@@ -142,43 +149,45 @@ class TestTrainingDashboard:
  def test_model_comparison_metrics(self, sample_models):
  """Test model comparison metrics calculation"""
  # Convert to DataFrame as the dashboard would
- df = pd.DataFrame([
- {
- 'name': m.name,
- 'test_rmse': m.test_rmse,
- 'test_mae': m.test_mae,
- 'test_r2': m.test_r2,
- 'mape': m.metrics['mape']
- }
- for m in sample_models
- ])
+ df = pd.DataFrame(
+ [
+ {
+ "name": m.name,
+ "test_rmse": m.test_rmse,
+ "test_mae": m.test_mae,
+ "test_r2": m.test_r2,
+ "mape": m.metrics["mape"],
+ }
+ for m in sample_models
+ ]
+ )

  # Test ranking by RMSE
- sorted_by_rmse = df.sort_values('test_rmse')
- assert sorted_by_rmse.iloc[0]['name'] == 'Random Forest'
- assert sorted_by_rmse.iloc[0]['test_rmse'] < 155
+ sorted_by_rmse = df.sort_values("test_rmse")
+ assert sorted_by_rmse.iloc[0]["name"] == "Random Forest"
+ assert sorted_by_rmse.iloc[0]["test_rmse"] < 155

  # Test ranking by R²
- sorted_by_r2 = df.sort_values('test_r2', ascending=False)
- assert sorted_by_r2.iloc[0]['test_r2'] > 0.8
+ sorted_by_r2 = df.sort_values("test_r2", ascending=False)
+ assert sorted_by_r2.iloc[0]["test_r2"] > 0.8

  # Test ranking by MAE
- sorted_by_mae = df.sort_values('test_mae')
- assert sorted_by_mae.iloc[0]['test_mae'] < 125
+ sorted_by_mae = df.sort_values("test_mae")
+ assert sorted_by_mae.iloc[0]["test_mae"] < 125

  def test_model_performance_aggregation(self, sample_models):
  """Test aggregation of model performance"""
  metrics = {
- 'total_models': len(sample_models),
- 'deployed_models': sum(1 for m in sample_models if m.status == ModelStatus.DEPLOYED),
- 'avg_rmse': np.mean([m.test_rmse for m in sample_models]),
- 'avg_r2': np.mean([m.test_r2 for m in sample_models]),
+ "total_models": len(sample_models),
+ "deployed_models": sum(1 for m in sample_models if m.status == ModelStatus.DEPLOYED),
+ "avg_rmse": np.mean([m.test_rmse for m in sample_models]),
+ "avg_r2": np.mean([m.test_r2 for m in sample_models]),
  }

- assert metrics['total_models'] == 5
- assert metrics['deployed_models'] == 2
- assert 150 < metrics['avg_rmse'] < 180
- assert 0.75 < metrics['avg_r2'] < 0.85
+ assert metrics["total_models"] == 5
+ assert metrics["deployed_models"] == 2
+ assert 150 < metrics["avg_rmse"] < 180
+ assert 0.75 < metrics["avg_r2"] < 0.85

  def test_feature_importance_calculation(self, sample_models):
  """Test feature importance extraction and ranking"""
@@ -186,18 +195,17 @@ class TestTrainingDashboard:

  # Simulate feature importance
  importance = np.random.dirichlet(np.ones(len(model.feature_names)))
- feature_df = pd.DataFrame({
- 'feature': model.feature_names,
- 'importance': importance
- }).sort_values('importance', ascending=False)
+ feature_df = pd.DataFrame(
+ {"feature": model.feature_names, "importance": importance}
+ ).sort_values("importance", ascending=False)

  # Test that importances sum to 1
- assert np.isclose(feature_df['importance'].sum(), 1.0)
+ assert np.isclose(feature_df["importance"].sum(), 1.0)

  # Test top features
  top_5 = feature_df.head(5)
  assert len(top_5) == 5
- assert all(top_5['importance'] > 0)
+ assert all(top_5["importance"] > 0)

  def test_residuals_analysis(self):
  """Test residual analysis calculations"""
@@ -220,6 +228,7 @@ class TestTrainingDashboard:

  # Test normality (using simple statistics)
  from scipy import stats
+
  _, p_value = stats.normaltest(residuals)
  # With random data, should generally pass normality test
  assert 0 <= p_value <= 1
@@ -240,7 +249,7 @@ class TestTrainingDashboard:

  def test_training_duration_analysis(self, sample_experiments):
  """Test training duration analysis"""
- completed = [exp for exp in sample_experiments if exp.status == 'completed']
+ completed = [exp for exp in sample_experiments if exp.status == "completed"]

  durations = [exp.duration_seconds for exp in completed]
  avg_duration = np.mean(durations)
@@ -252,54 +261,72 @@ class TestTrainingDashboard:

  def test_model_comparison_ranking(self, sample_models):
  """Test ranking models by multiple metrics"""
- df = pd.DataFrame([
- {
- 'name': m.name,
- 'test_rmse': m.test_rmse,
- 'test_mae': m.test_mae,
- 'test_r2': m.test_r2,
- }
- for m in sample_models
- ])
+ df = pd.DataFrame(
+ [
+ {
+ "name": m.name,
+ "test_rmse": m.test_rmse,
+ "test_mae": m.test_mae,
+ "test_r2": m.test_r2,
+ }
+ for m in sample_models
+ ]
+ )

  # Rank by RMSE (lower is better)
- df['rank_rmse'] = df['test_rmse'].rank()
+ df["rank_rmse"] = df["test_rmse"].rank()

  # Rank by R² (higher is better)
- df['rank_r2'] = df['test_r2'].rank(ascending=False)
+ df["rank_r2"] = df["test_r2"].rank(ascending=False)

  # Composite rank
- df['composite_rank'] = (df['rank_rmse'] + df['rank_r2']) / 2
+ df["composite_rank"] = (df["rank_rmse"] + df["rank_r2"]) / 2

- best_overall = df.loc[df['composite_rank'].idxmin()]
+ best_overall = df.loc[df["composite_rank"].idxmin()]

  # Random Forest should be among the best
- assert best_overall['test_r2'] > 0.8
- assert best_overall['test_rmse'] < 160
+ assert best_overall["test_r2"] > 0.8
+ assert best_overall["test_rmse"] < 160

  def test_feature_categorization(self):
  """Test feature categorization (lag, MA, volatility, etc.)"""
  features = [
- 'lag_1', 'lag_7', 'lag_30',
- 'ma_7', 'ma_14', 'sma_30', 'ema_20',
- 'volatility_7', 'volatility_14', 'std_30',
- 'price_change_1', 'pct_change_7',
- 'rsi_14', 'macd', 'bollinger_upper'
+ "lag_1",
+ "lag_7",
+ "lag_30",
+ "ma_7",
+ "ma_14",
+ "sma_30",
+ "ema_20",
+ "volatility_7",
+ "volatility_14",
+ "std_30",
+ "price_change_1",
+ "pct_change_7",
+ "rsi_14",
+ "macd",
+ "bollinger_upper",
  ]

  categories = {
- 'Lag Features': [f for f in features if 'lag' in f.lower()],
- 'Moving Averages': [f for f in features if any(x in f.lower() for x in ['ma', 'sma', 'ema'])],
- 'Volatility': [f for f in features if any(x in f.lower() for x in ['volatility', 'std'])],
- 'Price Changes': [f for f in features if 'change' in f.lower() or 'pct' in f.lower()],
- 'Technical': [f for f in features if any(x in f.lower() for x in ['rsi', 'macd', 'bollinger'])]
+ "Lag Features": [f for f in features if "lag" in f.lower()],
+ "Moving Averages": [
+ f for f in features if any(x in f.lower() for x in ["ma", "sma", "ema"])
+ ],
+ "Volatility": [
+ f for f in features if any(x in f.lower() for x in ["volatility", "std"])
+ ],
+ "Price Changes": [f for f in features if "change" in f.lower() or "pct" in f.lower()],
+ "Technical": [
+ f for f in features if any(x in f.lower() for x in ["rsi", "macd", "bollinger"])
+ ],
  }

- assert len(categories['Lag Features']) == 3
- assert len(categories['Moving Averages']) == 4
- assert len(categories['Volatility']) == 3
- assert len(categories['Price Changes']) == 2
- assert len(categories['Technical']) == 3
+ assert len(categories["Lag Features"]) == 3
+ assert len(categories["Moving Averages"]) == 4
+ assert len(categories["Volatility"]) == 3
+ assert len(categories["Price Changes"]) == 2
+ assert len(categories["Technical"]) == 3

  def test_mape_calculation(self):
  """Test Mean Absolute Percentage Error calculation"""
@@ -318,7 +345,7 @@ class TestTrainingDashboard:
  errors = np.array([5, 3, 8, 2, 10])

  mae = np.mean(np.abs(errors))
- rmse = np.sqrt(np.mean(errors ** 2))
+ rmse = np.sqrt(np.mean(errors**2))

  assert rmse >= mae

@@ -347,9 +374,9 @@ class TestTrainingDashboard:
  for exp in sample_experiments:
  status_counts[exp.status] = status_counts.get(exp.status, 0) + 1

- assert status_counts['completed'] == 7
- assert status_counts['running'] == 2
- assert status_counts['failed'] == 1
+ assert status_counts["completed"] == 7
+ assert status_counts["running"] == 2
+ assert status_counts["failed"] == 1
  assert sum(status_counts.values()) == 10

@@ -358,10 +385,10 @@ class TestModelVersioning:

  def test_version_comparison(self):
  """Test semantic version comparison"""
- versions = ['1.0.0', '1.1.0', '1.0.1', '2.0.0', '1.2.0']
+ versions = ["1.0.0", "1.1.0", "1.0.1", "2.0.0", "1.2.0"]

  # Parse and sort versions
- parsed = [tuple(map(int, v.split('.'))) for v in versions]
+ parsed = [tuple(map(int, v.split("."))) for v in versions]
  sorted_versions = sorted(parsed)

  assert sorted_versions[0] == (1, 0, 0)
@@ -370,18 +397,18 @@ class TestModelVersioning:
  def test_model_deployment_tracking(self):
  """Test tracking which models are deployed"""
  models = [
- {'name': 'model-a', 'version': '1.0.0', 'deployed': True},
- {'name': 'model-a', 'version': '1.1.0', 'deployed': False},
- {'name': 'model-b', 'version': '1.0.0', 'deployed': True},
+ {"name": "model-a", "version": "1.0.0", "deployed": True},
+ {"name": "model-a", "version": "1.1.0", "deployed": False},
+ {"name": "model-b", "version": "1.0.0", "deployed": True},
  ]

- deployed = [m for m in models if m['deployed']]
+ deployed = [m for m in models if m["deployed"]]
  assert len(deployed) == 2

  # Test that only one version of each model is deployed
- deployed_names = [m['name'] for m in deployed]
+ deployed_names = [m["name"] for m in deployed]
  assert len(deployed_names) == len(set(deployed_names))


- if __name__ == '__main__':
- pytest.main([__file__, '-v'])
+ if __name__ == "__main__":
+ pytest.main([__file__, "-v"])
mcli/mygroup/test_cmd.py CHANGED
@@ -1 +1,2 @@
- def test(): pass
+ def test():
+ pass
mcli/self/self_cmd.py CHANGED
@@ -1137,11 +1137,8 @@ def check_ci_status(version: str) -> tuple[bool, Optional[str]]:
  response = requests.get(
  "https://api.github.com/repos/gwicho38/mcli/actions/runs",
  params={"per_page": 5},
- headers={
- "Accept": "application/vnd.github.v3+json",
- "User-Agent": "mcli-cli"
- },
- timeout=10
+ headers={"Accept": "application/vnd.github.v3+json", "User-Agent": "mcli-cli"},
+ timeout=10,
  )

  if response.status_code == 200:
@@ -1150,7 +1147,8 @@ def check_ci_status(version: str) -> tuple[bool, Optional[str]]:

  # Find the most recent completed run for main branch
  main_runs = [
- run for run in runs
+ run
+ for run in runs
  if run.get("head_branch") == "main" and run.get("status") == "completed"
  ]

@@ -1209,7 +1207,10 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
  if pre:
  # Include pre-releases
  all_versions = list(pypi_data["releases"].keys())
- latest_version = max(all_versions, key=lambda v: [int(x) for x in v.split(".")] if v[0].isdigit() else [0])
+ latest_version = max(
+ all_versions,
+ key=lambda v: [int(x) for x in v.split(".")] if v[0].isdigit() else [0],
+ )
  else:
  # Only stable releases
  latest_version = pypi_data["info"]["version"]
@@ -1232,7 +1233,9 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
  latest_parsed = parse_version(latest_version)

  if current_parsed >= latest_parsed:
- console.print(f"[green]✅ Your version ({current_version}) is up to date or newer[/green]")
+ console.print(
+ f"[green]✅ Your version ({current_version}) is up to date or newer[/green]"
+ )
  return

  console.print(f"[yellow]⬆️ Update available: {current_version} → {latest_version}[/yellow]")
@@ -1250,6 +1253,7 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
  # Ask for confirmation unless --yes flag is used
  if not yes:
  from rich.prompt import Confirm
+
  if not Confirm.ask(f"[yellow]Install mcli {latest_version}?[/yellow]"):
  console.print("[yellow]Update cancelled[/yellow]")
  return
@@ -1263,8 +1267,12 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
  console.print("[red]✗ CI build is failing for the latest version[/red]")
  if ci_url:
  console.print(f"[yellow] View CI status: {ci_url}[/yellow]")
- console.print("[yellow]⚠️ Update blocked to prevent installing a broken version[/yellow]")
- console.print("[dim] Use --skip-ci-check to install anyway (not recommended)[/dim]")
+ console.print(
+ "[yellow]⚠️ Update blocked to prevent installing a broken version[/yellow]"
+ )
+ console.print(
+ "[dim] Use --skip-ci-check to install anyway (not recommended)[/dim]"
+ )
  return
  else:
  console.print("[green]✓ CI build is passing[/green]")
@@ -1273,20 +1281,27 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
  console.print(f"[cyan]📦 Installing mcli {latest_version}...[/cyan]")

  # Detect if we're running from a uv tool installation
- # uv tool installations are typically in ~/.local/share/uv/tools/
- is_uv_tool = ".local/share/uv/tools/" in sys.executable or \
- "\\AppData\\Local\\uv\\tools\\" in sys.executable
+ # uv tool installations are typically in ~/.local/share/uv/tools/ or similar
+ executable_path = str(sys.executable).replace("\\", "/")  # Normalize path separators
+
+ is_uv_tool = (
+ "/uv/tools/" in executable_path
+ or "/.local/share/uv/tools/" in executable_path
+ or "\\AppData\\Local\\uv\\tools\\" in str(sys.executable)
+ )

  if is_uv_tool:
- # Use uv tool install for uv tool environments
+ # Use uv tool install for uv tool environments (uv doesn't include pip)
  console.print("[dim]Detected uv tool installation, using 'uv tool install'[/dim]")
  cmd = ["uv", "tool", "install", "--force", "mcli-framework"]
  if pre:
  # For pre-releases, we'd need to specify the version explicitly
  # For now, --pre is not supported with uv tool install in this context
- console.print("[yellow]⚠️ Pre-release flag not supported with uv tool install[/yellow]")
+ console.print(
+ "[yellow]⚠️ Pre-release flag not supported with uv tool install[/yellow]"
+ )
  else:
- # Use pip to upgrade for regular installations
+ # Use pip to upgrade for regular installations (requires pip in environment)
  cmd = [sys.executable, "-m", "pip", "install", "--upgrade", "mcli-framework"]
  if pre:
  cmd.append("--pre")
@@ -1296,9 +1311,13 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
  if result.returncode == 0:
  console.print(f"[green]✅ Successfully updated to mcli {latest_version}![/green]")
  if is_uv_tool:
- console.print("[yellow]ℹ️ Run 'hash -r' to refresh your shell's command cache[/yellow]")
+ console.print(
+ "[yellow]ℹ️ Run 'hash -r' to refresh your shell's command cache[/yellow]"
+ )
  else:
- console.print("[yellow]ℹ️ Restart your terminal or run 'hash -r' to use the new version[/yellow]")
+ console.print(
+ "[yellow]ℹ️ Restart your terminal or run 'hash -r' to use the new version[/yellow]"
+ )
  else:
  console.print(f"[red]❌ Update failed:[/red]")
  console.print(result.stderr)
@@ -1306,6 +1325,7 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
  except Exception as e:
  console.print(f"[red]❌ Error during update: {e}[/red]")
  import traceback
+
  console.print(f"[dim]{traceback.format_exc()}[/dim]")

mcli/self/test_cmd.py CHANGED
@@ -1 +1,2 @@
- def test(): pass
+ def test():
+ pass
mcli/workflow/dashboard/dashboard_cmd.py CHANGED
@@ -36,11 +36,17 @@ def launch(port, host, debug):

  # Build streamlit command
  cmd = [
- sys.executable, "-m", "streamlit", "run",
+ sys.executable,
+ "-m",
+ "streamlit",
+ "run",
  str(dashboard_path),
- "--server.port", str(port),
- "--server.address", host,
- "--browser.gatherUsageStats", "false"
+ "--server.port",
+ str(port),
+ "--server.address",
+ host,
+ "--browser.gatherUsageStats",
+ "false",
  ]

  if debug:
@@ -68,8 +74,8 @@ def info():

  # Check if dependencies are installed
  try:
- import streamlit
  import plotly
+ import streamlit

  click.echo("✅ Dashboard dependencies installed")
  click.echo(f" Streamlit version: {streamlit.__version__}")
@@ -81,6 +87,7 @@ def info():
  # Check database connection
  try:
  from mcli.ml.config import settings
+
  click.echo(f"\n📁 Database URL: {settings.database.url}")
  click.echo(f"📍 Redis URL: {settings.redis.url}")
  except Exception as e:
@@ -110,4 +117,4 @@ def service(action):


  if __name__ == "__main__":
- dashboard()
+ dashboard()