mcli-framework 7.1.1-py3-none-any.whl → 7.1.3-py3-none-any.whl
This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Potentially problematic release: this version of mcli-framework might be problematic.
- mcli/app/completion_cmd.py +59 -49
- mcli/app/completion_helpers.py +60 -138
- mcli/app/logs_cmd.py +6 -2
- mcli/app/main.py +17 -14
- mcli/app/model_cmd.py +19 -4
- mcli/chat/chat.py +3 -2
- mcli/lib/search/cached_vectorizer.py +1 -0
- mcli/lib/services/data_pipeline.py +12 -5
- mcli/lib/services/lsh_client.py +68 -57
- mcli/ml/api/app.py +28 -36
- mcli/ml/api/middleware.py +8 -16
- mcli/ml/api/routers/admin_router.py +3 -1
- mcli/ml/api/routers/auth_router.py +32 -56
- mcli/ml/api/routers/backtest_router.py +3 -1
- mcli/ml/api/routers/data_router.py +3 -1
- mcli/ml/api/routers/model_router.py +35 -74
- mcli/ml/api/routers/monitoring_router.py +3 -1
- mcli/ml/api/routers/portfolio_router.py +3 -1
- mcli/ml/api/routers/prediction_router.py +60 -65
- mcli/ml/api/routers/trade_router.py +6 -2
- mcli/ml/api/routers/websocket_router.py +12 -9
- mcli/ml/api/schemas.py +10 -2
- mcli/ml/auth/auth_manager.py +49 -114
- mcli/ml/auth/models.py +30 -15
- mcli/ml/auth/permissions.py +12 -19
- mcli/ml/backtesting/backtest_engine.py +134 -108
- mcli/ml/backtesting/performance_metrics.py +142 -108
- mcli/ml/cache.py +12 -18
- mcli/ml/cli/main.py +37 -23
- mcli/ml/config/settings.py +29 -12
- mcli/ml/dashboard/app.py +122 -130
- mcli/ml/dashboard/app_integrated.py +955 -154
- mcli/ml/dashboard/app_supabase.py +176 -108
- mcli/ml/dashboard/app_training.py +212 -206
- mcli/ml/dashboard/cli.py +14 -5
- mcli/ml/data_ingestion/api_connectors.py +51 -81
- mcli/ml/data_ingestion/data_pipeline.py +127 -125
- mcli/ml/data_ingestion/stream_processor.py +72 -80
- mcli/ml/database/migrations/env.py +3 -2
- mcli/ml/database/models.py +112 -79
- mcli/ml/database/session.py +6 -5
- mcli/ml/experimentation/ab_testing.py +149 -99
- mcli/ml/features/ensemble_features.py +9 -8
- mcli/ml/features/political_features.py +6 -5
- mcli/ml/features/recommendation_engine.py +15 -14
- mcli/ml/features/stock_features.py +7 -6
- mcli/ml/features/test_feature_engineering.py +8 -7
- mcli/ml/logging.py +10 -15
- mcli/ml/mlops/data_versioning.py +57 -64
- mcli/ml/mlops/experiment_tracker.py +49 -41
- mcli/ml/mlops/model_serving.py +59 -62
- mcli/ml/mlops/pipeline_orchestrator.py +203 -149
- mcli/ml/models/base_models.py +8 -7
- mcli/ml/models/ensemble_models.py +6 -5
- mcli/ml/models/recommendation_models.py +7 -6
- mcli/ml/models/test_models.py +18 -14
- mcli/ml/monitoring/drift_detection.py +95 -74
- mcli/ml/monitoring/metrics.py +10 -22
- mcli/ml/optimization/portfolio_optimizer.py +172 -132
- mcli/ml/predictions/prediction_engine.py +62 -50
- mcli/ml/preprocessing/data_cleaners.py +6 -5
- mcli/ml/preprocessing/feature_extractors.py +7 -6
- mcli/ml/preprocessing/ml_pipeline.py +3 -2
- mcli/ml/preprocessing/politician_trading_preprocessor.py +11 -10
- mcli/ml/preprocessing/test_preprocessing.py +4 -4
- mcli/ml/scripts/populate_sample_data.py +36 -16
- mcli/ml/tasks.py +82 -83
- mcli/ml/tests/test_integration.py +86 -76
- mcli/ml/tests/test_training_dashboard.py +169 -142
- mcli/mygroup/test_cmd.py +2 -1
- mcli/self/self_cmd.py +31 -16
- mcli/self/test_cmd.py +2 -1
- mcli/workflow/dashboard/dashboard_cmd.py +13 -6
- mcli/workflow/lsh_integration.py +46 -58
- mcli/workflow/politician_trading/commands.py +576 -427
- mcli/workflow/politician_trading/config.py +7 -7
- mcli/workflow/politician_trading/connectivity.py +35 -33
- mcli/workflow/politician_trading/data_sources.py +72 -71
- mcli/workflow/politician_trading/database.py +18 -16
- mcli/workflow/politician_trading/demo.py +4 -3
- mcli/workflow/politician_trading/models.py +5 -5
- mcli/workflow/politician_trading/monitoring.py +13 -13
- mcli/workflow/politician_trading/scrapers.py +332 -224
- mcli/workflow/politician_trading/scrapers_california.py +116 -94
- mcli/workflow/politician_trading/scrapers_eu.py +70 -71
- mcli/workflow/politician_trading/scrapers_uk.py +118 -90
- mcli/workflow/politician_trading/scrapers_us_states.py +125 -92
- mcli/workflow/politician_trading/workflow.py +98 -71
- {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/METADATA +1 -1
- {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/RECORD +94 -94
- {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/WHEEL +0 -0
- {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/entry_points.txt +0 -0
- {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/licenses/LICENSE +0 -0
- {mcli_framework-7.1.1.dist-info → mcli_framework-7.1.3.dist-info}/top_level.txt +0 -0
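The per-file line counts above summarize the release; detailed hunks for a subset of the changed files follow. To reproduce any of these comparisons locally, a minimal sketch (assuming both wheels have already been downloaded, e.g. with `pip download mcli-framework==7.1.1 --no-deps` and `pip download mcli-framework==7.1.3 --no-deps`; the paths and the member name below are only illustrative) is:

import difflib
import zipfile

# Illustrative paths; point these at wherever the two wheels were downloaded.
OLD_WHEEL = "mcli_framework-7.1.1-py3-none-any.whl"
NEW_WHEEL = "mcli_framework-7.1.3-py3-none-any.whl"
MEMBER = "mcli/ml/tests/test_training_dashboard.py"  # any path from the list above


def read_member(wheel_path: str, member: str) -> list[str]:
    """Read one file out of a wheel; wheels are ordinary zip archives."""
    with zipfile.ZipFile(wheel_path) as zf:
        return zf.read(member).decode("utf-8").splitlines(keepends=True)


# Produce a unified diff in the same format as the hunks shown below.
diff = difflib.unified_diff(
    read_member(OLD_WHEEL, MEMBER),
    read_member(NEW_WHEEL, MEMBER),
    fromfile=f"7.1.1/{MEMBER}",
    tofile=f"7.1.3/{MEMBER}",
)
print("".join(diff))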
mcli/ml/tests/test_training_dashboard.py
CHANGED
@@ -1,14 +1,15 @@
 """Unit tests for training dashboard functionality"""
 
-import pytest
-import pandas as pd
-import numpy as np
 from datetime import datetime, timedelta
-from unittest.mock import Mock, patch
+from unittest.mock import MagicMock, Mock, patch
 
-
+import numpy as np
+import pandas as pd
+import pytest
 from sqlalchemy.orm import Session
 
+from mcli.ml.database.models import Experiment, Model, ModelStatus
+
 
 class TestTrainingDashboard:
     """Test suite for training dashboard functions"""
@@ -27,53 +28,53 @@ class TestTrainingDashboard:
         # Bitcoin-style model comparison data
         model_configs = [
             {
-
-
-
-
-
-
+                "name": "Random Forest",
+                "type": "random_forest",
+                "test_rmse": 150.5,
+                "test_mae": 120.3,
+                "test_r2": 0.85,
+                "mape": 5.5,
             },
             {
-
-
-
-
-
-
+                "name": "Gradient Boosting",
+                "type": "gradient_boosting",
+                "test_rmse": 155.2,
+                "test_mae": 125.8,
+                "test_r2": 0.83,
+                "mape": 6.2,
             },
             {
-
-
-
-
-
-
+                "name": "Linear Regression",
+                "type": "linear_regression",
+                "test_rmse": 180.0,
+                "test_mae": 145.0,
+                "test_r2": 0.75,
+                "mape": 8.5,
             },
             {
-
-
-
-
-
-
+                "name": "Ridge Regression",
+                "type": "ridge",
+                "test_rmse": 175.5,
+                "test_mae": 140.2,
+                "test_r2": 0.78,
+                "mape": 7.8,
             },
             {
-
-
-
-
-
-
-            }
+                "name": "Lasso Regression",
+                "type": "lasso",
+                "test_rmse": 178.0,
+                "test_mae": 142.5,
+                "test_r2": 0.76,
+                "mape": 8.1,
+            },
         ]
 
         for i, config in enumerate(model_configs):
             model = Mock(spec=Model)
-            model.id = f
-            model.name = config[
-            model.version =
-            model.model_type = config[
+            model.id = f"model-{i}"
+            model.name = config["name"]
+            model.version = "1.0.0"
+            model.model_type = config["type"]
             model.status = ModelStatus.DEPLOYED if i < 2 else ModelStatus.TRAINED
 
             model.train_accuracy = 0.90 + np.random.uniform(-0.05, 0.05)
@@ -85,23 +86,29 @@ class TestTrainingDashboard:
             model.test_loss = 0.20 + np.random.uniform(-0.05, 0.05)
 
             # Bitcoin-style metrics
-            model.test_rmse = config[
-            model.test_mae = config[
-            model.test_r2 = config[
+            model.test_rmse = config["test_rmse"]
+            model.test_mae = config["test_mae"]
+            model.test_r2 = config["test_r2"]
 
             model.metrics = {
-
-
-
-
+                "rmse": config["test_rmse"],
+                "mae": config["test_mae"],
+                "r2": config["test_r2"],
+                "mape": config["mape"],
             }
 
             # Feature names
             model.feature_names = [
-
-
-
-
+                "lag_1",
+                "lag_7",
+                "lag_30",
+                "ma_7",
+                "ma_14",
+                "ma_30",
+                "volatility_7",
+                "volatility_14",
+                "price_change_1",
+                "price_change_7",
             ]
 
             model.created_at = datetime.utcnow() - timedelta(days=i)
@@ -118,22 +125,22 @@ class TestTrainingDashboard:
 
         for i in range(10):
             exp = Mock(spec=Experiment)
-            exp.id = f
-            exp.name = f
-            exp.status =
-            exp.started_at = datetime.utcnow() - timedelta(hours=i*2)
-            exp.completed_at =
-
-
-
-
-
-
-            }
-
-            exp.train_metrics = {
-            exp.val_metrics = {
-            exp.test_metrics = {
+            exp.id = f"exp-{i}"
+            exp.name = f"Experiment {i}"
+            exp.status = "completed" if i < 7 else ("running" if i < 9 else "failed")
+            exp.started_at = datetime.utcnow() - timedelta(hours=i * 2)
+            exp.completed_at = (
+                datetime.utcnow() - timedelta(hours=i * 2 - 1)
+                if exp.status == "completed"
+                else None
+            )
+            exp.duration_seconds = 3600 if exp.status == "completed" else None
+
+            exp.hyperparameters = {"learning_rate": 0.01, "n_estimators": 100, "max_depth": 10}
+
+            exp.train_metrics = {"loss": 0.15, "accuracy": 0.90}
+            exp.val_metrics = {"loss": 0.18, "accuracy": 0.88}
+            exp.test_metrics = {"loss": 0.20, "accuracy": 0.85}
 
             experiments.append(exp)
 
@@ -142,43 +149,45 @@ class TestTrainingDashboard:
     def test_model_comparison_metrics(self, sample_models):
         """Test model comparison metrics calculation"""
         # Convert to DataFrame as the dashboard would
-        df = pd.DataFrame(
-
-
-
-
-
-
-
-
-
+        df = pd.DataFrame(
+            [
+                {
+                    "name": m.name,
+                    "test_rmse": m.test_rmse,
+                    "test_mae": m.test_mae,
+                    "test_r2": m.test_r2,
+                    "mape": m.metrics["mape"],
+                }
+                for m in sample_models
+            ]
+        )
 
         # Test ranking by RMSE
-        sorted_by_rmse = df.sort_values(
-        assert sorted_by_rmse.iloc[0][
-        assert sorted_by_rmse.iloc[0][
+        sorted_by_rmse = df.sort_values("test_rmse")
+        assert sorted_by_rmse.iloc[0]["name"] == "Random Forest"
+        assert sorted_by_rmse.iloc[0]["test_rmse"] < 155
 
         # Test ranking by R²
-        sorted_by_r2 = df.sort_values(
-        assert sorted_by_r2.iloc[0][
+        sorted_by_r2 = df.sort_values("test_r2", ascending=False)
+        assert sorted_by_r2.iloc[0]["test_r2"] > 0.8
 
         # Test ranking by MAE
-        sorted_by_mae = df.sort_values(
-        assert sorted_by_mae.iloc[0][
+        sorted_by_mae = df.sort_values("test_mae")
+        assert sorted_by_mae.iloc[0]["test_mae"] < 125
 
     def test_model_performance_aggregation(self, sample_models):
        """Test aggregation of model performance"""
         metrics = {
-
-
-
-
+            "total_models": len(sample_models),
+            "deployed_models": sum(1 for m in sample_models if m.status == ModelStatus.DEPLOYED),
+            "avg_rmse": np.mean([m.test_rmse for m in sample_models]),
+            "avg_r2": np.mean([m.test_r2 for m in sample_models]),
         }
 
-        assert metrics[
-        assert metrics[
-        assert 150 < metrics[
-        assert 0.75 < metrics[
+        assert metrics["total_models"] == 5
+        assert metrics["deployed_models"] == 2
+        assert 150 < metrics["avg_rmse"] < 180
+        assert 0.75 < metrics["avg_r2"] < 0.85
 
     def test_feature_importance_calculation(self, sample_models):
         """Test feature importance extraction and ranking"""
@@ -186,18 +195,17 @@ class TestTrainingDashboard:
 
         # Simulate feature importance
         importance = np.random.dirichlet(np.ones(len(model.feature_names)))
-        feature_df = pd.DataFrame(
-
-
-        }).sort_values('importance', ascending=False)
+        feature_df = pd.DataFrame(
+            {"feature": model.feature_names, "importance": importance}
+        ).sort_values("importance", ascending=False)
 
         # Test that importances sum to 1
-        assert np.isclose(feature_df[
+        assert np.isclose(feature_df["importance"].sum(), 1.0)
 
         # Test top features
         top_5 = feature_df.head(5)
         assert len(top_5) == 5
-        assert all(top_5[
+        assert all(top_5["importance"] > 0)
 
     def test_residuals_analysis(self):
         """Test residual analysis calculations"""
@@ -220,6 +228,7 @@ class TestTrainingDashboard:
 
         # Test normality (using simple statistics)
         from scipy import stats
+
         _, p_value = stats.normaltest(residuals)
         # With random data, should generally pass normality test
         assert 0 <= p_value <= 1
@@ -240,7 +249,7 @@ class TestTrainingDashboard:
 
     def test_training_duration_analysis(self, sample_experiments):
         """Test training duration analysis"""
-        completed = [exp for exp in sample_experiments if exp.status ==
+        completed = [exp for exp in sample_experiments if exp.status == "completed"]
 
         durations = [exp.duration_seconds for exp in completed]
         avg_duration = np.mean(durations)
@@ -252,54 +261,72 @@ class TestTrainingDashboard:
 
     def test_model_comparison_ranking(self, sample_models):
         """Test ranking models by multiple metrics"""
-        df = pd.DataFrame(
-
-
-
-
-
-
-
-
+        df = pd.DataFrame(
+            [
+                {
+                    "name": m.name,
+                    "test_rmse": m.test_rmse,
+                    "test_mae": m.test_mae,
+                    "test_r2": m.test_r2,
+                }
+                for m in sample_models
+            ]
+        )
 
         # Rank by RMSE (lower is better)
-        df[
+        df["rank_rmse"] = df["test_rmse"].rank()
 
         # Rank by R² (higher is better)
-        df[
+        df["rank_r2"] = df["test_r2"].rank(ascending=False)
 
         # Composite rank
-        df[
+        df["composite_rank"] = (df["rank_rmse"] + df["rank_r2"]) / 2
 
-        best_overall = df.loc[df[
+        best_overall = df.loc[df["composite_rank"].idxmin()]
 
         # Random Forest should be among the best
-        assert best_overall[
-        assert best_overall[
+        assert best_overall["test_r2"] > 0.8
+        assert best_overall["test_rmse"] < 160
 
     def test_feature_categorization(self):
         """Test feature categorization (lag, MA, volatility, etc.)"""
         features = [
-
-
-
-
-
+            "lag_1",
+            "lag_7",
+            "lag_30",
+            "ma_7",
+            "ma_14",
+            "sma_30",
+            "ema_20",
+            "volatility_7",
+            "volatility_14",
+            "std_30",
+            "price_change_1",
+            "pct_change_7",
+            "rsi_14",
+            "macd",
+            "bollinger_upper",
         ]
 
         categories = {
-
-
-
-
-
+            "Lag Features": [f for f in features if "lag" in f.lower()],
+            "Moving Averages": [
+                f for f in features if any(x in f.lower() for x in ["ma", "sma", "ema"])
+            ],
+            "Volatility": [
+                f for f in features if any(x in f.lower() for x in ["volatility", "std"])
+            ],
+            "Price Changes": [f for f in features if "change" in f.lower() or "pct" in f.lower()],
+            "Technical": [
+                f for f in features if any(x in f.lower() for x in ["rsi", "macd", "bollinger"])
+            ],
         }
 
-        assert len(categories[
-        assert len(categories[
-        assert len(categories[
-        assert len(categories[
-        assert len(categories[
+        assert len(categories["Lag Features"]) == 3
+        assert len(categories["Moving Averages"]) == 4
+        assert len(categories["Volatility"]) == 3
+        assert len(categories["Price Changes"]) == 2
+        assert len(categories["Technical"]) == 3
 
     def test_mape_calculation(self):
         """Test Mean Absolute Percentage Error calculation"""
@@ -318,7 +345,7 @@ class TestTrainingDashboard:
         errors = np.array([5, 3, 8, 2, 10])
 
         mae = np.mean(np.abs(errors))
-        rmse = np.sqrt(np.mean(errors
+        rmse = np.sqrt(np.mean(errors**2))
 
         assert rmse >= mae
 
@@ -347,9 +374,9 @@ class TestTrainingDashboard:
         for exp in sample_experiments:
             status_counts[exp.status] = status_counts.get(exp.status, 0) + 1
 
-        assert status_counts[
-        assert status_counts[
-        assert status_counts[
+        assert status_counts["completed"] == 7
+        assert status_counts["running"] == 2
+        assert status_counts["failed"] == 1
         assert sum(status_counts.values()) == 10
 
 
@@ -358,10 +385,10 @@ class TestModelVersioning:
 
     def test_version_comparison(self):
         """Test semantic version comparison"""
-        versions = [
+        versions = ["1.0.0", "1.1.0", "1.0.1", "2.0.0", "1.2.0"]
 
         # Parse and sort versions
-        parsed = [tuple(map(int, v.split(
+        parsed = [tuple(map(int, v.split("."))) for v in versions]
         sorted_versions = sorted(parsed)
 
         assert sorted_versions[0] == (1, 0, 0)
@@ -370,18 +397,18 @@ class TestModelVersioning:
     def test_model_deployment_tracking(self):
         """Test tracking which models are deployed"""
         models = [
-            {
-            {
-            {
+            {"name": "model-a", "version": "1.0.0", "deployed": True},
+            {"name": "model-a", "version": "1.1.0", "deployed": False},
+            {"name": "model-b", "version": "1.0.0", "deployed": True},
         ]
 
-        deployed = [m for m in models if m[
+        deployed = [m for m in models if m["deployed"]]
         assert len(deployed) == 2
 
         # Test that only one version of each model is deployed
-        deployed_names = [m[
+        deployed_names = [m["name"] for m in deployed]
         assert len(deployed_names) == len(set(deployed_names))
 
 
-if __name__ ==
-    pytest.main([__file__,
+if __name__ == "__main__":
+    pytest.main([__file__, "-v"])
mcli/mygroup/test_cmd.py
CHANGED
@@ -1 +1,2 @@
-def test():
+def test():
+    pass
mcli/self/self_cmd.py
CHANGED
@@ -1137,11 +1137,8 @@ def check_ci_status(version: str) -> tuple[bool, Optional[str]]:
         response = requests.get(
             "https://api.github.com/repos/gwicho38/mcli/actions/runs",
             params={"per_page": 5},
-            headers={
-
-                "User-Agent": "mcli-cli"
-            },
-            timeout=10
+            headers={"Accept": "application/vnd.github.v3+json", "User-Agent": "mcli-cli"},
+            timeout=10,
         )
 
         if response.status_code == 200:
@@ -1150,7 +1147,8 @@ def check_ci_status(version: str) -> tuple[bool, Optional[str]]:
 
             # Find the most recent completed run for main branch
             main_runs = [
-                run
+                run
+                for run in runs
                 if run.get("head_branch") == "main" and run.get("status") == "completed"
             ]
 
@@ -1209,7 +1207,10 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
     if pre:
         # Include pre-releases
         all_versions = list(pypi_data["releases"].keys())
-        latest_version = max(
+        latest_version = max(
+            all_versions,
+            key=lambda v: [int(x) for x in v.split(".")] if v[0].isdigit() else [0],
+        )
     else:
         # Only stable releases
         latest_version = pypi_data["info"]["version"]
@@ -1232,7 +1233,9 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
     latest_parsed = parse_version(latest_version)
 
     if current_parsed >= latest_parsed:
-        console.print(
+        console.print(
+            f"[green]✅ Your version ({current_version}) is up to date or newer[/green]"
+        )
         return
 
     console.print(f"[yellow]⬆️ Update available: {current_version} → {latest_version}[/yellow]")
@@ -1250,6 +1253,7 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
     # Ask for confirmation unless --yes flag is used
     if not yes:
         from rich.prompt import Confirm
+
         if not Confirm.ask(f"[yellow]Install mcli {latest_version}?[/yellow]"):
             console.print("[yellow]Update cancelled[/yellow]")
             return
@@ -1263,8 +1267,12 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
             console.print("[red]✗ CI build is failing for the latest version[/red]")
             if ci_url:
                 console.print(f"[yellow] View CI status: {ci_url}[/yellow]")
-            console.print(
-
+            console.print(
+                "[yellow]⚠️ Update blocked to prevent installing a broken version[/yellow]"
+            )
+            console.print(
+                "[dim] Use --skip-ci-check to install anyway (not recommended)[/dim]"
+            )
             return
         else:
             console.print("[green]✓ CI build is passing[/green]")
@@ -1277,9 +1285,9 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
     executable_path = str(sys.executable).replace("\\", "/")  # Normalize path separators
 
     is_uv_tool = (
-        "/uv/tools/" in executable_path
-        "/.local/share/uv/tools/" in executable_path
-        "\\AppData\\Local\\uv\\tools\\" in str(sys.executable)
+        "/uv/tools/" in executable_path
+        or "/.local/share/uv/tools/" in executable_path
+        or "\\AppData\\Local\\uv\\tools\\" in str(sys.executable)
    )
 
     if is_uv_tool:
@@ -1289,7 +1297,9 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
         if pre:
             # For pre-releases, we'd need to specify the version explicitly
             # For now, --pre is not supported with uv tool install in this context
-            console.print(
+            console.print(
+                "[yellow]⚠️ Pre-release flag not supported with uv tool install[/yellow]"
+            )
         else:
             # Use pip to upgrade for regular installations (requires pip in environment)
             cmd = [sys.executable, "-m", "pip", "install", "--upgrade", "mcli-framework"]
@@ -1301,9 +1311,13 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
         if result.returncode == 0:
             console.print(f"[green]✅ Successfully updated to mcli {latest_version}![/green]")
             if is_uv_tool:
-                console.print(
+                console.print(
+                    "[yellow]ℹ️ Run 'hash -r' to refresh your shell's command cache[/yellow]"
+                )
             else:
-                console.print(
+                console.print(
+                    "[yellow]ℹ️ Restart your terminal or run 'hash -r' to use the new version[/yellow]"
+                )
         else:
             console.print(f"[red]❌ Update failed:[/red]")
             console.print(result.stderr)
@@ -1311,6 +1325,7 @@ def update(check: bool, pre: bool, yes: bool, skip_ci_check: bool):
     except Exception as e:
         console.print(f"[red]❌ Error during update: {e}[/red]")
         import traceback
+
         console.print(f"[dim]{traceback.format_exc()}[/dim]")
 
 
mcli/self/test_cmd.py
CHANGED
@@ -1 +1,2 @@
-def test():
+def test():
+    pass
mcli/workflow/dashboard/dashboard_cmd.py
CHANGED
@@ -36,11 +36,17 @@ def launch(port, host, debug):
 
     # Build streamlit command
     cmd = [
-        sys.executable,
+        sys.executable,
+        "-m",
+        "streamlit",
+        "run",
         str(dashboard_path),
-        "--server.port",
-
-        "--
+        "--server.port",
+        str(port),
+        "--server.address",
+        host,
+        "--browser.gatherUsageStats",
+        "false",
     ]
 
     if debug:
@@ -68,8 +74,8 @@ def info():
 
     # Check if dependencies are installed
     try:
-        import streamlit
         import plotly
+        import streamlit
 
         click.echo("✅ Dashboard dependencies installed")
         click.echo(f" Streamlit version: {streamlit.__version__}")
@@ -81,6 +87,7 @@
     # Check database connection
     try:
         from mcli.ml.config import settings
+
         click.echo(f"\n📁 Database URL: {settings.database.url}")
         click.echo(f"📍 Redis URL: {settings.redis.url}")
     except Exception as e:
@@ -110,4 +117,4 @@ def service(action):
 
 
 if __name__ == "__main__":
-    dashboard()
+    dashboard()