mcli-framework 7.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mcli-framework might be problematic. Click here for more details.
- mcli/app/chat_cmd.py +42 -0
- mcli/app/commands_cmd.py +226 -0
- mcli/app/completion_cmd.py +216 -0
- mcli/app/completion_helpers.py +288 -0
- mcli/app/cron_test_cmd.py +697 -0
- mcli/app/logs_cmd.py +419 -0
- mcli/app/main.py +492 -0
- mcli/app/model/model.py +1060 -0
- mcli/app/model_cmd.py +227 -0
- mcli/app/redis_cmd.py +269 -0
- mcli/app/video/video.py +1114 -0
- mcli/app/visual_cmd.py +303 -0
- mcli/chat/chat.py +2409 -0
- mcli/chat/command_rag.py +514 -0
- mcli/chat/enhanced_chat.py +652 -0
- mcli/chat/system_controller.py +1010 -0
- mcli/chat/system_integration.py +1016 -0
- mcli/cli.py +25 -0
- mcli/config.toml +20 -0
- mcli/lib/api/api.py +586 -0
- mcli/lib/api/daemon_client.py +203 -0
- mcli/lib/api/daemon_client_local.py +44 -0
- mcli/lib/api/daemon_decorator.py +217 -0
- mcli/lib/api/mcli_decorators.py +1032 -0
- mcli/lib/auth/auth.py +85 -0
- mcli/lib/auth/aws_manager.py +85 -0
- mcli/lib/auth/azure_manager.py +91 -0
- mcli/lib/auth/credential_manager.py +192 -0
- mcli/lib/auth/gcp_manager.py +93 -0
- mcli/lib/auth/key_manager.py +117 -0
- mcli/lib/auth/mcli_manager.py +93 -0
- mcli/lib/auth/token_manager.py +75 -0
- mcli/lib/auth/token_util.py +1011 -0
- mcli/lib/config/config.py +47 -0
- mcli/lib/discovery/__init__.py +1 -0
- mcli/lib/discovery/command_discovery.py +274 -0
- mcli/lib/erd/erd.py +1345 -0
- mcli/lib/erd/generate_graph.py +453 -0
- mcli/lib/files/files.py +76 -0
- mcli/lib/fs/fs.py +109 -0
- mcli/lib/lib.py +29 -0
- mcli/lib/logger/logger.py +611 -0
- mcli/lib/performance/optimizer.py +409 -0
- mcli/lib/performance/rust_bridge.py +502 -0
- mcli/lib/performance/uvloop_config.py +154 -0
- mcli/lib/pickles/pickles.py +50 -0
- mcli/lib/search/cached_vectorizer.py +479 -0
- mcli/lib/services/data_pipeline.py +460 -0
- mcli/lib/services/lsh_client.py +441 -0
- mcli/lib/services/redis_service.py +387 -0
- mcli/lib/shell/shell.py +137 -0
- mcli/lib/toml/toml.py +33 -0
- mcli/lib/ui/styling.py +47 -0
- mcli/lib/ui/visual_effects.py +634 -0
- mcli/lib/watcher/watcher.py +185 -0
- mcli/ml/api/app.py +215 -0
- mcli/ml/api/middleware.py +224 -0
- mcli/ml/api/routers/admin_router.py +12 -0
- mcli/ml/api/routers/auth_router.py +244 -0
- mcli/ml/api/routers/backtest_router.py +12 -0
- mcli/ml/api/routers/data_router.py +12 -0
- mcli/ml/api/routers/model_router.py +302 -0
- mcli/ml/api/routers/monitoring_router.py +12 -0
- mcli/ml/api/routers/portfolio_router.py +12 -0
- mcli/ml/api/routers/prediction_router.py +267 -0
- mcli/ml/api/routers/trade_router.py +12 -0
- mcli/ml/api/routers/websocket_router.py +76 -0
- mcli/ml/api/schemas.py +64 -0
- mcli/ml/auth/auth_manager.py +425 -0
- mcli/ml/auth/models.py +154 -0
- mcli/ml/auth/permissions.py +302 -0
- mcli/ml/backtesting/backtest_engine.py +502 -0
- mcli/ml/backtesting/performance_metrics.py +393 -0
- mcli/ml/cache.py +400 -0
- mcli/ml/cli/main.py +398 -0
- mcli/ml/config/settings.py +394 -0
- mcli/ml/configs/dvc_config.py +230 -0
- mcli/ml/configs/mlflow_config.py +131 -0
- mcli/ml/configs/mlops_manager.py +293 -0
- mcli/ml/dashboard/app.py +532 -0
- mcli/ml/dashboard/app_integrated.py +738 -0
- mcli/ml/dashboard/app_supabase.py +560 -0
- mcli/ml/dashboard/app_training.py +615 -0
- mcli/ml/dashboard/cli.py +51 -0
- mcli/ml/data_ingestion/api_connectors.py +501 -0
- mcli/ml/data_ingestion/data_pipeline.py +567 -0
- mcli/ml/data_ingestion/stream_processor.py +512 -0
- mcli/ml/database/migrations/env.py +94 -0
- mcli/ml/database/models.py +667 -0
- mcli/ml/database/session.py +200 -0
- mcli/ml/experimentation/ab_testing.py +845 -0
- mcli/ml/features/ensemble_features.py +607 -0
- mcli/ml/features/political_features.py +676 -0
- mcli/ml/features/recommendation_engine.py +809 -0
- mcli/ml/features/stock_features.py +573 -0
- mcli/ml/features/test_feature_engineering.py +346 -0
- mcli/ml/logging.py +85 -0
- mcli/ml/mlops/data_versioning.py +518 -0
- mcli/ml/mlops/experiment_tracker.py +377 -0
- mcli/ml/mlops/model_serving.py +481 -0
- mcli/ml/mlops/pipeline_orchestrator.py +614 -0
- mcli/ml/models/base_models.py +324 -0
- mcli/ml/models/ensemble_models.py +675 -0
- mcli/ml/models/recommendation_models.py +474 -0
- mcli/ml/models/test_models.py +487 -0
- mcli/ml/monitoring/drift_detection.py +676 -0
- mcli/ml/monitoring/metrics.py +45 -0
- mcli/ml/optimization/portfolio_optimizer.py +834 -0
- mcli/ml/preprocessing/data_cleaners.py +451 -0
- mcli/ml/preprocessing/feature_extractors.py +491 -0
- mcli/ml/preprocessing/ml_pipeline.py +382 -0
- mcli/ml/preprocessing/politician_trading_preprocessor.py +569 -0
- mcli/ml/preprocessing/test_preprocessing.py +294 -0
- mcli/ml/scripts/populate_sample_data.py +200 -0
- mcli/ml/tasks.py +400 -0
- mcli/ml/tests/test_integration.py +429 -0
- mcli/ml/tests/test_training_dashboard.py +387 -0
- mcli/public/oi/oi.py +15 -0
- mcli/public/public.py +4 -0
- mcli/self/self_cmd.py +1246 -0
- mcli/workflow/daemon/api_daemon.py +800 -0
- mcli/workflow/daemon/async_command_database.py +681 -0
- mcli/workflow/daemon/async_process_manager.py +591 -0
- mcli/workflow/daemon/client.py +530 -0
- mcli/workflow/daemon/commands.py +1196 -0
- mcli/workflow/daemon/daemon.py +905 -0
- mcli/workflow/daemon/daemon_api.py +59 -0
- mcli/workflow/daemon/enhanced_daemon.py +571 -0
- mcli/workflow/daemon/process_cli.py +244 -0
- mcli/workflow/daemon/process_manager.py +439 -0
- mcli/workflow/daemon/test_daemon.py +275 -0
- mcli/workflow/dashboard/dashboard_cmd.py +113 -0
- mcli/workflow/docker/docker.py +0 -0
- mcli/workflow/file/file.py +100 -0
- mcli/workflow/gcloud/config.toml +21 -0
- mcli/workflow/gcloud/gcloud.py +58 -0
- mcli/workflow/git_commit/ai_service.py +328 -0
- mcli/workflow/git_commit/commands.py +430 -0
- mcli/workflow/lsh_integration.py +355 -0
- mcli/workflow/model_service/client.py +594 -0
- mcli/workflow/model_service/download_and_run_efficient_models.py +288 -0
- mcli/workflow/model_service/lightweight_embedder.py +397 -0
- mcli/workflow/model_service/lightweight_model_server.py +714 -0
- mcli/workflow/model_service/lightweight_test.py +241 -0
- mcli/workflow/model_service/model_service.py +1955 -0
- mcli/workflow/model_service/ollama_efficient_runner.py +425 -0
- mcli/workflow/model_service/pdf_processor.py +386 -0
- mcli/workflow/model_service/test_efficient_runner.py +234 -0
- mcli/workflow/model_service/test_example.py +315 -0
- mcli/workflow/model_service/test_integration.py +131 -0
- mcli/workflow/model_service/test_new_features.py +149 -0
- mcli/workflow/openai/openai.py +99 -0
- mcli/workflow/politician_trading/commands.py +1790 -0
- mcli/workflow/politician_trading/config.py +134 -0
- mcli/workflow/politician_trading/connectivity.py +490 -0
- mcli/workflow/politician_trading/data_sources.py +395 -0
- mcli/workflow/politician_trading/database.py +410 -0
- mcli/workflow/politician_trading/demo.py +248 -0
- mcli/workflow/politician_trading/models.py +165 -0
- mcli/workflow/politician_trading/monitoring.py +413 -0
- mcli/workflow/politician_trading/scrapers.py +966 -0
- mcli/workflow/politician_trading/scrapers_california.py +412 -0
- mcli/workflow/politician_trading/scrapers_eu.py +377 -0
- mcli/workflow/politician_trading/scrapers_uk.py +350 -0
- mcli/workflow/politician_trading/scrapers_us_states.py +438 -0
- mcli/workflow/politician_trading/supabase_functions.py +354 -0
- mcli/workflow/politician_trading/workflow.py +852 -0
- mcli/workflow/registry/registry.py +180 -0
- mcli/workflow/repo/repo.py +223 -0
- mcli/workflow/scheduler/commands.py +493 -0
- mcli/workflow/scheduler/cron_parser.py +238 -0
- mcli/workflow/scheduler/job.py +182 -0
- mcli/workflow/scheduler/monitor.py +139 -0
- mcli/workflow/scheduler/persistence.py +324 -0
- mcli/workflow/scheduler/scheduler.py +679 -0
- mcli/workflow/sync/sync_cmd.py +437 -0
- mcli/workflow/sync/test_cmd.py +314 -0
- mcli/workflow/videos/videos.py +242 -0
- mcli/workflow/wakatime/wakatime.py +11 -0
- mcli/workflow/workflow.py +37 -0
- mcli_framework-7.0.0.dist-info/METADATA +479 -0
- mcli_framework-7.0.0.dist-info/RECORD +186 -0
- mcli_framework-7.0.0.dist-info/WHEEL +5 -0
- mcli_framework-7.0.0.dist-info/entry_points.txt +7 -0
- mcli_framework-7.0.0.dist-info/licenses/LICENSE +21 -0
- mcli_framework-7.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,594 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import os
|
|
3
|
+
import time
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Any, Dict, List, Optional, Union
|
|
7
|
+
|
|
8
|
+
import click
|
|
9
|
+
import requests
|
|
10
|
+
|
|
11
|
+
from mcli.lib.logger.logger import get_logger
|
|
12
|
+
|
|
13
|
+
logger = get_logger(__name__)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class ModelServiceClient:
    """HTTP client for the model service daemon's REST API.

    Wraps a persistent ``requests.Session`` configured for JSON I/O and
    translates transport/HTTP failures into plain Python exceptions so
    callers never need to touch ``requests`` directly.
    """

    def __init__(self, base_url: str = "http://localhost:8000"):
        # Normalize so endpoint paths can always be appended with a leading "/".
        self.base_url = base_url.rstrip("/")
        self.session = requests.Session()
        self.session.headers.update(
            {"Content-Type": "application/json", "Accept": "application/json"}
        )

    def _make_request(self, method: str, endpoint: str, data: Optional[Dict] = None) -> Dict:
        """Issue an HTTP request and return the decoded JSON response body.

        Args:
            method: HTTP verb — GET, POST, PUT, or DELETE (case-insensitive).
            endpoint: Path starting with "/", appended to ``base_url``.
            data: JSON-serializable payload for POST/PUT requests.

        Raises:
            ConnectionError: the service is unreachable.
            ValueError: the server returned 404 (resource not found).
            RuntimeError: any other HTTP error or request failure (including
                an unsupported ``method``, which is re-wrapped by the broad
                handler below, matching the original behavior).
        """
        url = f"{self.base_url}{endpoint}"

        try:
            verb = method.upper()
            if verb == "GET":
                response = self.session.get(url)
            elif verb == "POST":
                response = self.session.post(url, json=data)
            elif verb == "PUT":
                # BUGFIX: update_model() sends PUT requests, but this branch
                # was missing, so every configuration update raised
                # "Unsupported HTTP method: PUT" instead of reaching the server.
                response = self.session.put(url, json=data)
            elif verb == "DELETE":
                response = self.session.delete(url)
            else:
                raise ValueError(f"Unsupported HTTP method: {method}")

            response.raise_for_status()
            return response.json()

        except requests.exceptions.ConnectionError:
            raise ConnectionError(f"Could not connect to model service at {self.base_url}")
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                raise ValueError("Resource not found")
            elif e.response.status_code == 500:
                error_detail = e.response.json().get("detail", "Unknown error")
                raise RuntimeError(f"Server error: {error_detail}")
            else:
                raise RuntimeError(f"HTTP error {e.response.status_code}: {e.response.text}")
        except Exception as e:
            raise RuntimeError(f"Request failed: {e}")

    def get_status(self) -> Dict[str, Any]:
        """Return the service's root status document."""
        return self._make_request("GET", "/")

    def get_health(self) -> Dict[str, Any]:
        """Return the service's health/metrics document."""
        return self._make_request("GET", "/health")

    def list_models(self) -> List[Dict[str, Any]]:
        """Return metadata for all models known to the service."""
        return self._make_request("GET", "/models")

    def load_model(
        self,
        name: str,
        model_type: str,
        model_path: str,
        tokenizer_path: Optional[str] = None,
        device: str = "auto",
        max_length: int = 512,
        temperature: float = 0.7,
        top_p: float = 0.9,
        top_k: int = 50,
    ) -> str:
        """Register and load a new model; return the server-assigned model ID."""
        data = {
            "name": name,
            "model_type": model_type,
            "model_path": model_path,
            "tokenizer_path": tokenizer_path,
            "device": device,
            "max_length": max_length,
            "temperature": temperature,
            "top_p": top_p,
            "top_k": top_k,
        }

        result = self._make_request("POST", "/models", data)
        return result["model_id"]

    def unload_model(self, model_id: str) -> bool:
        """Unload a model. Returns False when the model ID is unknown (404)."""
        try:
            self._make_request("DELETE", f"/models/{model_id}")
            return True
        except ValueError:
            return False

    def update_model(self, model_id: str, updates: Dict[str, Any]) -> bool:
        """Apply configuration updates. Returns False when the ID is unknown (404)."""
        try:
            self._make_request("PUT", f"/models/{model_id}", updates)
            return True
        except ValueError:
            return False

    def remove_model(self, model_id: str) -> bool:
        """Delete a model record. Returns False when the ID is unknown (404)."""
        try:
            self._make_request("DELETE", f"/models/{model_id}/remove")
            return True
        except ValueError:
            return False

    def generate_text(
        self,
        model_id: str,
        prompt: str,
        max_length: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        top_k: Optional[int] = None,
    ) -> Dict[str, Any]:
        """Generate text with a model; unset sampling params use server defaults."""
        data = {
            "prompt": prompt,
            "max_length": max_length,
            "temperature": temperature,
            "top_p": top_p,
            "top_k": top_k,
        }

        # Remove None values so the server falls back to its own defaults.
        data = {k: v for k, v in data.items() if v is not None}

        return self._make_request("POST", f"/models/{model_id}/generate", data)

    def classify_text(self, model_id: str, text: str) -> Dict[str, Any]:
        """Classify text with a model; returns the raw response document."""
        data = {"text": text}
        return self._make_request("POST", f"/models/{model_id}/classify", data)

    def translate_text(
        self, model_id: str, text: str, source_lang: str = "en", target_lang: str = "fr"
    ) -> Dict[str, Any]:
        """Translate text with a model; returns the raw response document."""
        data = {"text": text, "source_lang": source_lang, "target_lang": target_lang}
        return self._make_request("POST", f"/models/{model_id}/translate", data)
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
# CLI Commands
|
|
157
|
+
@click.group(name="model-client")
def model_client():
    """Client for interacting with the model service daemon"""
    # Group body is intentionally empty; subcommands attach via decorators.
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
@model_client.command()
@click.option("--url", default="http://localhost:8000", help="Model service URL")
def status(url: str):
    """Get model service status"""
    try:
        svc = ModelServiceClient(url)
        info = svc.get_status()
        health = svc.get_health()

        divider = "=" * 60
        click.echo(divider)
        click.echo(click.style("Model Service Status", fg="bright_blue", bold=True))
        click.echo(divider)
        # Render the summary fields in a fixed order.
        for line in (
            f"Service: {info['service']}",
            f"Version: {info['version']}",
            f"Status: {info['status']}",
            f"Models Loaded: {info['models_loaded']}",
            f"Memory Usage: {health.get('memory_usage_mb', 0):.1f} MB",
            f"API URL: {url}",
        ):
            click.echo(line)
        click.echo(divider)

    except Exception as e:
        click.echo(click.style(f"❌ Error: {e}", fg="red"))
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
@model_client.command()
@click.option("--url", default="http://localhost:8000", help="Model service URL")
def list_models(url: str):
    """List all available models"""
    try:
        registry = ModelServiceClient(url).list_models()

        if not registry:
            click.echo("No models available")
            return

        bar = "=" * 80
        click.echo(bar)
        click.echo(click.style("Available Models", fg="bright_blue", bold=True))
        click.echo(bar)

        for idx, entry in enumerate(registry, 1):
            loaded = bool(entry.get("is_loaded"))
            icon = "✅" if loaded else "⏳"
            click.echo(f"{idx}. {icon} {entry['name']}")
            click.echo(f" Type: {entry['model_type']}")
            click.echo(f" Path: {entry['model_path']}")
            click.echo(f" Device: {entry['device']}")
            if loaded:
                # Memory/parameter stats only exist for loaded models.
                click.echo(f" Memory: {entry.get('memory_usage_mb', 0):.1f} MB")
                click.echo(f" Parameters: {entry.get('parameters_count', 0):,}")
            click.echo()

        click.echo(bar)

    except Exception as e:
        click.echo(click.style(f"❌ Error: {e}", fg="red"))
|
|
218
|
+
|
|
219
|
+
|
|
220
|
+
@model_client.command()
@click.argument("model_path")
@click.option("--url", default="http://localhost:8000", help="Model service URL")
@click.option("--name", required=True, help="Model name")
@click.option("--type", "model_type", required=True, help="Model type")
@click.option("--tokenizer-path", help="Path to tokenizer")
@click.option("--device", default="auto", help="Device to use")
@click.option("--max-length", default=512, help="Maximum sequence length")
@click.option("--temperature", default=0.7, help="Sampling temperature")
@click.option("--top-p", default=0.9, help="Top-p sampling")
@click.option("--top-k", default=50, help="Top-k sampling")
def load_model(
    model_path: str,
    url: str,
    name: str,
    model_type: str,
    tokenizer_path: str = None,
    device: str = "auto",
    max_length: int = 512,
    temperature: float = 0.7,
    top_p: float = 0.9,
    top_k: int = 50,
):
    """Load a model into the service"""
    try:
        svc = ModelServiceClient(url)

        click.echo(f"Loading model '{name}'...")
        # Forward every CLI option verbatim to the service API.
        new_id = svc.load_model(
            name=name,
            model_type=model_type,
            model_path=model_path,
            tokenizer_path=tokenizer_path,
            device=device,
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
        )

        click.echo(click.style(f"✅ Model '{name}' loaded successfully!", fg="green"))
        click.echo(f"Model ID: {new_id}")

    except Exception as e:
        click.echo(click.style(f"❌ Error loading model: {e}", fg="red"))
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
@model_client.command()
@click.argument("model_id")
@click.option("--url", default="http://localhost:8000", help="Model service URL")
def unload_model(model_id: str, url: str):
    """Unload a model from the service"""
    try:
        svc = ModelServiceClient(url)

        click.echo(f"Unloading model {model_id}...")
        # unload_model() returns False when the service reports 404.
        if svc.unload_model(model_id):
            click.echo(click.style(f"✅ Model {model_id} unloaded successfully!", fg="green"))
        else:
            click.echo(click.style(f"❌ Model {model_id} not found", fg="red"))

    except Exception as e:
        click.echo(click.style(f"❌ Error unloading model: {e}", fg="red"))
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
@model_client.command()
@click.argument("model_id")
@click.option("--url", default="http://localhost:8000", help="Model service URL")
@click.option("--name", help="New model name")
@click.option("--temperature", type=float, help="New temperature value")
@click.option("--max-length", type=int, help="New max length value")
@click.option("--top-p", type=float, help="New top-p value")
@click.option("--top-k", type=int, help="New top-k value")
@click.option("--device", help="New device setting")
def update_model(
    model_id: str,
    url: str,
    name: Optional[str] = None,
    temperature: Optional[float] = None,
    max_length: Optional[int] = None,
    top_p: Optional[float] = None,
    top_k: Optional[int] = None,
    device: Optional[str] = None,
):
    """Update model configuration"""
    try:
        svc = ModelServiceClient(url)

        # Only options the user actually supplied are sent to the server.
        candidates = {
            "name": name,
            "temperature": temperature,
            "max_length": max_length,
            "top_p": top_p,
            "top_k": top_k,
            "device": device,
        }
        updates = {key: val for key, val in candidates.items() if val is not None}

        if not updates:
            click.echo(
                click.style(
                    "❌ No updates specified. Use --help to see available options.", fg="red"
                )
            )
            return

        click.echo(f"Updating model {model_id}...")
        if svc.update_model(model_id, updates):
            click.echo(click.style(f"✅ Model {model_id} updated successfully!", fg="green"))
            click.echo("Updated parameters:")
            for key, value in updates.items():
                click.echo(f" {key}: {value}")
        else:
            click.echo(click.style(f"❌ Model {model_id} not found", fg="red"))

    except Exception as e:
        click.echo(click.style(f"❌ Error updating model: {e}", fg="red"))
|
|
346
|
+
|
|
347
|
+
|
|
348
|
+
@model_client.command()
@click.argument("model_id")
@click.option("--url", default="http://localhost:8000", help="Model service URL")
@click.option("--force", is_flag=True, help="Force removal without confirmation")
def remove_model(model_id: str, url: str, force: bool = False):
    """Remove a model from the database"""
    try:
        svc = ModelServiceClient(url)

        if not force:
            # Look up the model so the user can confirm what is being deleted.
            match = next((m for m in svc.list_models() if m["id"] == model_id), None)

            if not match:
                click.echo(click.style(f"❌ Model {model_id} not found", fg="red"))
                return

            click.echo("Model to remove:")
            click.echo(f" Name: {match['name']}")
            click.echo(f" Type: {match['model_type']}")
            click.echo(f" Path: {match['model_path']}")
            click.echo(f" Loaded: {'Yes' if match.get('is_loaded') else 'No'}")

            if not click.confirm("Are you sure you want to remove this model?"):
                click.echo("Operation cancelled.")
                return

        click.echo(f"Removing model {model_id}...")
        if svc.remove_model(model_id):
            click.echo(click.style(f"✅ Model {model_id} removed successfully!", fg="green"))
        else:
            click.echo(click.style(f"❌ Model {model_id} not found", fg="red"))

    except Exception as e:
        click.echo(click.style(f"❌ Error removing model: {e}", fg="red"))
|
|
390
|
+
|
|
391
|
+
|
|
392
|
+
@model_client.command()
@click.argument("model_id")
@click.argument("prompt")
@click.option("--url", default="http://localhost:8000", help="Model service URL")
@click.option("--max-length", type=int, help="Maximum sequence length")
@click.option("--temperature", type=float, help="Sampling temperature")
@click.option("--top-p", type=float, help="Top-p sampling")
@click.option("--top-k", type=int, help="Top-k sampling")
def generate(
    model_id: str,
    prompt: str,
    url: str,
    max_length: Optional[int] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    top_k: Optional[int] = None,
):
    """Generate text using a model"""
    try:
        svc = ModelServiceClient(url)

        click.echo(f"Generating text with model {model_id}...")
        click.echo(f"Prompt: {prompt}")
        click.echo("-" * 50)

        # Unset sampling options are dropped client-side (server defaults apply).
        response = svc.generate_text(
            model_id=model_id,
            prompt=prompt,
            max_length=max_length,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
        )

        click.echo(click.style("Generated Text:", fg="bright_green", bold=True))
        click.echo(response["generated_text"])
        click.echo()
        click.echo(f"Execution time: {response['execution_time_ms']} ms")

    except Exception as e:
        click.echo(click.style(f"❌ Error generating text: {e}", fg="red"))
|
|
433
|
+
|
|
434
|
+
|
|
435
|
+
@model_client.command()
@click.argument("model_id")
@click.argument("text")
@click.option("--url", default="http://localhost:8000", help="Model service URL")
def classify(model_id: str, text: str, url: str):
    """Classify text using a model"""
    try:
        svc = ModelServiceClient(url)

        click.echo(f"Classifying text with model {model_id}...")
        click.echo(f"Text: {text}")
        click.echo("-" * 50)

        response = svc.classify_text(model_id=model_id, text=text)

        click.echo(click.style("Classifications:", fg="bright_green", bold=True))
        # One "label: probability" line per class, four decimal places.
        for label, score in response["classifications"].items():
            click.echo(f"{label}: {score:.4f}")

        click.echo()
        click.echo(f"Execution time: {response['execution_time_ms']} ms")

    except Exception as e:
        click.echo(click.style(f"❌ Error classifying text: {e}", fg="red"))
|
|
459
|
+
|
|
460
|
+
|
|
461
|
+
@model_client.command()
@click.argument("model_id")
@click.argument("text")
@click.option("--url", default="http://localhost:8000", help="Model service URL")
@click.option("--source-lang", default="en", help="Source language")
@click.option("--target-lang", default="fr", help="Target language")
def translate(model_id: str, text: str, url: str, source_lang: str = "en", target_lang: str = "fr"):
    """Translate text using a model"""
    try:
        svc = ModelServiceClient(url)

        click.echo(f"Translating text with model {model_id}...")
        click.echo(f"Text ({source_lang}): {text}")
        click.echo("-" * 50)

        response = svc.translate_text(
            model_id=model_id, text=text, source_lang=source_lang, target_lang=target_lang
        )

        click.echo(click.style("Translation:", fg="bright_green", bold=True))
        click.echo(f"({target_lang}): {response['translated_text']}")
        click.echo()
        click.echo(f"Execution time: {response['execution_time_ms']} ms")

    except Exception as e:
        click.echo(click.style(f"❌ Error translating text: {e}", fg="red"))
|
|
487
|
+
|
|
488
|
+
|
|
489
|
+
@model_client.command()
@click.option("--url", default="http://localhost:8000", help="Model service URL")
@click.option("--model-id", required=True, help="Model ID to test")
@click.option("--prompt", default="Hello, how are you?", help="Test prompt")
def test_model(url: str, model_id: str, prompt: str):
    """Test a model with a simple prompt"""
    try:
        svc = ModelServiceClient(url)

        click.echo("Testing model...")
        click.echo(f"Model ID: {model_id}")
        click.echo(f"Test prompt: {prompt}")
        click.echo("-" * 50)

        # A single generation round-trip doubles as a smoke test.
        response = svc.generate_text(model_id=model_id, prompt=prompt)

        click.echo(click.style("Test Result:", fg="bright_green", bold=True))
        click.echo(f"Generated: {response['generated_text']}")
        click.echo(f"Time: {response['execution_time_ms']} ms")

        click.echo(click.style("✅ Model test successful!", fg="green"))

    except Exception as e:
        click.echo(click.style(f"❌ Model test failed: {e}", fg="red"))
|
|
514
|
+
|
|
515
|
+
|
|
516
|
+
@model_client.command()
@click.option("--url", default="http://localhost:8000", help="Model service URL")
@click.option("--file", type=click.Path(exists=True), help="File with prompts to test")
@click.option("--model-id", required=True, help="Model ID to test")
@click.option("--output", type=click.Path(), help="Output file for results")
def batch_test(
    url: str,
    file: Optional[str] = None,
    model_id: Optional[str] = None,
    output: Optional[str] = None,
):
    """Run batch tests on a model.

    Reads prompts from --file (one per line, blank lines skipped) or falls
    back to a small built-in set, generates text for each prompt, prints a
    pass/fail summary with timing, and optionally writes per-prompt results
    to --output as JSON.
    """
    try:
        client = ModelServiceClient(url)

        if file:
            # Read prompts from file
            with open(file, "r") as f:
                prompts = [line.strip() for line in f if line.strip()]
        else:
            # Use default test prompts
            prompts = [
                "Hello, how are you?",
                "What is the capital of France?",
                "Explain quantum computing in simple terms.",
                "Write a short poem about technology.",
            ]

        # BUGFIX: a --file containing only blank lines produced an empty
        # prompt list, which crashed with ZeroDivisionError when computing
        # the average time below. Bail out early instead.
        if not prompts:
            click.echo(click.style("❌ No prompts to test", fg="red"))
            return

        click.echo(f"Running batch test with {len(prompts)} prompts...")
        click.echo(f"Model ID: {model_id}")
        click.echo("-" * 50)

        results = []
        total_time = 0

        for i, prompt in enumerate(prompts, 1):
            click.echo(f"Test {i}/{len(prompts)}: {prompt[:50]}...")

            try:
                result = client.generate_text(model_id=model_id, prompt=prompt)
                results.append(
                    {
                        "prompt": prompt,
                        "generated": result["generated_text"],
                        "time_ms": result["execution_time_ms"],
                        "success": True,
                    }
                )
                total_time += result["execution_time_ms"]

            except Exception as e:
                # Record the failure and continue with the remaining prompts.
                results.append({"prompt": prompt, "error": str(e), "success": False})

        # Display summary
        click.echo("\n" + "=" * 60)
        click.echo(click.style("Batch Test Results", fg="bright_blue", bold=True))
        click.echo("=" * 60)

        successful = sum(1 for r in results if r["success"])
        failed = len(results) - successful

        click.echo(f"Total tests: {len(results)}")
        click.echo(f"Successful: {successful}")
        click.echo(f"Failed: {failed}")
        click.echo(f"Total time: {total_time} ms")
        click.echo(f"Average time: {total_time/len(results):.1f} ms")

        # Save results if output file specified
        if output:
            with open(output, "w") as f:
                json.dump(results, f, indent=2)
            click.echo(f"Results saved to: {output}")

    except Exception as e:
        click.echo(click.style(f"❌ Batch test failed: {e}", fg="red"))
|
|
591
|
+
|
|
592
|
+
|
|
593
|
+
# Allow running this module directly as a standalone CLI entry point.
if __name__ == "__main__":
    model_client()
|