mcli-framework 7.12.0__py3-none-any.whl → 7.12.3__py3-none-any.whl
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Potentially problematic release.
This version of mcli-framework might be problematic.
- mcli/app/__init__.py +0 -2
- mcli/app/commands_cmd.py +19 -23
- mcli/app/completion_helpers.py +5 -5
- mcli/app/init_cmd.py +10 -10
- mcli/app/lock_cmd.py +82 -27
- mcli/app/main.py +4 -50
- mcli/app/model/model.py +5 -10
- mcli/app/store_cmd.py +8 -8
- mcli/app/video/__init__.py +0 -2
- mcli/app/video/video.py +1 -14
- mcli/chat/chat.py +90 -108
- mcli/chat/command_rag.py +0 -4
- mcli/chat/enhanced_chat.py +32 -41
- mcli/chat/system_controller.py +37 -37
- mcli/chat/system_integration.py +4 -5
- mcli/cli.py +2 -3
- mcli/lib/api/api.py +4 -9
- mcli/lib/api/daemon_client.py +19 -20
- mcli/lib/api/daemon_client_local.py +1 -3
- mcli/lib/api/daemon_decorator.py +6 -6
- mcli/lib/api/mcli_decorators.py +4 -8
- mcli/lib/auth/__init__.py +0 -1
- mcli/lib/auth/auth.py +4 -5
- mcli/lib/auth/mcli_manager.py +7 -12
- mcli/lib/auth/token_util.py +5 -5
- mcli/lib/config/__init__.py +29 -1
- mcli/lib/config/config.py +0 -1
- mcli/lib/custom_commands.py +1 -1
- mcli/lib/discovery/command_discovery.py +15 -15
- mcli/lib/erd/erd.py +7 -7
- mcli/lib/files/files.py +1 -1
- mcli/lib/fs/__init__.py +31 -1
- mcli/lib/fs/fs.py +12 -13
- mcli/lib/lib.py +0 -1
- mcli/lib/logger/logger.py +7 -10
- mcli/lib/performance/optimizer.py +25 -27
- mcli/lib/performance/rust_bridge.py +22 -27
- mcli/lib/performance/uvloop_config.py +0 -1
- mcli/lib/pickles/__init__.py +0 -1
- mcli/lib/pickles/pickles.py +0 -2
- mcli/lib/secrets/commands.py +0 -2
- mcli/lib/secrets/manager.py +0 -1
- mcli/lib/secrets/repl.py +2 -3
- mcli/lib/secrets/store.py +1 -2
- mcli/lib/services/data_pipeline.py +34 -34
- mcli/lib/services/lsh_client.py +38 -40
- mcli/lib/shell/shell.py +2 -2
- mcli/lib/toml/__init__.py +0 -1
- mcli/lib/ui/styling.py +0 -1
- mcli/lib/ui/visual_effects.py +33 -41
- mcli/lib/watcher/watcher.py +0 -1
- mcli/ml/__init__.py +1 -1
- mcli/ml/api/__init__.py +1 -1
- mcli/ml/api/app.py +8 -9
- mcli/ml/api/middleware.py +10 -10
- mcli/ml/api/routers/__init__.py +1 -1
- mcli/ml/api/routers/admin_router.py +3 -3
- mcli/ml/api/routers/auth_router.py +17 -18
- mcli/ml/api/routers/backtest_router.py +2 -2
- mcli/ml/api/routers/data_router.py +2 -2
- mcli/ml/api/routers/model_router.py +14 -15
- mcli/ml/api/routers/monitoring_router.py +2 -2
- mcli/ml/api/routers/portfolio_router.py +2 -2
- mcli/ml/api/routers/prediction_router.py +10 -9
- mcli/ml/api/routers/trade_router.py +2 -2
- mcli/ml/api/routers/websocket_router.py +6 -7
- mcli/ml/api/schemas.py +2 -2
- mcli/ml/auth/__init__.py +1 -1
- mcli/ml/auth/auth_manager.py +22 -23
- mcli/ml/auth/models.py +17 -17
- mcli/ml/auth/permissions.py +17 -17
- mcli/ml/backtesting/__init__.py +1 -1
- mcli/ml/backtesting/backtest_engine.py +31 -35
- mcli/ml/backtesting/performance_metrics.py +12 -14
- mcli/ml/backtesting/run.py +1 -2
- mcli/ml/cache.py +35 -36
- mcli/ml/cli/__init__.py +1 -1
- mcli/ml/cli/main.py +21 -24
- mcli/ml/config/__init__.py +1 -1
- mcli/ml/config/settings.py +28 -29
- mcli/ml/configs/__init__.py +1 -1
- mcli/ml/configs/dvc_config.py +14 -15
- mcli/ml/configs/mlflow_config.py +12 -13
- mcli/ml/configs/mlops_manager.py +19 -21
- mcli/ml/dashboard/__init__.py +4 -4
- mcli/ml/dashboard/app.py +20 -30
- mcli/ml/dashboard/app_supabase.py +16 -19
- mcli/ml/dashboard/app_training.py +11 -14
- mcli/ml/dashboard/cli.py +2 -2
- mcli/ml/dashboard/common.py +2 -3
- mcli/ml/dashboard/components/__init__.py +1 -1
- mcli/ml/dashboard/components/charts.py +13 -11
- mcli/ml/dashboard/components/metrics.py +7 -7
- mcli/ml/dashboard/components/tables.py +12 -9
- mcli/ml/dashboard/overview.py +2 -2
- mcli/ml/dashboard/pages/__init__.py +1 -1
- mcli/ml/dashboard/pages/cicd.py +15 -18
- mcli/ml/dashboard/pages/debug_dependencies.py +7 -7
- mcli/ml/dashboard/pages/monte_carlo_predictions.py +11 -18
- mcli/ml/dashboard/pages/predictions_enhanced.py +24 -32
- mcli/ml/dashboard/pages/scrapers_and_logs.py +22 -24
- mcli/ml/dashboard/pages/test_portfolio.py +3 -6
- mcli/ml/dashboard/pages/trading.py +16 -18
- mcli/ml/dashboard/pages/workflows.py +20 -30
- mcli/ml/dashboard/utils.py +9 -9
- mcli/ml/dashboard/warning_suppression.py +3 -3
- mcli/ml/data_ingestion/__init__.py +1 -1
- mcli/ml/data_ingestion/api_connectors.py +41 -46
- mcli/ml/data_ingestion/data_pipeline.py +36 -46
- mcli/ml/data_ingestion/stream_processor.py +43 -46
- mcli/ml/database/__init__.py +1 -1
- mcli/ml/database/migrations/env.py +2 -2
- mcli/ml/database/models.py +22 -24
- mcli/ml/database/session.py +14 -14
- mcli/ml/experimentation/__init__.py +1 -1
- mcli/ml/experimentation/ab_testing.py +45 -46
- mcli/ml/features/__init__.py +1 -1
- mcli/ml/features/ensemble_features.py +22 -27
- mcli/ml/features/recommendation_engine.py +30 -30
- mcli/ml/features/stock_features.py +29 -32
- mcli/ml/features/test_feature_engineering.py +10 -11
- mcli/ml/logging.py +4 -4
- mcli/ml/mlops/__init__.py +1 -1
- mcli/ml/mlops/data_versioning.py +29 -30
- mcli/ml/mlops/experiment_tracker.py +24 -24
- mcli/ml/mlops/model_serving.py +31 -34
- mcli/ml/mlops/pipeline_orchestrator.py +27 -35
- mcli/ml/models/__init__.py +5 -6
- mcli/ml/models/base_models.py +23 -23
- mcli/ml/models/ensemble_models.py +31 -31
- mcli/ml/models/recommendation_models.py +18 -19
- mcli/ml/models/test_models.py +14 -16
- mcli/ml/monitoring/__init__.py +1 -1
- mcli/ml/monitoring/drift_detection.py +32 -36
- mcli/ml/monitoring/metrics.py +2 -2
- mcli/ml/optimization/__init__.py +1 -1
- mcli/ml/optimization/optimize.py +1 -2
- mcli/ml/optimization/portfolio_optimizer.py +30 -32
- mcli/ml/predictions/__init__.py +1 -1
- mcli/ml/preprocessing/__init__.py +1 -1
- mcli/ml/preprocessing/data_cleaners.py +22 -23
- mcli/ml/preprocessing/feature_extractors.py +23 -26
- mcli/ml/preprocessing/ml_pipeline.py +23 -23
- mcli/ml/preprocessing/test_preprocessing.py +7 -8
- mcli/ml/scripts/populate_sample_data.py +0 -4
- mcli/ml/serving/serve.py +1 -2
- mcli/ml/tasks.py +17 -17
- mcli/ml/tests/test_integration.py +29 -30
- mcli/ml/tests/test_training_dashboard.py +21 -21
- mcli/ml/trading/__init__.py +1 -1
- mcli/ml/trading/migrations.py +5 -5
- mcli/ml/trading/models.py +21 -23
- mcli/ml/trading/paper_trading.py +16 -13
- mcli/ml/trading/risk_management.py +17 -18
- mcli/ml/trading/trading_service.py +25 -28
- mcli/ml/training/__init__.py +1 -1
- mcli/ml/training/train.py +0 -1
- mcli/public/oi/oi.py +1 -2
- mcli/self/completion_cmd.py +6 -10
- mcli/self/logs_cmd.py +19 -24
- mcli/self/migrate_cmd.py +22 -20
- mcli/self/redis_cmd.py +10 -11
- mcli/self/self_cmd.py +62 -18
- mcli/self/store_cmd.py +10 -12
- mcli/self/visual_cmd.py +9 -14
- mcli/self/zsh_cmd.py +2 -4
- mcli/workflow/daemon/async_command_database.py +23 -24
- mcli/workflow/daemon/async_process_manager.py +27 -29
- mcli/workflow/daemon/client.py +27 -33
- mcli/workflow/daemon/daemon.py +32 -36
- mcli/workflow/daemon/enhanced_daemon.py +24 -33
- mcli/workflow/daemon/process_cli.py +11 -12
- mcli/workflow/daemon/process_manager.py +23 -26
- mcli/workflow/daemon/test_daemon.py +4 -5
- mcli/workflow/dashboard/dashboard_cmd.py +0 -1
- mcli/workflow/doc_convert.py +15 -17
- mcli/workflow/gcloud/__init__.py +0 -1
- mcli/workflow/gcloud/gcloud.py +11 -8
- mcli/workflow/git_commit/ai_service.py +14 -15
- mcli/workflow/lsh_integration.py +9 -11
- mcli/workflow/model_service/client.py +26 -31
- mcli/workflow/model_service/download_and_run_efficient_models.py +10 -14
- mcli/workflow/model_service/lightweight_embedder.py +25 -35
- mcli/workflow/model_service/lightweight_model_server.py +26 -32
- mcli/workflow/model_service/lightweight_test.py +7 -10
- mcli/workflow/model_service/model_service.py +80 -91
- mcli/workflow/model_service/ollama_efficient_runner.py +14 -18
- mcli/workflow/model_service/openai_adapter.py +23 -23
- mcli/workflow/model_service/pdf_processor.py +21 -26
- mcli/workflow/model_service/test_efficient_runner.py +12 -16
- mcli/workflow/model_service/test_example.py +11 -13
- mcli/workflow/model_service/test_integration.py +3 -5
- mcli/workflow/model_service/test_new_features.py +7 -8
- mcli/workflow/notebook/converter.py +1 -1
- mcli/workflow/notebook/notebook_cmd.py +5 -6
- mcli/workflow/notebook/schema.py +0 -1
- mcli/workflow/notebook/validator.py +7 -3
- mcli/workflow/openai/openai.py +1 -2
- mcli/workflow/registry/registry.py +4 -1
- mcli/workflow/repo/repo.py +6 -7
- mcli/workflow/scheduler/cron_parser.py +16 -19
- mcli/workflow/scheduler/job.py +10 -10
- mcli/workflow/scheduler/monitor.py +15 -15
- mcli/workflow/scheduler/persistence.py +17 -18
- mcli/workflow/scheduler/scheduler.py +37 -38
- mcli/workflow/secrets/__init__.py +1 -1
- mcli/workflow/sync/test_cmd.py +0 -1
- mcli/workflow/wakatime/__init__.py +5 -9
- mcli/workflow/wakatime/wakatime.py +1 -2
- {mcli_framework-7.12.0.dist-info → mcli_framework-7.12.3.dist-info}/METADATA +1 -1
- mcli_framework-7.12.3.dist-info/RECORD +279 -0
- mcli_framework-7.12.0.dist-info/RECORD +0 -279
- {mcli_framework-7.12.0.dist-info → mcli_framework-7.12.3.dist-info}/WHEEL +0 -0
- {mcli_framework-7.12.0.dist-info → mcli_framework-7.12.3.dist-info}/entry_points.txt +0 -0
- {mcli_framework-7.12.0.dist-info → mcli_framework-7.12.3.dist-info}/licenses/LICENSE +0 -0
- {mcli_framework-7.12.0.dist-info → mcli_framework-7.12.3.dist-info}/top_level.txt +0 -0
mcli/workflow/daemon/process_manager.py
CHANGED

@@ -2,9 +2,6 @@ import json
 import os
 import signal
 import subprocess
-import tempfile
-import threading
-import time
 import uuid
 from dataclasses import asdict, dataclass
 from datetime import datetime
@@ -29,7 +26,7 @@ class ProcessStatus(Enum):

 @dataclass
 class ProcessInfo:
-    """Information about a managed process"""
+    """Information about a managed process."""

     id: str
     name: str
@@ -50,7 +47,7 @@ class ProcessInfo:


 class ProcessContainer:
-    """Manages a single containerized process"""
+    """Manages a single containerized process."""

     def __init__(self, process_info: ProcessInfo):
         self.info = process_info
@@ -61,7 +58,7 @@ class ProcessContainer:
         self._setup_container_environment()

     def _setup_container_environment(self):
-        """Setup isolated environment for the process"""
+        """Setup isolated environment for the process."""
         # Create container directory
         base_dir = Path.home() / ".local" / "mcli" / "containers"
         self.container_dir = base_dir / self.info.id
@@ -77,15 +74,15 @@ class ProcessContainer:
             json.dump(asdict(self.info), f, indent=2, default=str)

     def start(self) -> bool:
-        """Start the containerized process"""
+        """Start the containerized process."""
         try:
             if self.process and self.process.poll() is None:
                 logger.warning(f"Process {self.info.id} is already running")
                 return False

             # Open log files
-            stdout_handle = open(self.stdout_file, "w")
-            stderr_handle = open(self.stderr_file, "w")
+            stdout_handle = open(self.stdout_file, "w")  # noqa: SIM115
+            stderr_handle = open(self.stderr_file, "w")  # noqa: SIM115

             # Start process
             self.process = subprocess.Popen(
@@ -110,7 +107,7 @@ class ProcessContainer:
             return False

     def stop(self, timeout: int = 10) -> bool:
-        """Stop the process gracefully"""
+        """Stop the process gracefully."""
         if not self.process or self.process.poll() is not None:
             return True

@@ -140,7 +137,7 @@ class ProcessContainer:
             return False

     def kill(self) -> bool:
-        """Force kill the process"""
+        """Force kill the process."""
         if not self.process or self.process.poll() is not None:
             return True

@@ -160,13 +157,13 @@ class ProcessContainer:
             return False

     def is_running(self) -> bool:
-        """Check if process is currently running"""
+        """Check if process is currently running."""
         if not self.process:
             return False
         return self.process.poll() is None

     def get_logs(self, lines: Optional[int] = None, follow: bool = False) -> Dict[str, str]:
-        """Get process logs"""
+        """Get process logs."""
         logs = {"stdout": "", "stderr": ""}

         try:
@@ -190,7 +187,7 @@ class ProcessContainer:
         return logs

     def get_stats(self) -> Dict[str, Any]:
-        """Get process statistics"""
+        """Get process statistics."""
         stats = {
             "cpu_percent": 0.0,
             "memory_mb": 0.0,
@@ -215,7 +212,7 @@ class ProcessContainer:
         return stats

     def cleanup(self):
-        """Clean up container resources"""
+        """Clean up container resources."""
         try:
             # Stop process if running
             if self.is_running():
@@ -229,7 +226,7 @@ class ProcessContainer:


 class ProcessManager:
-    """Docker-like process management system"""
+    """Docker-like process management system."""

     def __init__(self):
         self.containers: Dict[str, ProcessContainer] = {}
@@ -240,7 +237,7 @@ class ProcessManager:
         self._load_existing_containers()

     def _load_existing_containers(self):
-        """Load existing containers from disk"""
+        """Load existing containers from disk."""
         try:
             for container_dir in self.base_dir.iterdir():
                 if container_dir.is_dir():
@@ -281,7 +278,7 @@ class ProcessManager:
         working_dir: str = None,
         environment: Dict[str, str] = None,
     ) -> str:
-        """Create a new process container"""
+        """Create a new process container."""
         process_id = str(uuid.uuid4())

         process_info = ProcessInfo(
@@ -301,7 +298,7 @@ class ProcessManager:
         return process_id

     def start(self, process_id: str) -> bool:
-        """Start a process container"""
+        """Start a process container."""
         if process_id not in self.containers:
             logger.error(f"Container {process_id} not found")
             return False
@@ -309,7 +306,7 @@ class ProcessManager:
         return self.containers[process_id].start()

     def stop(self, process_id: str, timeout: int = 10) -> bool:
-        """Stop a process container"""
+        """Stop a process container."""
         if process_id not in self.containers:
             logger.error(f"Container {process_id} not found")
             return False
@@ -317,7 +314,7 @@ class ProcessManager:
         return self.containers[process_id].stop(timeout)

     def kill(self, process_id: str) -> bool:
-        """Kill a process container"""
+        """Kill a process container."""
         if process_id not in self.containers:
             logger.error(f"Container {process_id} not found")
             return False
@@ -325,7 +322,7 @@ class ProcessManager:
         return self.containers[process_id].kill()

     def remove(self, process_id: str, force: bool = False) -> bool:
-        """Remove a process container"""
+        """Remove a process container."""
         if process_id not in self.containers:
             logger.error(f"Container {process_id} not found")
             return False
@@ -347,7 +344,7 @@ class ProcessManager:
         return True

     def list_processes(self, all_processes: bool = False) -> List[Dict[str, Any]]:
-        """List all process containers (Docker ps style)"""
+        """List all process containers (Docker ps style)."""
         result = []

         for container in self.containers.values():
@@ -380,7 +377,7 @@ class ProcessManager:
         return result

     def inspect(self, process_id: str) -> Optional[Dict[str, Any]]:
-        """Get detailed information about a process container"""
+        """Get detailed information about a process container."""
         if process_id not in self.containers:
             return None

@@ -413,7 +410,7 @@ class ProcessManager:
     def logs(
         self, process_id: str, lines: Optional[int] = None, follow: bool = False
     ) -> Optional[Dict[str, str]]:
-        """Get logs from a process container"""
+        """Get logs from a process container."""
         if process_id not in self.containers:
             return None

@@ -428,7 +425,7 @@ class ProcessManager:
         environment: Dict[str, str] = None,
         detach: bool = True,
     ) -> str:
-        """Create and start a process container in one step"""
+        """Create and start a process container in one step."""
         process_id = self.create(name, command, args, working_dir, environment)

         if self.start(process_id):
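Aside from docstring punctuation, the only functional-looking change in this file is the addition of `# noqa: SIM115` to the two log-file `open()` calls. The linter's SIM115 rule normally asks for a context manager, but these handles are passed to `subprocess.Popen` and must outlive the enclosing block. A minimal sketch of that pattern, not the package's actual code:

# Sketch only: why SIM115 is suppressed here. The log handles are handed to
# subprocess.Popen and must stay open for the child's lifetime, so wrapping
# them in `with open(...)` (which closes on block exit) would not work.
import subprocess
from pathlib import Path

def start_detached(command: list[str], log_dir: Path) -> subprocess.Popen:
    log_dir.mkdir(parents=True, exist_ok=True)
    stdout_handle = open(log_dir / "stdout.log", "w")  # noqa: SIM115
    stderr_handle = open(log_dir / "stderr.log", "w")  # noqa: SIM115
    # The child keeps writing to these descriptors after this function returns.
    return subprocess.Popen(command, stdout=stdout_handle, stderr=stderr_handle)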
mcli/workflow/daemon/test_daemon.py
CHANGED

@@ -9,12 +9,11 @@ example commands in different programming languages.
 import os
 import subprocess
 import tempfile
-import time
 from pathlib import Path


 def create_test_scripts():
-    """Create test scripts for different languages"""
+    """Create test scripts for different languages."""
     scripts = {}

     # Python script
@@ -88,7 +87,7 @@ echo "=== End ==="


 def test_daemon_functionality():
-    """Test the daemon functionality"""
+    """Test the daemon functionality."""
     print("🧪 Testing MCLI Daemon Service")
     print("=" * 50)

@@ -190,7 +189,7 @@ def test_daemon_functionality():


 def show_usage_examples():
-    """Show usage examples"""
+    """Show usage examples."""
     print("\n📚 Usage Examples")
     print("=" * 50)

@@ -239,7 +238,7 @@ def show_usage_examples():


 def main():
-    """Main test function"""
+    """Main test function."""
     print("🚀 MCLI Daemon Service Test Suite")
     print("=" * 60)

mcli/workflow/doc_convert.py
CHANGED

@@ -9,7 +9,6 @@ Uses temporary directory with hard links to avoid path issues.
 import os
 import shutil
 import subprocess
-import tempfile
 from dataclasses import dataclass
 from enum import Enum
 from glob import glob as file_glob
@@ -39,7 +38,7 @@ FORMAT_ALIASES = {
     "htm": "html",
     "xhtml": "html",
     # PDF
-    "
+    "pdf": "pdf",
     # LaTeX
     "tex": "latex",
     "latex": "latex",
@@ -58,13 +57,13 @@ FORMAT_ALIASES = {
     "org": "org",
     "mediawiki": "mediawiki",
     "textile": "textile",
-    "
+    "rtf": "rtf",
     "epub": "epub",
 }


 class ConversionMethod(Enum):
-    """Available conversion methods"""
+    """Available conversion methods."""

     PANDOC = "pandoc"
     NBCONVERT = "nbconvert"
@@ -74,7 +73,7 @@ class ConversionMethod(Enum):

 @dataclass
 class ConversionStrategy:
-    """Represents a conversion strategy with command and description"""
+    """Represents a conversion strategy with command and description."""

     method: ConversionMethod
     description: str
@@ -82,7 +81,7 @@ class ConversionStrategy:


 def get_temp_conversion_dir() -> Path:
-    """Get or create temporary conversion directory in ~/.mcli/commands/temp
+    """Get or create temporary conversion directory in ~/.mcli/commands/temp/."""
     commands_dir = get_custom_commands_dir()
     temp_dir = commands_dir / "temp" / "conversions"
     temp_dir.mkdir(parents=True, exist_ok=True)
@@ -148,7 +147,7 @@ def get_conversion_strategies(
     strategies = []

     # Special handling for Jupyter notebook to PDF (notoriously problematic)
-    if from_format == "ipynb" and to_format == "
+    if from_format == "ipynb" and to_format == "pdf":
         # Strategy 1: nbconvert (most reliable for notebooks)
         strategies.append(
             ConversionStrategy(
@@ -191,7 +190,7 @@ def get_conversion_strategies(
         strategies.append(ConversionStrategy(method=ConversionMethod.PANDOC, description="pandoc"))

     # PDF output (general)
-    elif to_format == "
+    elif to_format == "pdf":
         strategies.append(
             ConversionStrategy(
                 method=ConversionMethod.PANDOC_LATEX, description="pandoc with LaTeX"
@@ -255,7 +254,7 @@ def execute_conversion_strategy(
         cmd = [
             "pandoc",
             str(temp_input),
-            "-
+            "-f",
             from_format,
             "-o",
             str(temp_output),
@@ -276,7 +275,7 @@ def execute_conversion_strategy(
         cmd_html = [
             "pandoc",
             str(temp_input),
-            "-
+            "-f",
             from_format,
             "-t",
             "html",
@@ -291,7 +290,7 @@ def execute_conversion_strategy(
                 return False, f"HTML intermediate failed: {result.stderr}"

             # Step 2: Convert HTML to PDF
-            cmd = ["pandoc", str(html_temp), "-
+            cmd = ["pandoc", str(html_temp), "-f", "html", "-t", "pdf", "-o", str(temp_output)]

             result = subprocess.run(
                 cmd, capture_output=True, text=True, check=True, timeout=120, cwd=str(temp_dir)
@@ -299,9 +298,9 @@ def execute_conversion_strategy(

         else:  # PANDOC
             # Standard pandoc conversion
-            cmd = ["pandoc", str(temp_input), "-
+            cmd = ["pandoc", str(temp_input), "-f", from_format, "-o", str(temp_output)]
             # Use xelatex for PDF conversions (better Unicode support)
-            if to_format == "
+            if to_format == "pdf":
                 cmd.append("--pdf-engine=xelatex")
             if pandoc_args:
                 cmd.extend(pandoc_args.split())
@@ -330,8 +329,7 @@ def execute_conversion_strategy(

 @click.group(name="doc-convert")
 def doc_convert():
-    """Document conversion with automatic fallback strategies"""
-    pass
+    """Document conversion with automatic fallback strategies."""


 @doc_convert.command()
@@ -573,7 +571,7 @@ def convert(from_format, to_format, path, output_dir, pandoc_args, no_fallback):
         output_path = input_path.parent / f"{input_path.stem}.{output_ext}"

         info(f"🔄 Converting: {input_path.name} → {output_path.name}")
-        info(
+        info(" 📁 Using temp directory: ~/.mcli/commands/temp/conversions/")

         # Get conversion strategies
         strategies = get_conversion_strategies(
@@ -616,7 +614,7 @@ def convert(from_format, to_format, path, output_dir, pandoc_args, no_fallback):
         if conversion_succeeded:
             success_count += 1
         else:
-            error(
+            error(" ❌ All conversion methods failed")
             if last_error:
                 error(f" ℹ️ Last error: {last_error[:200]}")
             error_count += 1
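The strategies in this file appear to invoke pandoc directly with a source format, an output path, and `--pdf-engine=xelatex` for PDF targets, falling back to an HTML intermediate when direct conversion fails. A rough sketch of those calls, with placeholder file names rather than the package's temp-directory layout:

# Illustrative sketch of the pandoc invocations assembled above (paths are
# placeholders, not mcli's actual temp-directory handling).
import subprocess

def pandoc_direct(src: str, src_format: str, dest: str, use_xelatex: bool = False) -> None:
    cmd = ["pandoc", src, "-f", src_format, "-o", dest]
    if use_xelatex:
        cmd.append("--pdf-engine=xelatex")  # better Unicode support for PDF output
    subprocess.run(cmd, capture_output=True, text=True, check=True, timeout=120)

def pandoc_via_html(src: str, src_format: str, dest_pdf: str) -> None:
    # Fallback: render to HTML first, then convert the HTML to PDF.
    subprocess.run(
        ["pandoc", src, "-f", src_format, "-t", "html", "-o", "intermediate.html"],
        capture_output=True, text=True, check=True, timeout=120,
    )
    subprocess.run(
        ["pandoc", "intermediate.html", "-o", dest_pdf],
        capture_output=True, text=True, check=True, timeout=120,
    )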
mcli/workflow/gcloud/__init__.py
CHANGED

@@ -1 +0,0 @@
-from .gcloud import gcloud
mcli/workflow/gcloud/gcloud.py
CHANGED

@@ -1,33 +1,36 @@
 import click

+from mcli.lib.logger.logger import get_logger
+
 # from mcli.public.mcli.lib.shell.shell import shell_exec, get_shell_script_path
 from mcli.lib.shell.shell import get_shell_script_path, shell_exec

+logger = get_logger(__name__)
+

 # Click CLI group renamed to 'gcloud'
 @click.group()
 def gcloud():
-    """gcloud utility - use this to interact with gcloud"""
-    pass
+    """gcloud utility - use this to interact with gcloud."""


 @click.command()
 def start():
-    """Start a gcloud instance"""
+    """Start a gcloud instance."""
     scripts_path = get_shell_script_path("gcloud", __file__)
     shell_exec(scripts_path, "start")


 @gcloud.command()
 def stop():
-    """Start a gcloud instance"""
+    """Start a gcloud instance."""
     scripts_path = get_shell_script_path("gcloud", __file__)
     shell_exec(scripts_path, "stop")


 @gcloud.command()
 def describe():
-    """Start a gcloud instance"""
+    """Start a gcloud instance."""
     scripts_path = get_shell_script_path("gcloud", __file__)
     shell_exec(scripts_path, "describe")

@@ -36,7 +39,7 @@ def describe():
 @click.argument("remote-port", type=str)
 @click.argument("local-port", type=str)
 def tunnel(remote_port: str, local_port: str):
-    """Create an alpha tunnel using the instance"""
+    """Create an alpha tunnel using the instance."""
     logger.info(f"Creating a tunnel at {remote_port} to local port {local_port}")
     scripts_path = get_shell_script_path("gcloud", __file__)
     shell_exec(scripts_path, "tunnel", remote_port, local_port)
@@ -44,8 +47,8 @@ def tunnel(remote_port: str, local_port: str):

 @gcloud.command()
 def login(remote_port: str, local_port: str):
-    """Login to gcloud"""
-    logger.info(
+    """Login to gcloud."""
+    logger.info("Authenticating into gcloud")
     scripts_path = get_shell_script_path("gcloud", __file__)
     shell_exec(scripts_path, "login", remote_port, local_port)

mcli/workflow/git_commit/ai_service.py
CHANGED

@@ -1,6 +1,4 @@
-import
-import logging
-from typing import Any, Dict, Optional
+from typing import Any, Dict

 from mcli.lib.logger.logger import get_logger
 from mcli.lib.optional_deps import optional_import
@@ -13,7 +11,7 @@ logger = get_logger(__name__)


 class GitCommitAIService:
-    """AI service for generating intelligent git commit messages"""
+    """AI service for generating intelligent git commit messages."""

     def __init__(self):
         self.config = self._load_config()
@@ -24,7 +22,7 @@ class GitCommitAIService:
         self.ollama_base_url = self.config.get("ollama_base_url", "http://localhost:11434")

     def _load_config(self) -> Dict[str, Any]:
-        """Load LLM configuration from config.toml"""
+        """Load LLM configuration from config.toml."""
         try:
             config = read_from_toml("config.toml", "llm") or {}

@@ -48,7 +46,7 @@ class GitCommitAIService:
         }

     def _analyze_file_patterns(self, changes: Dict[str, Any]) -> Dict[str, Any]:
-        """Analyze file patterns to understand the scope of changes"""
+        """Analyze file patterns to understand the scope of changes."""
         analysis = {"languages": set(), "categories": set(), "scope": "unknown", "confidence": 0.0}

         all_files = (
@@ -109,7 +107,7 @@ class GitCommitAIService:
             ],
             "backend": [".py", ".go", ".rs", ".java", ".cpp", "api/", "server/", "backend/"],
             "database": [".sql", "migration", "schema", "models/", "db/"],
-            "infra": ["docker", "deploy", "infrastructure", ".
+            "infra": ["docker", "deploy", "infrastructure", ".tf", "kubernetes", "helm"],
         }

         for file in all_files:
@@ -149,12 +147,14 @@ class GitCommitAIService:
     def _create_commit_prompt(
         self, changes: Dict[str, Any], diff_content: str, analysis: Dict[str, Any]
     ) -> str:
-        """Create a detailed prompt for AI commit message generation"""
+        """Create a detailed prompt for AI commit message generation."""

         # Truncate diff if too long (keep first 2000 chars)
-
+        _truncated_diff = (
+            diff_content[:2000] + "..." if len(diff_content) > 2000 else diff_content
+        )  # noqa: F841

-        prompt =
+        prompt = """You are an expert software developer writing git commit messages following conventional commit standards.

 CHANGE ANALYSIS:
 - Files changed: {changes['total_files']}
@@ -204,7 +204,7 @@ Generate ONLY the commit message, nothing else:"""
         return prompt

     def generate_commit_message(self, changes: Dict[str, Any], diff_content: str) -> str:
-        """Generate an AI-powered commit message"""
+        """Generate an AI-powered commit message."""
         try:
             # Check if ollama is available
             if not OLLAMA_AVAILABLE:
@@ -254,7 +254,7 @@ Generate ONLY the commit message, nothing else:"""
         )

     def _clean_commit_message(self, message: str) -> str:
-        """Clean up AI generated commit message"""
+        """Clean up AI generated commit message."""
         lines = message.strip().split("\n")

         # Remove any introductory text
@@ -274,8 +274,7 @@ Generate ONLY the commit message, nothing else:"""
         return "\n".join(cleaned_lines) if cleaned_lines else ""

     def _generate_fallback_message(self, changes: Dict[str, Any], analysis: Dict[str, Any]) -> str:
-        """Generate fallback commit message using rules"""
-        summary_parts = []
+        """Generate fallback commit message using rules."""

         # Determine commit type based on analysis
         if "tests" in analysis["categories"]:
@@ -317,7 +316,7 @@ Generate ONLY the commit message, nothing else:"""
         return f"{commit_type}{scope}: {description}"

     def test_ai_service(self) -> bool:
-        """Test if the AI service is working properly"""
+        """Test if the AI service is working properly."""
         try:
             # Test with minimal changes
             test_changes = {
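When the AI path is unavailable, the fallback path above assembles a conventional-commit style message from the analysis, ending in `f"{commit_type}{scope}: {description}"`. A toy illustration of that shape; the type and scope values here are examples, not the package's actual inference rules:

# Toy sketch of the conventional-commit format used by the fallback path.
def fallback_message(commit_type: str, scope: str, description: str) -> str:
    scope_part = f"({scope})" if scope else ""  # scope handling here is illustrative
    return f"{commit_type}{scope_part}: {description}"

print(fallback_message("test", "daemon", "add coverage for process manager"))
# -> test(daemon): add coverage for process manager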
mcli/workflow/lsh_integration.py
CHANGED

@@ -10,12 +10,10 @@ import sys
 from pathlib import Path
 from typing import Any, Dict, Optional

-import click
-
 from mcli.lib.api import mcli_decorators as mcli
 from mcli.lib.logger.logger import get_logger
 from mcli.lib.services.data_pipeline import DataPipelineConfig, LSHDataPipeline
-from mcli.lib.services.lsh_client import LSHClient
+from mcli.lib.services.lsh_client import LSHClient

 logger = get_logger(__name__)

@@ -26,7 +24,7 @@ logger = get_logger(__name__)
 )
 @mcli.option("--api-key", default=None, help="LSH API key (default: $LSH_API_KEY)")
 async def lsh_status(url: Optional[str], api_key: Optional[str]):
-    """Check LSH daemon connection and status"""
+    """Check LSH daemon connection and status."""
     try:
         async with LSHClient(base_url=url, api_key=api_key) as client:
             # Test connection
@@ -64,7 +62,7 @@ async def lsh_status(url: Optional[str], api_key: Optional[str]):
 @mcli.option("--url", default=None, help="LSH API URL")
 @mcli.option("--api-key", default=None, help="LSH API key")
 async def lsh_jobs(status: Optional[str], format: str, url: Optional[str], api_key: Optional[str]):
-    """List LSH jobs"""
+    """List LSH jobs."""
     try:
         async with LSHClient(base_url=url, api_key=api_key) as client:
             filter_params = {}
@@ -125,7 +123,7 @@ async def lsh_create_job(
     url: Optional[str],
     api_key: Optional[str],
 ):
-    """Create a new LSH job"""
+    """Create a new LSH job."""
     try:
         async with LSHClient(base_url=url, api_key=api_key) as client:
             job_spec = {
@@ -173,7 +171,7 @@ async def lsh_pipeline(
     url: Optional[str],
     api_key: Optional[str],
 ):
-    """Start LSH data pipeline listener"""
+    """Start LSH data pipeline listener."""
     try:
         # Configure pipeline
         config = DataPipelineConfig()
@@ -221,7 +219,7 @@ async def lsh_pipeline(
 @mcli.option("--api-key", default=None, help="LSH API key")
 @mcli.option("--filter", help="Event type filter (e.g., 'job:completed')")
 async def lsh_listen(url: Optional[str], api_key: Optional[str], filter: Optional[str]):
-    """Listen to LSH events for debugging"""
+    """Listen to LSH events for debugging."""
     try:
         mcli.echo(mcli.style("👂 Listening to LSH events...", fg="blue", bold=True))
         mcli.echo("Press Ctrl+C to stop")
@@ -259,7 +257,7 @@ async def lsh_listen(url: Optional[str], api_key: Optional[str], filter: Optiona
 async def lsh_webhook(
     action: str, endpoint: Optional[str], url: Optional[str], api_key: Optional[str]
 ):
-    """Manage LSH webhooks"""
+    """Manage LSH webhooks."""
     try:
         async with LSHClient(base_url=url, api_key=api_key) as client:
             if action == "list":
@@ -289,7 +287,7 @@ async def lsh_webhook(
 @mcli.option("--set-api-key", help="Set LSH API key")
 @mcli.option("--show", is_flag=True, help="Show current configuration")
 def lsh_config(set_url: Optional[str], set_api_key: Optional[str], show: bool):
-    """Configure LSH integration settings"""
+    """Configure LSH integration settings."""
     env_file = Path.home() / ".mcli" / "lsh.env"
     env_file.parent.mkdir(exist_ok=True)

@@ -339,5 +337,5 @@ def lsh_config(set_url: Optional[str], set_api_key: Optional[str], show: bool):

 # Register all commands with mcli
 def register_lsh_commands():
-    """Register LSH integration commands with mcli"""
+    """Register LSH integration commands with mcli."""
     pass  # Commands are automatically registered via decorators
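Every command in this module follows the same pattern: open an LSHClient as an async context manager, configured from the --url and --api-key options (the latter defaulting to $LSH_API_KEY per the option help). A minimal sketch of that usage, assuming the constructor keywords shown in the diff; the get_status() call is illustrative and may not match the client's real method names:

# Sketch of the recurring LSHClient usage pattern; get_status() is hypothetical.
import asyncio
import os
from typing import Optional

from mcli.lib.services.lsh_client import LSHClient

async def check_status(url: Optional[str] = None, api_key: Optional[str] = None) -> None:
    # Fall back to the LSH_API_KEY environment variable, as the option help suggests.
    api_key = api_key or os.environ.get("LSH_API_KEY")
    async with LSHClient(base_url=url, api_key=api_key) as client:
        status = await client.get_status()  # illustrative call
        print(status)

if __name__ == "__main__":
    asyncio.run(check_status())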