mcli-framework 7.0.0__py3-none-any.whl
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
- mcli/app/chat_cmd.py +42 -0
- mcli/app/commands_cmd.py +226 -0
- mcli/app/completion_cmd.py +216 -0
- mcli/app/completion_helpers.py +288 -0
- mcli/app/cron_test_cmd.py +697 -0
- mcli/app/logs_cmd.py +419 -0
- mcli/app/main.py +492 -0
- mcli/app/model/model.py +1060 -0
- mcli/app/model_cmd.py +227 -0
- mcli/app/redis_cmd.py +269 -0
- mcli/app/video/video.py +1114 -0
- mcli/app/visual_cmd.py +303 -0
- mcli/chat/chat.py +2409 -0
- mcli/chat/command_rag.py +514 -0
- mcli/chat/enhanced_chat.py +652 -0
- mcli/chat/system_controller.py +1010 -0
- mcli/chat/system_integration.py +1016 -0
- mcli/cli.py +25 -0
- mcli/config.toml +20 -0
- mcli/lib/api/api.py +586 -0
- mcli/lib/api/daemon_client.py +203 -0
- mcli/lib/api/daemon_client_local.py +44 -0
- mcli/lib/api/daemon_decorator.py +217 -0
- mcli/lib/api/mcli_decorators.py +1032 -0
- mcli/lib/auth/auth.py +85 -0
- mcli/lib/auth/aws_manager.py +85 -0
- mcli/lib/auth/azure_manager.py +91 -0
- mcli/lib/auth/credential_manager.py +192 -0
- mcli/lib/auth/gcp_manager.py +93 -0
- mcli/lib/auth/key_manager.py +117 -0
- mcli/lib/auth/mcli_manager.py +93 -0
- mcli/lib/auth/token_manager.py +75 -0
- mcli/lib/auth/token_util.py +1011 -0
- mcli/lib/config/config.py +47 -0
- mcli/lib/discovery/__init__.py +1 -0
- mcli/lib/discovery/command_discovery.py +274 -0
- mcli/lib/erd/erd.py +1345 -0
- mcli/lib/erd/generate_graph.py +453 -0
- mcli/lib/files/files.py +76 -0
- mcli/lib/fs/fs.py +109 -0
- mcli/lib/lib.py +29 -0
- mcli/lib/logger/logger.py +611 -0
- mcli/lib/performance/optimizer.py +409 -0
- mcli/lib/performance/rust_bridge.py +502 -0
- mcli/lib/performance/uvloop_config.py +154 -0
- mcli/lib/pickles/pickles.py +50 -0
- mcli/lib/search/cached_vectorizer.py +479 -0
- mcli/lib/services/data_pipeline.py +460 -0
- mcli/lib/services/lsh_client.py +441 -0
- mcli/lib/services/redis_service.py +387 -0
- mcli/lib/shell/shell.py +137 -0
- mcli/lib/toml/toml.py +33 -0
- mcli/lib/ui/styling.py +47 -0
- mcli/lib/ui/visual_effects.py +634 -0
- mcli/lib/watcher/watcher.py +185 -0
- mcli/ml/api/app.py +215 -0
- mcli/ml/api/middleware.py +224 -0
- mcli/ml/api/routers/admin_router.py +12 -0
- mcli/ml/api/routers/auth_router.py +244 -0
- mcli/ml/api/routers/backtest_router.py +12 -0
- mcli/ml/api/routers/data_router.py +12 -0
- mcli/ml/api/routers/model_router.py +302 -0
- mcli/ml/api/routers/monitoring_router.py +12 -0
- mcli/ml/api/routers/portfolio_router.py +12 -0
- mcli/ml/api/routers/prediction_router.py +267 -0
- mcli/ml/api/routers/trade_router.py +12 -0
- mcli/ml/api/routers/websocket_router.py +76 -0
- mcli/ml/api/schemas.py +64 -0
- mcli/ml/auth/auth_manager.py +425 -0
- mcli/ml/auth/models.py +154 -0
- mcli/ml/auth/permissions.py +302 -0
- mcli/ml/backtesting/backtest_engine.py +502 -0
- mcli/ml/backtesting/performance_metrics.py +393 -0
- mcli/ml/cache.py +400 -0
- mcli/ml/cli/main.py +398 -0
- mcli/ml/config/settings.py +394 -0
- mcli/ml/configs/dvc_config.py +230 -0
- mcli/ml/configs/mlflow_config.py +131 -0
- mcli/ml/configs/mlops_manager.py +293 -0
- mcli/ml/dashboard/app.py +532 -0
- mcli/ml/dashboard/app_integrated.py +738 -0
- mcli/ml/dashboard/app_supabase.py +560 -0
- mcli/ml/dashboard/app_training.py +615 -0
- mcli/ml/dashboard/cli.py +51 -0
- mcli/ml/data_ingestion/api_connectors.py +501 -0
- mcli/ml/data_ingestion/data_pipeline.py +567 -0
- mcli/ml/data_ingestion/stream_processor.py +512 -0
- mcli/ml/database/migrations/env.py +94 -0
- mcli/ml/database/models.py +667 -0
- mcli/ml/database/session.py +200 -0
- mcli/ml/experimentation/ab_testing.py +845 -0
- mcli/ml/features/ensemble_features.py +607 -0
- mcli/ml/features/political_features.py +676 -0
- mcli/ml/features/recommendation_engine.py +809 -0
- mcli/ml/features/stock_features.py +573 -0
- mcli/ml/features/test_feature_engineering.py +346 -0
- mcli/ml/logging.py +85 -0
- mcli/ml/mlops/data_versioning.py +518 -0
- mcli/ml/mlops/experiment_tracker.py +377 -0
- mcli/ml/mlops/model_serving.py +481 -0
- mcli/ml/mlops/pipeline_orchestrator.py +614 -0
- mcli/ml/models/base_models.py +324 -0
- mcli/ml/models/ensemble_models.py +675 -0
- mcli/ml/models/recommendation_models.py +474 -0
- mcli/ml/models/test_models.py +487 -0
- mcli/ml/monitoring/drift_detection.py +676 -0
- mcli/ml/monitoring/metrics.py +45 -0
- mcli/ml/optimization/portfolio_optimizer.py +834 -0
- mcli/ml/preprocessing/data_cleaners.py +451 -0
- mcli/ml/preprocessing/feature_extractors.py +491 -0
- mcli/ml/preprocessing/ml_pipeline.py +382 -0
- mcli/ml/preprocessing/politician_trading_preprocessor.py +569 -0
- mcli/ml/preprocessing/test_preprocessing.py +294 -0
- mcli/ml/scripts/populate_sample_data.py +200 -0
- mcli/ml/tasks.py +400 -0
- mcli/ml/tests/test_integration.py +429 -0
- mcli/ml/tests/test_training_dashboard.py +387 -0
- mcli/public/oi/oi.py +15 -0
- mcli/public/public.py +4 -0
- mcli/self/self_cmd.py +1246 -0
- mcli/workflow/daemon/api_daemon.py +800 -0
- mcli/workflow/daemon/async_command_database.py +681 -0
- mcli/workflow/daemon/async_process_manager.py +591 -0
- mcli/workflow/daemon/client.py +530 -0
- mcli/workflow/daemon/commands.py +1196 -0
- mcli/workflow/daemon/daemon.py +905 -0
- mcli/workflow/daemon/daemon_api.py +59 -0
- mcli/workflow/daemon/enhanced_daemon.py +571 -0
- mcli/workflow/daemon/process_cli.py +244 -0
- mcli/workflow/daemon/process_manager.py +439 -0
- mcli/workflow/daemon/test_daemon.py +275 -0
- mcli/workflow/dashboard/dashboard_cmd.py +113 -0
- mcli/workflow/docker/docker.py +0 -0
- mcli/workflow/file/file.py +100 -0
- mcli/workflow/gcloud/config.toml +21 -0
- mcli/workflow/gcloud/gcloud.py +58 -0
- mcli/workflow/git_commit/ai_service.py +328 -0
- mcli/workflow/git_commit/commands.py +430 -0
- mcli/workflow/lsh_integration.py +355 -0
- mcli/workflow/model_service/client.py +594 -0
- mcli/workflow/model_service/download_and_run_efficient_models.py +288 -0
- mcli/workflow/model_service/lightweight_embedder.py +397 -0
- mcli/workflow/model_service/lightweight_model_server.py +714 -0
- mcli/workflow/model_service/lightweight_test.py +241 -0
- mcli/workflow/model_service/model_service.py +1955 -0
- mcli/workflow/model_service/ollama_efficient_runner.py +425 -0
- mcli/workflow/model_service/pdf_processor.py +386 -0
- mcli/workflow/model_service/test_efficient_runner.py +234 -0
- mcli/workflow/model_service/test_example.py +315 -0
- mcli/workflow/model_service/test_integration.py +131 -0
- mcli/workflow/model_service/test_new_features.py +149 -0
- mcli/workflow/openai/openai.py +99 -0
- mcli/workflow/politician_trading/commands.py +1790 -0
- mcli/workflow/politician_trading/config.py +134 -0
- mcli/workflow/politician_trading/connectivity.py +490 -0
- mcli/workflow/politician_trading/data_sources.py +395 -0
- mcli/workflow/politician_trading/database.py +410 -0
- mcli/workflow/politician_trading/demo.py +248 -0
- mcli/workflow/politician_trading/models.py +165 -0
- mcli/workflow/politician_trading/monitoring.py +413 -0
- mcli/workflow/politician_trading/scrapers.py +966 -0
- mcli/workflow/politician_trading/scrapers_california.py +412 -0
- mcli/workflow/politician_trading/scrapers_eu.py +377 -0
- mcli/workflow/politician_trading/scrapers_uk.py +350 -0
- mcli/workflow/politician_trading/scrapers_us_states.py +438 -0
- mcli/workflow/politician_trading/supabase_functions.py +354 -0
- mcli/workflow/politician_trading/workflow.py +852 -0
- mcli/workflow/registry/registry.py +180 -0
- mcli/workflow/repo/repo.py +223 -0
- mcli/workflow/scheduler/commands.py +493 -0
- mcli/workflow/scheduler/cron_parser.py +238 -0
- mcli/workflow/scheduler/job.py +182 -0
- mcli/workflow/scheduler/monitor.py +139 -0
- mcli/workflow/scheduler/persistence.py +324 -0
- mcli/workflow/scheduler/scheduler.py +679 -0
- mcli/workflow/sync/sync_cmd.py +437 -0
- mcli/workflow/sync/test_cmd.py +314 -0
- mcli/workflow/videos/videos.py +242 -0
- mcli/workflow/wakatime/wakatime.py +11 -0
- mcli/workflow/workflow.py +37 -0
- mcli_framework-7.0.0.dist-info/METADATA +479 -0
- mcli_framework-7.0.0.dist-info/RECORD +186 -0
- mcli_framework-7.0.0.dist-info/WHEEL +5 -0
- mcli_framework-7.0.0.dist-info/entry_points.txt +7 -0
- mcli_framework-7.0.0.dist-info/licenses/LICENSE +21 -0
- mcli_framework-7.0.0.dist-info/top_level.txt +1 -0
mcli/chat/chat.py
ADDED
|
@@ -0,0 +1,2409 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import readline
|
|
3
|
+
from typing import Dict, List, Optional
|
|
4
|
+
|
|
5
|
+
import requests
|
|
6
|
+
|
|
7
|
+
# Optional ollama import - gracefully handle if not installed
|
|
8
|
+
try:
|
|
9
|
+
import ollama
|
|
10
|
+
OLLAMA_AVAILABLE = True
|
|
11
|
+
except ImportError:
|
|
12
|
+
OLLAMA_AVAILABLE = False
|
|
13
|
+
ollama = None # type: ignore
|
|
14
|
+
|
|
15
|
+
from mcli.chat.system_integration import handle_system_request
|
|
16
|
+
from mcli.lib.api.daemon_client import get_daemon_client
|
|
17
|
+
from mcli.lib.discovery.command_discovery import get_command_discovery
|
|
18
|
+
from mcli.lib.logger.logger import get_logger
|
|
19
|
+
from mcli.lib.toml.toml import read_from_toml
|
|
20
|
+
from mcli.lib.ui.styling import console
|
|
21
|
+
|
|
22
|
+
# Load config from config.toml
|
|
23
|
+
CONFIG_PATH = "config.toml"
|
|
24
|
+
config = {}
|
|
25
|
+
try:
|
|
26
|
+
config = read_from_toml(CONFIG_PATH, "llm") or {}
|
|
27
|
+
except Exception:
|
|
28
|
+
# Silently handle config loading errors
|
|
29
|
+
config = {}
|
|
30
|
+
|
|
31
|
+
if not config:
|
|
32
|
+
# Default to lightweight local model for better performance and privacy
|
|
33
|
+
config = {
|
|
34
|
+
"provider": "local",
|
|
35
|
+
"model": "prajjwal1/bert-tiny",
|
|
36
|
+
"temperature": 0.7,
|
|
37
|
+
"system_prompt": "You are the MCLI Chat Assistant, a helpful AI assistant for the MCLI tool.",
|
|
38
|
+
"ollama_base_url": "http://localhost:8080", # Use lightweight model server
|
|
39
|
+
}
|
|
40
|
+
elif not config.get("openai_api_key") and config.get("provider", "openai") == "openai":
|
|
41
|
+
# If openai provider but no API key, switch to local lightweight models
|
|
42
|
+
config["provider"] = "local"
|
|
43
|
+
if not config.get("model"):
|
|
44
|
+
config["model"] = "prajjwal1/bert-tiny" # Use lightweight model
|
|
45
|
+
if not config.get("ollama_base_url"):
|
|
46
|
+
config["ollama_base_url"] = "http://localhost:8080" # Use lightweight model server
|
|
47
|
+
|
|
48
|
+
logger = get_logger(__name__)
|
|
49
|
+
|
|
50
|
+
# Fallbacks if not set in config.toml
|
|
51
|
+
LLM_PROVIDER = config.get("provider", "local")
|
|
52
|
+
MODEL_NAME = config.get("model", "prajjwal1/bert-tiny") # Default to lightweight model
|
|
53
|
+
OPENAI_API_KEY = config.get("openai_api_key", None)
|
|
54
|
+
OLLAMA_BASE_URL = config.get(
|
|
55
|
+
"ollama_base_url", "http://localhost:8080"
|
|
56
|
+
) # Default to lightweight server
|
|
57
|
+
TEMPERATURE = float(config.get("temperature", 0.7))
|
|
58
|
+
SYSTEM_PROMPT = config.get(
|
|
59
|
+
"system_prompt",
|
|
60
|
+
"""You are the MCLI Personal Assistant, an intelligent agent that helps manage your computer and tasks.
|
|
61
|
+
|
|
62
|
+
I am a true personal assistant with these capabilities:
|
|
63
|
+
- System monitoring and control (memory, disk, applications, cleanup)
|
|
64
|
+
- Job scheduling and automation (cron jobs, reminders, recurring tasks)
|
|
65
|
+
- Process management and command execution
|
|
66
|
+
- File organization and system maintenance
|
|
67
|
+
- Contextual awareness of ongoing tasks and system state
|
|
68
|
+
|
|
69
|
+
I maintain awareness of:
|
|
70
|
+
- Currently scheduled jobs and their status
|
|
71
|
+
- System health and resource usage
|
|
72
|
+
- Recent activities and completed tasks
|
|
73
|
+
- User preferences and routine patterns
|
|
74
|
+
|
|
75
|
+
I can proactively suggest optimizations, schedule maintenance, and automate repetitive tasks.
|
|
76
|
+
I'm designed to be your digital assistant that keeps things running smoothly.""",
|
|
77
|
+
)
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
class ChatClient:
|
|
81
|
+
"""Interactive chat client for MCLI command management"""
|
|
82
|
+
|
|
83
|
+
def __init__(self, use_remote: bool = False, model_override: str = None):
|
|
84
|
+
self.daemon = get_daemon_client()
|
|
85
|
+
self.history = []
|
|
86
|
+
self.session_active = True
|
|
87
|
+
self.use_remote = use_remote
|
|
88
|
+
self.model_override = model_override
|
|
89
|
+
self._configure_model_settings()
|
|
90
|
+
self._ensure_daemon_running()
|
|
91
|
+
self._load_scheduled_jobs()
|
|
92
|
+
|
|
93
|
+
def _configure_model_settings(self):
|
|
94
|
+
"""Configure model settings based on remote/local preferences"""
|
|
95
|
+
global LLM_PROVIDER, MODEL_NAME, OLLAMA_BASE_URL
|
|
96
|
+
|
|
97
|
+
if not self.use_remote:
|
|
98
|
+
# Default to lightweight local models
|
|
99
|
+
LLM_PROVIDER = "local"
|
|
100
|
+
MODEL_NAME = self.model_override or "prajjwal1/bert-tiny"
|
|
101
|
+
OLLAMA_BASE_URL = "http://localhost:8080" # Use lightweight model server port
|
|
102
|
+
|
|
103
|
+
# Update the config dictionary too
|
|
104
|
+
config["provider"] = "local"
|
|
105
|
+
config["model"] = MODEL_NAME
|
|
106
|
+
config["ollama_base_url"] = OLLAMA_BASE_URL
|
|
107
|
+
|
|
108
|
+
# Ensure lightweight model server is running
|
|
109
|
+
self._ensure_lightweight_model_server()
|
|
110
|
+
else:
|
|
111
|
+
# Use remote models from config
|
|
112
|
+
if self.model_override:
|
|
113
|
+
MODEL_NAME = self.model_override
|
|
114
|
+
# Keep existing provider settings from config
|
|
115
|
+
|
|
116
|
+
def _ensure_lightweight_model_server(self):
|
|
117
|
+
"""Ensure the lightweight model server is running"""
|
|
118
|
+
import time
|
|
119
|
+
|
|
120
|
+
import requests
|
|
121
|
+
|
|
122
|
+
try:
|
|
123
|
+
# Check if server is already running
|
|
124
|
+
response = requests.get(f"{OLLAMA_BASE_URL}/health", timeout=2)
|
|
125
|
+
if response.status_code == 200:
|
|
126
|
+
console.print("[green]✅ Lightweight model server already running[/green]")
|
|
127
|
+
# Server is running, but check if our model is loaded
|
|
128
|
+
try:
|
|
129
|
+
models_response = requests.get(f"{OLLAMA_BASE_URL}/api/tags", timeout=2)
|
|
130
|
+
if models_response.status_code == 200:
|
|
131
|
+
models_data = models_response.json()
|
|
132
|
+
loaded_models = [m.get("name", "") for m in models_data.get("models", [])]
|
|
133
|
+
if MODEL_NAME in loaded_models:
|
|
134
|
+
console.print(f"[green]✅ Model {MODEL_NAME} already loaded[/green]")
|
|
135
|
+
return # Server is running and model is loaded
|
|
136
|
+
else:
|
|
137
|
+
console.print(
|
|
138
|
+
f"[yellow]Model {MODEL_NAME} not loaded, will auto-load on first use[/yellow]"
|
|
139
|
+
)
|
|
140
|
+
return # Server will auto-load model when needed
|
|
141
|
+
except:
|
|
142
|
+
# If we can't check models, assume server will handle it
|
|
143
|
+
return
|
|
144
|
+
except:
|
|
145
|
+
pass
|
|
146
|
+
|
|
147
|
+
# Try to start the server automatically
|
|
148
|
+
console.print("[yellow]Starting lightweight model server...[/yellow]")
|
|
149
|
+
|
|
150
|
+
try:
|
|
151
|
+
import threading
|
|
152
|
+
|
|
153
|
+
from mcli.workflow.model_service.lightweight_model_server import LightweightModelServer
|
|
154
|
+
|
|
155
|
+
# Get configured model
|
|
156
|
+
model_name = MODEL_NAME
|
|
157
|
+
|
|
158
|
+
# Start server in background thread
|
|
159
|
+
server = LightweightModelServer(port=8080)
|
|
160
|
+
|
|
161
|
+
# Download and load model if needed
|
|
162
|
+
if server.download_and_load_model(model_name):
|
|
163
|
+
# Start server in background thread
|
|
164
|
+
def start_server_thread():
|
|
165
|
+
try:
|
|
166
|
+
server.start_server()
|
|
167
|
+
except Exception as e:
|
|
168
|
+
console.print(f"[red]Server thread error: {e}[/red]")
|
|
169
|
+
|
|
170
|
+
server_thread = threading.Thread(target=start_server_thread, daemon=True)
|
|
171
|
+
server_thread.start()
|
|
172
|
+
|
|
173
|
+
# Wait longer for server to start and verify it's working
|
|
174
|
+
max_retries = 10
|
|
175
|
+
for i in range(max_retries):
|
|
176
|
+
time.sleep(1)
|
|
177
|
+
try:
|
|
178
|
+
response = requests.get(f"{OLLAMA_BASE_URL}/health", timeout=1)
|
|
179
|
+
if response.status_code == 200:
|
|
180
|
+
console.print(
|
|
181
|
+
f"[green]✅ Lightweight model server started with {model_name}[/green]"
|
|
182
|
+
)
|
|
183
|
+
return
|
|
184
|
+
except:
|
|
185
|
+
pass
|
|
186
|
+
|
|
187
|
+
console.print(f"[yellow]⚠️ Server started but health check failed[/yellow]")
|
|
188
|
+
console.print("Falling back to remote models...")
|
|
189
|
+
self.use_remote = True
|
|
190
|
+
else:
|
|
191
|
+
console.print(f"[yellow]⚠️ Could not download/load model {model_name}[/yellow]")
|
|
192
|
+
console.print("Falling back to remote models...")
|
|
193
|
+
self.use_remote = True
|
|
194
|
+
|
|
195
|
+
except Exception as e:
|
|
196
|
+
console.print(f"[yellow]⚠️ Could not start lightweight model server: {e}[/yellow]")
|
|
197
|
+
console.print("Falling back to remote models...")
|
|
198
|
+
self.use_remote = True
|
|
199
|
+
|
|
200
|
+
def start_interactive_session(self):
|
|
201
|
+
"""Start the chat interface"""
|
|
202
|
+
console.print("[bold green]MCLI Personal Assistant[/bold green] (type 'exit' to quit)")
|
|
203
|
+
|
|
204
|
+
# Show current configuration
|
|
205
|
+
if not self.use_remote:
|
|
206
|
+
console.print(f"[dim]Using lightweight local model: {MODEL_NAME} (offline mode)[/dim]")
|
|
207
|
+
elif LLM_PROVIDER == "local":
|
|
208
|
+
console.print(f"[dim]Using local model: {MODEL_NAME} via Ollama[/dim]")
|
|
209
|
+
elif LLM_PROVIDER == "openai":
|
|
210
|
+
console.print(f"[dim]Using OpenAI model: {MODEL_NAME}[/dim]")
|
|
211
|
+
elif LLM_PROVIDER == "anthropic":
|
|
212
|
+
console.print(f"[dim]Using Anthropic model: {MODEL_NAME}[/dim]")
|
|
213
|
+
|
|
214
|
+
# Show proactive status update
|
|
215
|
+
self._show_startup_status()
|
|
216
|
+
|
|
217
|
+
console.print("How can I help you with your tasks today?")
|
|
218
|
+
console.print("\n[bold cyan]Available Commands:[/bold cyan]")
|
|
219
|
+
console.print("• [yellow]commands[/yellow] - List available functions")
|
|
220
|
+
console.print("• [yellow]run <command> [args][/yellow] - Execute command in container")
|
|
221
|
+
console.print("• [yellow]ps[/yellow] - List running processes (Docker-style)")
|
|
222
|
+
console.print("• [yellow]logs <id>[/yellow] - View process logs")
|
|
223
|
+
console.print("• [yellow]inspect <id>[/yellow] - Detailed process info")
|
|
224
|
+
console.print("• [yellow]start/stop <id>[/yellow] - Control process lifecycle")
|
|
225
|
+
console.print(
|
|
226
|
+
"• [yellow]System Control[/yellow] - Control applications (e.g., 'open TextEdit', 'take screenshot')"
|
|
227
|
+
)
|
|
228
|
+
console.print(
|
|
229
|
+
"• [yellow]Job Scheduling[/yellow] - Schedule tasks (e.g., 'schedule cleanup daily', 'what's my status?')"
|
|
230
|
+
)
|
|
231
|
+
console.print("• Ask questions about functions and codebase\n")
|
|
232
|
+
|
|
233
|
+
while self.session_active:
|
|
234
|
+
try:
|
|
235
|
+
user_input = console.input("[bold cyan]>>> [/bold cyan]").strip()
|
|
236
|
+
if not user_input:
|
|
237
|
+
continue
|
|
238
|
+
|
|
239
|
+
if user_input.lower() in ("exit", "quit"):
|
|
240
|
+
self.session_active = False
|
|
241
|
+
continue
|
|
242
|
+
|
|
243
|
+
self.process_input(user_input)
|
|
244
|
+
|
|
245
|
+
except KeyboardInterrupt:
|
|
246
|
+
console.print("\nUse 'exit' to quit the chat session")
|
|
247
|
+
except Exception as e:
|
|
248
|
+
logger.error(f"Chat error: {e}")
|
|
249
|
+
console.print(f"[red]Error:[/red] {str(e)}")
|
|
250
|
+
|
|
251
|
+
def process_input(self, user_input: str):
|
|
252
|
+
"""Process user input and generate response"""
|
|
253
|
+
self.history.append({"user": user_input})
|
|
254
|
+
|
|
255
|
+
# Check for commands list request
|
|
256
|
+
if user_input.lower().strip() == "commands":
|
|
257
|
+
self.handle_commands_list()
|
|
258
|
+
return
|
|
259
|
+
|
|
260
|
+
# Check for process management commands
|
|
261
|
+
if user_input.lower().startswith("ps") or user_input.lower().startswith("docker ps"):
|
|
262
|
+
self.handle_process_list()
|
|
263
|
+
return
|
|
264
|
+
elif user_input.lower().startswith("logs "):
|
|
265
|
+
process_id = user_input.split()[1] if len(user_input.split()) > 1 else None
|
|
266
|
+
if process_id:
|
|
267
|
+
self.handle_process_logs(process_id)
|
|
268
|
+
else:
|
|
269
|
+
console.print("[red]Usage: logs <process_id>[/red]")
|
|
270
|
+
return
|
|
271
|
+
elif user_input.lower().startswith("inspect "):
|
|
272
|
+
process_id = user_input.split()[1] if len(user_input.split()) > 1 else None
|
|
273
|
+
if process_id:
|
|
274
|
+
self.handle_process_inspect(process_id)
|
|
275
|
+
else:
|
|
276
|
+
console.print("[red]Usage: inspect <process_id>[/red]")
|
|
277
|
+
return
|
|
278
|
+
elif user_input.lower().startswith("stop "):
|
|
279
|
+
process_id = user_input.split()[1] if len(user_input.split()) > 1 else None
|
|
280
|
+
if process_id:
|
|
281
|
+
self.handle_process_stop(process_id)
|
|
282
|
+
else:
|
|
283
|
+
console.print("[red]Usage: stop <process_id>[/red]")
|
|
284
|
+
return
|
|
285
|
+
elif user_input.lower().startswith("start "):
|
|
286
|
+
process_id = user_input.split()[1] if len(user_input.split()) > 1 else None
|
|
287
|
+
if process_id:
|
|
288
|
+
self.handle_process_start(process_id)
|
|
289
|
+
else:
|
|
290
|
+
console.print("[red]Usage: start <process_id>[/red]")
|
|
291
|
+
return
|
|
292
|
+
|
|
293
|
+
# Check for command creation requests
|
|
294
|
+
if self.is_command_creation_request(user_input):
|
|
295
|
+
self.handle_command_creation(user_input)
|
|
296
|
+
return
|
|
297
|
+
|
|
298
|
+
# Check for job management requests BEFORE system control
|
|
299
|
+
# This prevents "list my jobs" from being caught by system control
|
|
300
|
+
if self._is_job_management_request(user_input):
|
|
301
|
+
self._handle_job_management(user_input)
|
|
302
|
+
return
|
|
303
|
+
|
|
304
|
+
# Check for system control requests
|
|
305
|
+
if self.is_system_control_request(user_input):
|
|
306
|
+
self.handle_system_control(user_input)
|
|
307
|
+
return
|
|
308
|
+
|
|
309
|
+
# Check for 'run <command> [args...]' pattern (containerized execution)
|
|
310
|
+
if user_input.lower().startswith("run "):
|
|
311
|
+
command_part = user_input[4:].strip()
|
|
312
|
+
|
|
313
|
+
# Handle natural language patterns like "run the hello world command"
|
|
314
|
+
if " command" in command_part.lower():
|
|
315
|
+
# Extract the actual command name from natural language
|
|
316
|
+
command_part = (
|
|
317
|
+
command_part.lower().replace(" command", "").replace("the ", "").strip()
|
|
318
|
+
)
|
|
319
|
+
|
|
320
|
+
parts = command_part.split()
|
|
321
|
+
if parts:
|
|
322
|
+
command = parts[0]
|
|
323
|
+
args = parts[1:] if len(parts) > 1 else []
|
|
324
|
+
self.handle_containerized_run(command, args)
|
|
325
|
+
else:
|
|
326
|
+
console.print("[red]No command provided after 'run'.[/red]")
|
|
327
|
+
return
|
|
328
|
+
|
|
329
|
+
# Check for natural language command execution requests
|
|
330
|
+
if self.is_command_execution_request(user_input):
|
|
331
|
+
command_name = self.extract_command_name(user_input)
|
|
332
|
+
if command_name:
|
|
333
|
+
self.handle_direct_command_execution(command_name)
|
|
334
|
+
return
|
|
335
|
+
|
|
336
|
+
# Try to check for command-related queries, but fallback gracefully if daemon is unavailable
|
|
337
|
+
try:
|
|
338
|
+
daemon_available = True
|
|
339
|
+
_ = self.daemon.list_commands()
|
|
340
|
+
except Exception as e:
|
|
341
|
+
daemon_available = False
|
|
342
|
+
logger.debug(f"Daemon unavailable, running in LLM-only mode. Details: {e}")
|
|
343
|
+
|
|
344
|
+
if daemon_available and any(
|
|
345
|
+
keyword in user_input.lower()
|
|
346
|
+
for keyword in ["command", "list", "show", "find", "search"]
|
|
347
|
+
):
|
|
348
|
+
self.handle_command_queries(user_input)
|
|
349
|
+
else:
|
|
350
|
+
self.generate_llm_response(user_input)
|
|
351
|
+
|
|
352
|
+
def execute_command_via_daemon(self, command_name: str, args: Optional[list] = None):
|
|
353
|
+
"""Execute a command via the daemon and print the result."""
|
|
354
|
+
try:
|
|
355
|
+
result = self.daemon.execute_command(command_name=command_name, args=args or [])
|
|
356
|
+
output = result.get("output") or result.get("result") or str(result)
|
|
357
|
+
console.print(f"[green]Command Output:[/green]\n{output}")
|
|
358
|
+
except Exception as e:
|
|
359
|
+
console.print(f"[red]Failed to execute command:[/red] {e}")
|
|
360
|
+
|
|
361
|
+
def is_command_execution_request(self, user_input: str) -> bool:
|
|
362
|
+
"""Check if user input is requesting to execute a command."""
|
|
363
|
+
lower_input = user_input.lower()
|
|
364
|
+
execution_keywords = [
|
|
365
|
+
"call the",
|
|
366
|
+
"execute the",
|
|
367
|
+
"run the",
|
|
368
|
+
"execute command",
|
|
369
|
+
"hello world",
|
|
370
|
+
"hello-world",
|
|
371
|
+
"helloworld",
|
|
372
|
+
]
|
|
373
|
+
# Be more specific - avoid matching on single words like "execute" or "call"
|
|
374
|
+
return any(keyword in lower_input for keyword in execution_keywords)
|
|
375
|
+
|
|
376
|
+
def extract_command_name(self, user_input: str) -> Optional[str]:
|
|
377
|
+
"""Extract command name from natural language input."""
|
|
378
|
+
lower_input = user_input.lower()
|
|
379
|
+
|
|
380
|
+
# Handle specific command patterns
|
|
381
|
+
if "hello" in lower_input:
|
|
382
|
+
return "hello"
|
|
383
|
+
|
|
384
|
+
# Try to extract command name using common patterns
|
|
385
|
+
import re
|
|
386
|
+
|
|
387
|
+
patterns = [
|
|
388
|
+
r"(?:call|execute|run)\s+(?:the\s+)?([a-zA-Z0-9\-_]+)(?:\s+command)?",
|
|
389
|
+
r"([a-zA-Z0-9\-_]+)\s+command",
|
|
390
|
+
r"the\s+([a-zA-Z0-9\-_]+)\s+command",
|
|
391
|
+
]
|
|
392
|
+
|
|
393
|
+
for pattern in patterns:
|
|
394
|
+
match = re.search(pattern, lower_input)
|
|
395
|
+
if match:
|
|
396
|
+
return match.group(1).replace("-", "_").replace(" ", "_")
|
|
397
|
+
|
|
398
|
+
return None
|
|
399
|
+
|
|
400
|
+
def handle_direct_command_execution(self, command_name: str):
|
|
401
|
+
"""Handle direct execution of a discovered command."""
|
|
402
|
+
try:
|
|
403
|
+
# Use command discovery to find the command
|
|
404
|
+
discovery = get_command_discovery()
|
|
405
|
+
command = discovery.get_command_by_name(command_name)
|
|
406
|
+
|
|
407
|
+
if command:
|
|
408
|
+
console.print(f"[green]Executing command:[/green] {command.full_name}")
|
|
409
|
+
try:
|
|
410
|
+
# Execute the command callback directly
|
|
411
|
+
if command.callback:
|
|
412
|
+
# For the hello command, we need to call it appropriately
|
|
413
|
+
if command.name == "hello" and command.full_name.startswith("self."):
|
|
414
|
+
# This is the hello command from self module - call with default argument
|
|
415
|
+
result = command.callback("World")
|
|
416
|
+
console.print(f"[green]✅ Command executed successfully[/green]")
|
|
417
|
+
else:
|
|
418
|
+
result = command.callback()
|
|
419
|
+
console.print(f"[green]✅ Command executed successfully[/green]")
|
|
420
|
+
else:
|
|
421
|
+
console.print("[yellow]Command found but has no callback[/yellow]")
|
|
422
|
+
except Exception as e:
|
|
423
|
+
console.print(f"[red]Error executing command:[/red] {e}")
|
|
424
|
+
else:
|
|
425
|
+
console.print(f"[red]Command '{command_name}' not found[/red]")
|
|
426
|
+
console.print("[yellow]Try 'commands' to see available commands[/yellow]")
|
|
427
|
+
|
|
428
|
+
except Exception as e:
|
|
429
|
+
console.print(f"[red]Error finding command:[/red] {e}")
|
|
430
|
+
|
|
431
|
+
def handle_command_queries(self, query: str):
|
|
432
|
+
"""Handle command-related queries using existing command registry"""
|
|
433
|
+
try:
|
|
434
|
+
# Always fetch all commands (active and inactive)
|
|
435
|
+
result = self.daemon.list_commands(all=True)
|
|
436
|
+
if isinstance(result, dict):
|
|
437
|
+
commands = result.get("commands", [])
|
|
438
|
+
elif isinstance(result, list):
|
|
439
|
+
commands = result
|
|
440
|
+
else:
|
|
441
|
+
commands = []
|
|
442
|
+
except Exception as e:
|
|
443
|
+
logger.debug(
|
|
444
|
+
f"Could not fetch commands from daemon: {e}. Falling back to LLM-only mode."
|
|
445
|
+
)
|
|
446
|
+
return self.generate_llm_response(query)
|
|
447
|
+
|
|
448
|
+
# Simple keyword matching for initial implementation
|
|
449
|
+
lowered = query.lower()
|
|
450
|
+
if (
|
|
451
|
+
"list command" in lowered
|
|
452
|
+
or "show command" in lowered
|
|
453
|
+
or "available command" in lowered
|
|
454
|
+
or "what can i do" in lowered
|
|
455
|
+
or "commands" in lowered
|
|
456
|
+
):
|
|
457
|
+
self.list_commands() # Always use discovery system, ignore daemon commands
|
|
458
|
+
elif "search" in lowered or "find" in lowered:
|
|
459
|
+
self.search_commands(query) # Always use discovery system, ignore daemon commands
|
|
460
|
+
else:
|
|
461
|
+
# Check if this is a context-sensitive question that might need system help
|
|
462
|
+
if self._is_system_help_request(query):
|
|
463
|
+
self._handle_system_help_request(query)
|
|
464
|
+
else:
|
|
465
|
+
self.generate_llm_response(query)
|
|
466
|
+
|
|
467
|
+
def list_commands(self, commands: List[Dict] = None):
|
|
468
|
+
"""List available commands"""
|
|
469
|
+
if commands is None:
|
|
470
|
+
# Use discovery system to get all commands
|
|
471
|
+
try:
|
|
472
|
+
discovery = get_command_discovery()
|
|
473
|
+
commands = discovery.get_commands(include_groups=False)
|
|
474
|
+
except Exception as e:
|
|
475
|
+
console.print(f"[red]Error discovering commands: {e}[/red]")
|
|
476
|
+
return
|
|
477
|
+
|
|
478
|
+
if not commands:
|
|
479
|
+
console.print("No commands found")
|
|
480
|
+
return
|
|
481
|
+
|
|
482
|
+
console.print(f"[bold]Available Commands ({len(commands)}):[/bold]")
|
|
483
|
+
for cmd in commands[:20]: # Show first 20 to avoid overwhelming
|
|
484
|
+
if "full_name" in cmd:
|
|
485
|
+
# New discovery format
|
|
486
|
+
console.print(f"• [green]{cmd['full_name']}[/green]")
|
|
487
|
+
else:
|
|
488
|
+
# Old daemon format
|
|
489
|
+
status = "[INACTIVE] " if not cmd.get("is_active", True) else ""
|
|
490
|
+
console.print(
|
|
491
|
+
f"{status}• [green]{cmd['name']}[/green] ({cmd.get('language', 'python')})"
|
|
492
|
+
)
|
|
493
|
+
|
|
494
|
+
if cmd.get("description"):
|
|
495
|
+
console.print(f" {cmd['description']}")
|
|
496
|
+
if cmd.get("module"):
|
|
497
|
+
console.print(f" Module: {cmd['module']}")
|
|
498
|
+
elif cmd.get("tags"):
|
|
499
|
+
console.print(f" Tags: {', '.join(cmd['tags'])}")
|
|
500
|
+
console.print()
|
|
501
|
+
|
|
502
|
+
if len(commands) > 20:
|
|
503
|
+
console.print(f"[dim]... and {len(commands) - 20} more commands[/dim]")
|
|
504
|
+
console.print("[dim]Use 'mcli commands list' to see all commands[/dim]")
|
|
505
|
+
|
|
506
|
+
def search_commands(self, query: str, commands: List[Dict] = None):
|
|
507
|
+
"""Search commands based on query"""
|
|
508
|
+
search_term = query.lower().replace("search", "").replace("find", "").strip()
|
|
509
|
+
|
|
510
|
+
if commands is None:
|
|
511
|
+
# Use discovery system to search
|
|
512
|
+
try:
|
|
513
|
+
discovery = get_command_discovery()
|
|
514
|
+
results = discovery.search_commands(search_term)
|
|
515
|
+
except Exception as e:
|
|
516
|
+
console.print(f"[red]Error searching commands: {e}[/red]")
|
|
517
|
+
return
|
|
518
|
+
else:
|
|
519
|
+
# Use provided commands (legacy mode)
|
|
520
|
+
results = [
|
|
521
|
+
cmd
|
|
522
|
+
for cmd in commands
|
|
523
|
+
if (
|
|
524
|
+
search_term in cmd["name"].lower()
|
|
525
|
+
or search_term in (cmd["description"] or "").lower()
|
|
526
|
+
or any(search_term in tag.lower() for tag in cmd.get("tags", []))
|
|
527
|
+
)
|
|
528
|
+
]
|
|
529
|
+
|
|
530
|
+
if not results:
|
|
531
|
+
console.print(f"No commands found matching '[yellow]{search_term}[/yellow]'")
|
|
532
|
+
return
|
|
533
|
+
|
|
534
|
+
console.print(f"[bold]Matching Commands for '{search_term}' ({len(results)}):[/bold]")
|
|
535
|
+
for cmd in results[:10]: # Show first 10 results
|
|
536
|
+
if "full_name" in cmd:
|
|
537
|
+
# New discovery format
|
|
538
|
+
console.print(f"• [green]{cmd['full_name']}[/green]")
|
|
539
|
+
else:
|
|
540
|
+
# Old daemon format
|
|
541
|
+
console.print(f"• [green]{cmd['name']}[/green] ({cmd.get('language', 'python')})")
|
|
542
|
+
|
|
543
|
+
console.print(f" [italic]{cmd['description']}[/italic]")
|
|
544
|
+
console.print()
|
|
545
|
+
|
|
546
|
+
if len(results) > 10:
|
|
547
|
+
console.print(f"[dim]... and {len(results) - 10} more results[/dim]")
|
|
548
|
+
|
|
549
|
+
def handle_commands_list(self):
|
|
550
|
+
"""Handle 'commands' command to list available functions"""
|
|
551
|
+
try:
|
|
552
|
+
# Get commands from daemon
|
|
553
|
+
if hasattr(self.daemon, "list_commands"):
|
|
554
|
+
commands = self.daemon.list_commands()
|
|
555
|
+
|
|
556
|
+
if not commands:
|
|
557
|
+
console.print("[yellow]No commands available through daemon[/yellow]")
|
|
558
|
+
return
|
|
559
|
+
|
|
560
|
+
console.print(f"[bold green]Available Commands ({len(commands)}):[/bold green]")
|
|
561
|
+
|
|
562
|
+
for i, cmd in enumerate(commands[:20]): # Show first 20 commands
|
|
563
|
+
name = cmd.get("name", "Unknown")
|
|
564
|
+
description = cmd.get("description", cmd.get("help", "No description"))
|
|
565
|
+
|
|
566
|
+
# Truncate long descriptions
|
|
567
|
+
if len(description) > 80:
|
|
568
|
+
description = description[:77] + "..."
|
|
569
|
+
|
|
570
|
+
console.print(f"• [cyan]{name}[/cyan]")
|
|
571
|
+
if description:
|
|
572
|
+
console.print(f" {description}")
|
|
573
|
+
|
|
574
|
+
if len(commands) > 20:
|
|
575
|
+
console.print(f"[dim]... and {len(commands) - 20} more commands[/dim]")
|
|
576
|
+
console.print("[dim]Use natural language to ask about specific commands[/dim]")
|
|
577
|
+
|
|
578
|
+
else:
|
|
579
|
+
# Fallback - try to get commands another way
|
|
580
|
+
console.print(
|
|
581
|
+
"[yellow]Command listing not available - daemon may not be running[/yellow]"
|
|
582
|
+
)
|
|
583
|
+
console.print(
|
|
584
|
+
"Try starting the daemon with: [cyan]mcli workflow daemon start[/cyan]"
|
|
585
|
+
)
|
|
586
|
+
|
|
587
|
+
except Exception as e:
|
|
588
|
+
logger.debug(f"Error listing commands: {e}")
|
|
589
|
+
console.print("[yellow]Could not retrieve commands list[/yellow]")
|
|
590
|
+
console.print("Available built-in chat commands:")
|
|
591
|
+
console.print("• [cyan]commands[/cyan] - This command")
|
|
592
|
+
console.print("• [cyan]ps[/cyan] - List running processes")
|
|
593
|
+
console.print("• [cyan]run <command>[/cyan] - Execute a command")
|
|
594
|
+
console.print("• [cyan]logs <id>[/cyan] - View process logs")
|
|
595
|
+
console.print("• [cyan]inspect <id>[/cyan] - Detailed process info")
|
|
596
|
+
console.print("• [cyan]start/stop <id>[/cyan] - Control process lifecycle")
|
|
597
|
+
|
|
598
|
+
def handle_process_list(self):
|
|
599
|
+
"""Handle 'ps' command to list running processes"""
|
|
600
|
+
try:
|
|
601
|
+
import requests
|
|
602
|
+
|
|
603
|
+
# Use daemon client to get correct URL
|
|
604
|
+
response = requests.get(f"{self.daemon.base_url}/processes")
|
|
605
|
+
if response.status_code == 200:
|
|
606
|
+
data = response.json()
|
|
607
|
+
processes = data.get("processes", [])
|
|
608
|
+
|
|
609
|
+
if not processes:
|
|
610
|
+
console.print("No processes running")
|
|
611
|
+
return
|
|
612
|
+
|
|
613
|
+
# Format output like docker ps
|
|
614
|
+
console.print(
|
|
615
|
+
"[bold]CONTAINER ID NAME COMMAND STATUS UPTIME CPU MEMORY[/bold]"
|
|
616
|
+
)
|
|
617
|
+
for proc in processes:
|
|
618
|
+
console.print(
|
|
619
|
+
f"{proc['id']:<13} {proc['name']:<14} {proc['command'][:24]:<24} {proc['status']:<9} {proc['uptime']:<10} {proc['cpu']:<8} {proc['memory']}"
|
|
620
|
+
)
|
|
621
|
+
else:
|
|
622
|
+
console.print(
|
|
623
|
+
f"[red]Error: Failed to get process list (HTTP {response.status_code})[/red]"
|
|
624
|
+
)
|
|
625
|
+
except Exception as e:
|
|
626
|
+
console.print(f"[red]Error connecting to daemon: {e}[/red]")
|
|
627
|
+
|
|
628
|
+
def handle_process_logs(self, process_id: str):
|
|
629
|
+
"""Handle 'logs' command to show process logs"""
|
|
630
|
+
try:
|
|
631
|
+
import requests
|
|
632
|
+
|
|
633
|
+
response = requests.get(f"{self.daemon.base_url}/processes/{process_id}/logs")
|
|
634
|
+
if response.status_code == 200:
|
|
635
|
+
logs = response.json()
|
|
636
|
+
console.print(f"[bold]Logs for {process_id}:[/bold]")
|
|
637
|
+
if logs.get("stdout"):
|
|
638
|
+
console.print("[green]STDOUT:[/green]")
|
|
639
|
+
console.print(logs["stdout"])
|
|
640
|
+
if logs.get("stderr"):
|
|
641
|
+
console.print("[red]STDERR:[/red]")
|
|
642
|
+
console.print(logs["stderr"])
|
|
643
|
+
if not logs.get("stdout") and not logs.get("stderr"):
|
|
644
|
+
console.print("No logs available")
|
|
645
|
+
elif response.status_code == 404:
|
|
646
|
+
console.print(f"[red]Process {process_id} not found[/red]")
|
|
647
|
+
else:
|
|
648
|
+
console.print(f"[red]Error: Failed to get logs (HTTP {response.status_code})[/red]")
|
|
649
|
+
except Exception as e:
|
|
650
|
+
console.print(f"[red]Error connecting to daemon: {e}[/red]")
|
|
651
|
+
|
|
652
|
+
def handle_process_inspect(self, process_id: str):
|
|
653
|
+
"""Handle 'inspect' command to show detailed process info"""
|
|
654
|
+
try:
|
|
655
|
+
import requests
|
|
656
|
+
|
|
657
|
+
response = requests.get(f"{self.daemon.base_url}/processes/{process_id}")
|
|
658
|
+
if response.status_code == 200:
|
|
659
|
+
info = response.json()
|
|
660
|
+
console.print(f"[bold]Process {process_id} Details:[/bold]")
|
|
661
|
+
console.print(f"ID: {info['id']}")
|
|
662
|
+
console.print(f"Name: {info['name']}")
|
|
663
|
+
console.print(f"Status: {info['status']}")
|
|
664
|
+
console.print(f"PID: {info['pid']}")
|
|
665
|
+
console.print(f"Command: {info['command']} {' '.join(info.get('args', []))}")
|
|
666
|
+
console.print(f"Working Dir: {info.get('working_dir', 'N/A')}")
|
|
667
|
+
console.print(f"Created: {info.get('created_at', 'N/A')}")
|
|
668
|
+
console.print(f"Started: {info.get('started_at', 'N/A')}")
|
|
669
|
+
if info.get("stats"):
|
|
670
|
+
stats = info["stats"]
|
|
671
|
+
console.print(f"CPU: {stats.get('cpu_percent', 0):.1f}%")
|
|
672
|
+
console.print(f"Memory: {stats.get('memory_mb', 0):.1f} MB")
|
|
673
|
+
console.print(f"Uptime: {stats.get('uptime_seconds', 0)} seconds")
|
|
674
|
+
elif response.status_code == 404:
|
|
675
|
+
console.print(f"[red]Process {process_id} not found[/red]")
|
|
676
|
+
else:
|
|
677
|
+
console.print(
|
|
678
|
+
f"[red]Error: Failed to inspect process (HTTP {response.status_code})[/red]"
|
|
679
|
+
)
|
|
680
|
+
except Exception as e:
|
|
681
|
+
console.print(f"[red]Error connecting to daemon: {e}[/red]")
|
|
682
|
+
|
|
683
|
+
def handle_process_stop(self, process_id: str):
|
|
684
|
+
"""Handle 'stop' command to stop a process"""
|
|
685
|
+
try:
|
|
686
|
+
import requests
|
|
687
|
+
|
|
688
|
+
response = requests.post(f"{self.daemon.base_url}/processes/{process_id}/stop")
|
|
689
|
+
if response.status_code == 200:
|
|
690
|
+
console.print(f"[green]Process {process_id} stopped[/green]")
|
|
691
|
+
elif response.status_code == 404:
|
|
692
|
+
console.print(f"[red]Process {process_id} not found[/red]")
|
|
693
|
+
else:
|
|
694
|
+
console.print(
|
|
695
|
+
f"[red]Error: Failed to stop process (HTTP {response.status_code})[/red]"
|
|
696
|
+
)
|
|
697
|
+
except Exception as e:
|
|
698
|
+
console.print(f"[red]Error connecting to daemon: {e}[/red]")
|
|
699
|
+
|
|
700
|
+
def handle_process_start(self, process_id: str):
|
|
701
|
+
"""Handle 'start' command to start a process"""
|
|
702
|
+
try:
|
|
703
|
+
import requests
|
|
704
|
+
|
|
705
|
+
response = requests.post(f"{self.daemon.base_url}/processes/{process_id}/start")
|
|
706
|
+
if response.status_code == 200:
|
|
707
|
+
console.print(f"[green]Process {process_id} started[/green]")
|
|
708
|
+
elif response.status_code == 404:
|
|
709
|
+
console.print(f"[red]Process {process_id} not found[/red]")
|
|
710
|
+
else:
|
|
711
|
+
console.print(
|
|
712
|
+
f"[red]Error: Failed to start process (HTTP {response.status_code})[/red]"
|
|
713
|
+
)
|
|
714
|
+
except Exception as e:
|
|
715
|
+
console.print(f"[red]Error connecting to daemon: {e}[/red]")
|
|
716
|
+
|
|
717
|
+
def handle_containerized_run(self, command: str, args: List[str]):
|
|
718
|
+
"""Handle 'run' command to execute in a containerized process"""
|
|
719
|
+
try:
|
|
720
|
+
import requests
|
|
721
|
+
|
|
722
|
+
# Check if it's a registered command first
|
|
723
|
+
try:
|
|
724
|
+
result = self.daemon.list_commands(all=True)
|
|
725
|
+
commands = result.get("commands", []) if isinstance(result, dict) else result
|
|
726
|
+
|
|
727
|
+
# Look for matching command
|
|
728
|
+
matching_cmd = None
|
|
729
|
+
for cmd in commands:
|
|
730
|
+
if cmd["name"].lower() == command.lower():
|
|
731
|
+
matching_cmd = cmd
|
|
732
|
+
break
|
|
733
|
+
|
|
734
|
+
if matching_cmd:
|
|
735
|
+
# Execute via the existing command system but in a container
|
|
736
|
+
response = requests.post(
|
|
737
|
+
f"{self.daemon.base_url}/processes/run",
|
|
738
|
+
json={
|
|
739
|
+
"name": f"cmd-{matching_cmd['name']}",
|
|
740
|
+
"command": (
|
|
741
|
+
"python"
|
|
742
|
+
if matching_cmd["language"] == "python"
|
|
743
|
+
else matching_cmd["language"]
|
|
744
|
+
),
|
|
745
|
+
"args": ["-c", matching_cmd["code"]] + args,
|
|
746
|
+
"detach": True,
|
|
747
|
+
},
|
|
748
|
+
)
|
|
749
|
+
|
|
750
|
+
if response.status_code == 200:
|
|
751
|
+
result = response.json()
|
|
752
|
+
console.print(
|
|
753
|
+
f"[green]Started containerized command '{matching_cmd['name']}' with ID {result['id'][:12]}[/green]"
|
|
754
|
+
)
|
|
755
|
+
console.print("Use 'logs <id>' to view output or 'ps' to see status")
|
|
756
|
+
else:
|
|
757
|
+
console.print(f"[red]Failed to start containerized command[/red]")
|
|
758
|
+
return
|
|
759
|
+
except Exception:
|
|
760
|
+
pass # Fall through to shell command execution
|
|
761
|
+
|
|
762
|
+
# Execute as shell command in container
|
|
763
|
+
response = requests.post(
|
|
764
|
+
f"{self.daemon.base_url}/processes/run",
|
|
765
|
+
json={"name": f"shell-{command}", "command": command, "args": args, "detach": True},
|
|
766
|
+
)
|
|
767
|
+
|
|
768
|
+
if response.status_code == 200:
|
|
769
|
+
result = response.json()
|
|
770
|
+
console.print(
|
|
771
|
+
f"[green]Started containerized process with ID {result['id'][:12]}[/green]"
|
|
772
|
+
)
|
|
773
|
+
console.print("Use 'logs <id>' to view output or 'ps' to see status")
|
|
774
|
+
else:
|
|
775
|
+
console.print(f"[red]Failed to start containerized process[/red]")
|
|
776
|
+
|
|
777
|
+
except Exception as e:
|
|
778
|
+
console.print(f"[red]Error connecting to daemon: {e}[/red]")
|
|
779
|
+
|
|
780
|
+
def _ensure_daemon_running(self):
|
|
781
|
+
"""Ensure the API daemon is running, start it if not"""
|
|
782
|
+
try:
|
|
783
|
+
if not self.daemon.is_running():
|
|
784
|
+
console.print("[yellow]Starting MCLI daemon...[/yellow]")
|
|
785
|
+
import threading
|
|
786
|
+
import time
|
|
787
|
+
|
|
788
|
+
from mcli.workflow.daemon.api_daemon import APIDaemonService
|
|
789
|
+
|
|
790
|
+
# Start daemon in a separate thread
|
|
791
|
+
daemon_service = APIDaemonService()
|
|
792
|
+
daemon_thread = threading.Thread(target=daemon_service.start, daemon=True)
|
|
793
|
+
daemon_thread.start()
|
|
794
|
+
|
|
795
|
+
# Wait for daemon to be ready with progress
|
|
796
|
+
for i in range(10): # Wait up to 10 seconds
|
|
797
|
+
time.sleep(1)
|
|
798
|
+
if self.daemon.is_running():
|
|
799
|
+
console.print(
|
|
800
|
+
f"[green]✅ MCLI daemon started successfully on {self.daemon.base_url}[/green]"
|
|
801
|
+
)
|
|
802
|
+
return
|
|
803
|
+
if i % 2 == 0: # Show progress every 2 seconds
|
|
804
|
+
console.print(f"[dim]Waiting for daemon to start... ({i+1}/10)[/dim]")
|
|
805
|
+
|
|
806
|
+
console.print("[red]❌ Daemon failed to start within 10 seconds[/red]")
|
|
807
|
+
console.print(
|
|
808
|
+
"[yellow]Try starting manually: mcli workflow api-daemon start[/yellow]"
|
|
809
|
+
)
|
|
810
|
+
except Exception as e:
|
|
811
|
+
console.print(f"[red]❌ Could not start daemon: {e}[/red]")
|
|
812
|
+
console.print("[yellow]Try starting manually: mcli workflow api-daemon start[/yellow]")
|
|
813
|
+
|
|
814
|
+
def _pull_model_if_needed(self, model_name: str):
|
|
815
|
+
"""Pull the model from Ollama if it doesn't exist locally"""
|
|
816
|
+
try:
|
|
817
|
+
console.print(
|
|
818
|
+
f"[yellow]Downloading model '{model_name}'. This may take a few minutes...[/yellow]"
|
|
819
|
+
)
|
|
820
|
+
|
|
821
|
+
import subprocess
|
|
822
|
+
|
|
823
|
+
result = subprocess.run(
|
|
824
|
+
["ollama", "pull", model_name],
|
|
825
|
+
capture_output=True,
|
|
826
|
+
text=True,
|
|
827
|
+
timeout=300, # 5 minute timeout
|
|
828
|
+
)
|
|
829
|
+
|
|
830
|
+
if result.returncode == 0:
|
|
831
|
+
console.print(f"[green]✅ Model '{model_name}' downloaded successfully[/green]")
|
|
832
|
+
else:
|
|
833
|
+
console.print(
|
|
834
|
+
f"[red]❌ Failed to download model '{model_name}': {result.stderr}[/red]"
|
|
835
|
+
)
|
|
836
|
+
|
|
837
|
+
except subprocess.TimeoutExpired:
|
|
838
|
+
console.print(f"[red]❌ Download of model '{model_name}' timed out[/red]")
|
|
839
|
+
except FileNotFoundError:
|
|
840
|
+
console.print("[red]❌ Ollama command not found. Please install Ollama first:[/red]")
|
|
841
|
+
console.print(" brew install ollama")
|
|
842
|
+
except Exception as e:
|
|
843
|
+
console.print(f"[red]❌ Error downloading model '{model_name}': {e}[/red]")
|
|
844
|
+
|
|
845
|
+
def generate_llm_response(self, query: str):
|
|
846
|
+
"""Generate response using LLM integration"""
|
|
847
|
+
try:
|
|
848
|
+
# Try to get all commands, including inactive
|
|
849
|
+
try:
|
|
850
|
+
result = self.daemon.list_commands(all=True)
|
|
851
|
+
if isinstance(result, dict):
|
|
852
|
+
commands = result.get("commands", [])
|
|
853
|
+
elif isinstance(result, list):
|
|
854
|
+
commands = result
|
|
855
|
+
else:
|
|
856
|
+
commands = []
|
|
857
|
+
except Exception:
|
|
858
|
+
commands = []
|
|
859
|
+
|
|
860
|
+
command_context = (
|
|
861
|
+
"\n".join(
|
|
862
|
+
f"Command: {cmd['name']}\nDescription: {cmd.get('description', '')}\nTags: {', '.join(cmd.get('tags', []))}\nStatus: {'INACTIVE' if not cmd.get('is_active', True) else 'ACTIVE'}"
|
|
863
|
+
for cmd in commands
|
|
864
|
+
)
|
|
865
|
+
if commands
|
|
866
|
+
else "(No command context available)"
|
|
867
|
+
)
|
|
868
|
+
|
|
869
|
+
# Check if this is a command creation request
|
|
870
|
+
is_creation_request = any(
|
|
871
|
+
keyword in query.lower()
|
|
872
|
+
for keyword in [
|
|
873
|
+
"create command",
|
|
874
|
+
"create a command",
|
|
875
|
+
"new command",
|
|
876
|
+
"make command",
|
|
877
|
+
"integrate",
|
|
878
|
+
"add command",
|
|
879
|
+
"build command",
|
|
880
|
+
"generate command",
|
|
881
|
+
]
|
|
882
|
+
)
|
|
883
|
+
|
|
884
|
+
if is_creation_request:
|
|
885
|
+
prompt = f"""{SYSTEM_PROMPT}
|
|
886
|
+
|
|
887
|
+
IMPORTANT CONTEXT:
|
|
888
|
+
- Available MCLI Commands: {command_context}
|
|
889
|
+
- The above list shows ALL currently available commands
|
|
890
|
+
- If a command is NOT in this list, it does NOT exist and needs to be created
|
|
891
|
+
- DO NOT suggest non-existent commands like 'mcli ls' or 'mcli list-files'
|
|
892
|
+
- ALWAYS generate new code when asked to create functionality
|
|
893
|
+
|
|
894
|
+
User Request: {query}
|
|
895
|
+
|
|
896
|
+
This is a command creation request. You must:
|
|
897
|
+
1. Check if the requested functionality exists in the available commands above
|
|
898
|
+
2. If it DOES NOT exist, generate NEW Python code using Click framework
|
|
899
|
+
3. Provide complete, working implementation with error handling
|
|
900
|
+
4. Include proper command structure and help text
|
|
901
|
+
5. Suggest specific file paths and explain testing
|
|
902
|
+
|
|
903
|
+
NEVER suggest commands that don't exist. ALWAYS create new code for missing functionality."""
|
|
904
|
+
else:
|
|
905
|
+
prompt = f"""{SYSTEM_PROMPT}
|
|
906
|
+
|
|
907
|
+
AVAILABLE MCLI COMMANDS:
|
|
908
|
+
{command_context}
|
|
909
|
+
|
|
910
|
+
SYSTEM CONTROL CAPABILITIES (always available):
|
|
911
|
+
- System Information: 'what time is it?', 'how much RAM do I have?', 'show system specs'
|
|
912
|
+
- Disk Management: 'how much disk space do I have?', 'clear system caches'
|
|
913
|
+
- Application Control: 'open TextEdit', 'take screenshot', 'close Calculator'
|
|
914
|
+
- Memory Monitoring: 'show memory usage' with intelligent recommendations
|
|
915
|
+
|
|
916
|
+
JOB SCHEDULING CAPABILITIES (always available):
|
|
917
|
+
- Schedule Tasks: 'schedule cleanup daily at 2am', 'remind me weekly to check backups'
|
|
918
|
+
- Agent Status: 'what's my status?', 'list my jobs', 'show active tasks'
|
|
919
|
+
- Job Management: 'cancel job cleanup', 'stop scheduled task'
|
|
920
|
+
- Automation: Set up recurring system maintenance, file cleanup, monitoring
|
|
921
|
+
|
|
922
|
+
RESPONSE GUIDELINES:
|
|
923
|
+
- Be conversational and helpful, not robotic
|
|
924
|
+
- Provide intelligent suggestions based on context
|
|
925
|
+
- When users ask about cleanup/optimization, guide them to system control features
|
|
926
|
+
- If a command doesn't exist, suggest alternatives or explain how to create it
|
|
927
|
+
- Always consider what the user is trying to accomplish
|
|
928
|
+
|
|
929
|
+
User Question: {query}
|
|
930
|
+
|
|
931
|
+
Respond naturally and helpfully, considering both MCLI commands and system control capabilities."""
|
|
932
|
+
|
|
933
|
+
if LLM_PROVIDER == "local":
|
|
934
|
+
# Check if ollama is available
|
|
935
|
+
if not OLLAMA_AVAILABLE:
|
|
936
|
+
console.print("[red]Error: ollama is not installed.[/red]")
|
|
937
|
+
console.print("[yellow]For local model support, install ollama:[/yellow]")
|
|
938
|
+
console.print(" pip install ollama")
|
|
939
|
+
console.print("\n[yellow]Or switch to OpenAI by configuring:[/yellow]")
|
|
940
|
+
console.print(" provider = \"openai\"")
|
|
941
|
+
console.print(" openai_api_key = \"your-key-here\"")
|
|
942
|
+
return
|
|
943
|
+
|
|
944
|
+
# Use Ollama SDK for local model inference
|
|
945
|
+
try:
|
|
946
|
+
response = ollama.generate( # type: ignore
|
|
947
|
+
model=MODEL_NAME,
|
|
948
|
+
prompt=prompt,
|
|
949
|
+
options={
|
|
950
|
+
"temperature": TEMPERATURE,
|
|
951
|
+
},
|
|
952
|
+
)
|
|
953
|
+
content = response.get("response", "")
|
|
954
|
+
|
|
955
|
+
# Clean up response like we do for OpenAI
|
|
956
|
+
import re
|
|
957
|
+
|
|
958
|
+
split_patterns = [
|
|
959
|
+
r"\n2\.",
|
|
960
|
+
r"\n```",
|
|
961
|
+
r"\nRelevant commands",
|
|
962
|
+
r"\n3\.",
|
|
963
|
+
r"\n- \*\*Command",
|
|
964
|
+
]
|
|
965
|
+
split_idx = len(content)
|
|
966
|
+
for pat in split_patterns:
|
|
967
|
+
m = re.search(pat, content)
|
|
968
|
+
if m:
|
|
969
|
+
split_idx = min(split_idx, m.start())
|
|
970
|
+
main_answer = content[:split_idx].strip()
|
|
971
|
+
# Validate and correct any hallucinated commands
|
|
972
|
+
corrected_response = self.validate_and_correct_response(main_answer, commands)
|
|
973
|
+
return console.print(corrected_response)
|
|
974
|
+
|
|
975
|
+
except ollama.ResponseError as e: # type: ignore
|
|
976
|
+
if "model" in str(e).lower() and "not found" in str(e).lower():
|
|
977
|
+
if not self.use_remote:
|
|
978
|
+
# In lightweight mode, model not found means we need to ensure server is running
|
|
979
|
+
console.print(
|
|
980
|
+
f"[yellow]Model '{MODEL_NAME}' not found on lightweight server.[/yellow]"
|
|
981
|
+
)
|
|
982
|
+
console.print(
|
|
983
|
+
"[yellow]Ensuring lightweight model server is running...[/yellow]"
|
|
984
|
+
)
|
|
985
|
+
self._ensure_lightweight_model_server()
|
|
986
|
+
# Retry the request
|
|
987
|
+
try:
|
|
988
|
+
response = ollama.generate(
|
|
989
|
+
model=MODEL_NAME,
|
|
990
|
+
prompt=prompt,
|
|
991
|
+
options={
|
|
992
|
+
"temperature": TEMPERATURE,
|
|
993
|
+
},
|
|
994
|
+
)
|
|
995
|
+
content = response.get("response", "")
|
|
996
|
+
import re
|
|
997
|
+
|
|
998
|
+
split_patterns = [
|
|
999
|
+
r"\n2\.",
|
|
1000
|
+
r"\n```",
|
|
1001
|
+
r"\nRelevant commands",
|
|
1002
|
+
r"\n3\.",
|
|
1003
|
+
r"\n- \*\*Command",
|
|
1004
|
+
]
|
|
1005
|
+
split_idx = len(content)
|
|
1006
|
+
for pat in split_patterns:
|
|
1007
|
+
m = re.search(pat, content)
|
|
1008
|
+
if m:
|
|
1009
|
+
split_idx = min(split_idx, m.start())
|
|
1010
|
+
main_answer = content[:split_idx].strip()
|
|
1011
|
+
# Validate and correct any hallucinated commands
|
|
1012
|
+
                                corrected_response = self.validate_and_correct_response(
                                    main_answer, commands
                                )
                                return console.print(corrected_response)
                            except Exception:
                                raise Exception(
                                    "Failed to generate response after restarting lightweight server"
                                )
                        else:
                            # In remote mode, try to pull the model with Ollama
                            console.print(
                                f"[yellow]Model '{MODEL_NAME}' not found. Attempting to pull it...[/yellow]"
                            )
                            self._pull_model_if_needed(MODEL_NAME)
                            # Retry the request
                            try:
                                response = ollama.generate(
                                    model=MODEL_NAME,
                                    prompt=prompt,
                                    options={
                                        "temperature": TEMPERATURE,
                                    },
                                )
                                content = response.get("response", "")
                                import re

                                split_patterns = [
                                    r"\n2\.",
                                    r"\n```",
                                    r"\nRelevant commands",
                                    r"\n3\.",
                                    r"\n- \*\*Command",
                                ]
                                split_idx = len(content)
                                for pat in split_patterns:
                                    m = re.search(pat, content)
                                    if m:
                                        split_idx = min(split_idx, m.start())
                                main_answer = content[:split_idx].strip()
                                # Validate and correct any hallucinated commands
                                corrected_response = self.validate_and_correct_response(
                                    main_answer, commands
                                )
                                return console.print(corrected_response)
                            except Exception:
                                raise Exception("Failed to generate response after pulling model")
                    else:
                        raise Exception(f"Ollama API error: {e}")

                except (requests.exceptions.ConnectionError, ollama.RequestError if OLLAMA_AVAILABLE else Exception):  # type: ignore
                    console.print(
                        "[red]Could not connect to Ollama. Please ensure Ollama is running:[/red]"
                    )
                    console.print(" brew install ollama")
                    console.print(" ollama serve")
                    console.print(f" Visit: {OLLAMA_BASE_URL}")
                    return
                except requests.exceptions.Timeout:
                    console.print(
                        "[yellow]Request timed out. The model might be processing a complex query.[/yellow]"
                    )
                    return
                except Exception as api_exc:
                    raise

            elif LLM_PROVIDER == "openai":
                from openai import OpenAI

                if not OPENAI_API_KEY:
                    console.print(
                        "[red]OpenAI API key not configured. Please set it in config.toml[/red]"
                    )
                    return
                client = OpenAI(api_key=OPENAI_API_KEY)
                try:
                    response = client.chat.completions.create(
                        model=MODEL_NAME,
                        messages=[{"role": "user", "content": prompt}],
                        temperature=TEMPERATURE,
                    )
                    # Only print the first section (natural language answer) before any markdown/code block
                    content = response.choices[0].message.content
                    # Split on '```' or '2.' or 'Relevant commands' to avoid printing command/code blocks
                    import re

                    # Try to split on numbered sections or code block
                    split_patterns = [
                        r"\n2\.",
                        r"\n```",
                        r"\nRelevant commands",
                        r"\n3\.",
                        r"\n- \*\*Command",
                    ]
                    split_idx = len(content)
                    for pat in split_patterns:
                        m = re.search(pat, content)
                        if m:
                            split_idx = min(split_idx, m.start())
                    main_answer = content[:split_idx].strip()
                    return console.print(main_answer)
                except Exception as api_exc:
                    raise

            elif LLM_PROVIDER == "anthropic":
                from anthropic import Anthropic

                api_key = config.get("anthropic_api_key", None)
                client = Anthropic(api_key=api_key)
                try:
                    response = client.messages.create(
                        model=MODEL_NAME,
                        max_tokens=1000,
                        temperature=TEMPERATURE,
                        system=SYSTEM_PROMPT or "",
                        messages=[{"role": "user", "content": query}],
                    )
                    return console.print(response.content)
                except Exception as api_exc:
                    raise

            else:
                raise ValueError(f"Unsupported LLM provider: {LLM_PROVIDER}")

        except Exception as e:
            import traceback

            logger.error(f"LLM Error: {e}\n{traceback.format_exc()}")
            console.print("[red]Error:[/red] Could not generate LLM response")
            console.print("Please check your LLM configuration in .env file")

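    # Example (illustrative sketch, assuming `content` holds a model response):
    # the split-pattern heuristic above keeps only the prose that precedes the
    # first numbered section or fenced code block.
    #
    #     import re
    #     content = "1. Use the logs command.\n2. Example:\n```\nmcli logs\n```"
    #     cut = min(
    #         (m.start() for m in (re.search(p, content) for p in [r"\n2\.", r"\n```"]) if m),
    #         default=len(content),
    #     )
    #     print(content[:cut].strip())  # -> "1. Use the logs command."
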
    def is_command_creation_request(self, user_input: str) -> bool:
        """Check if user input is requesting to create a new command."""
        lower_input = user_input.lower()

        # Primary creation patterns
        creation_patterns = [
            r"\bcreate\s+.*command",  # "create a command", "create simple command", etc.
            r"\bmake\s+.*command",  # "make a command", "make new command", etc.
            r"\bbuild\s+.*command",  # "build a command", "build new command", etc.
            r"\bgenerate\s+.*command",  # "generate a command", etc.
            r"\badd\s+.*command",  # "add a command", "add new command", etc.
            r"\bnew\s+command",  # "new command"
            r"\bcommand\s+.*create",  # "command to create", etc.
            r"\bintegrate.*code",  # "integrate code", "integrate the code", etc.
            r"\bcan\s+you\s+create",  # "can you create"
            r"\bhelp\s+me\s+create",  # "help me create"
        ]

        import re

        for pattern in creation_patterns:
            if re.search(pattern, lower_input):
                return True

        return False

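    # Illustrative classifications implied by the patterns above (hedged
    # examples; behavior follows directly from the regexes as written):
    #
    #     client.is_command_creation_request("create a command for backups")  # True: \bcreate\s+.*command
    #     client.is_command_creation_request("can you create a backup tool")  # True: \bcan\s+you\s+create
    #     client.is_command_creation_request("list my commands")              # False: no creation pattern
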
    def handle_command_creation(self, user_input: str):
        """Handle command creation requests with complete end-to-end implementation."""
        console.print("[bold green]🛠️ Command Creation Mode[/bold green]")
        console.print("I'll create a complete working MCLI command for you!")
        console.print()

        # Check if user already specified their preference in the input
        if any(phrase in user_input.lower() for phrase in ["code only", "just code", "show code"]):
            console.print("[yellow]Code-only mode selected[/yellow]")
            self._generate_code_only(user_input)
            return

        # Ask user if they want full automation or just guidance
        try:
            console.print("[bold cyan]Choose your approach:[/bold cyan]")
            console.print(
                "1. [green]Full automation[/green] - I'll create, save, and test the command"
            )
            console.print(
                "2. [yellow]Code only[/yellow] - I'll just generate code for you to implement"
            )
            console.print()
            console.print("[dim]Tip: You can also say 'code only' in your original request[/dim]")
            console.print()

            choice = console.input(
                "[bold cyan]Enter choice (1 or 2, default=1): [/bold cyan]"
            ).strip()
            if choice == "2" or choice.lower() in ["code only", "code", "just code"]:
                # Original behavior - just generate code
                self._generate_code_only(user_input)
            else:
                # New behavior - complete automation
                self._create_complete_command(user_input)

        except (EOFError, KeyboardInterrupt):
            # Default to full automation if input fails
            console.print("Defaulting to full automation...")
            self._create_complete_command(user_input)

    def validate_and_correct_response(self, response_text: str, available_commands: list) -> str:
        """Validate AI response and correct any hallucinated commands."""
        import re

        # Extract command names from available commands
        real_commands = set()
        if available_commands:
            for cmd in available_commands:
                if isinstance(cmd, dict) and "name" in cmd:
                    real_commands.add(cmd["name"])

        # Common hallucinated commands to catch
        hallucinated_patterns = [
            r"mcli ls\b",
            r"mcli list\b",
            r"mcli list-files\b",
            r"mcli dir\b",
            r"mcli files\b",
            r"mcli show\b",
        ]

        corrected_response = response_text

        # Check for hallucinated commands and correct them
        for pattern in hallucinated_patterns:
            if re.search(pattern, corrected_response, re.IGNORECASE):
                # Add warning about non-existent command
                correction = "\n\n⚠️ **Note**: The command mentioned above does not exist in MCLI. To create this functionality, you would need to implement a new command. Would you like me to help you create it?"
                corrected_response = (
                    re.sub(
                        pattern,
                        "**[Command Does Not Exist]** " + pattern.replace("\\b", ""),
                        corrected_response,
                        flags=re.IGNORECASE,
                    )
                    + correction
                )
                break

        # Look for any "mcli [word]" patterns that aren't in real commands
        mcli_commands = re.findall(r"mcli\s+([a-zA-Z][a-zA-Z0-9_-]*)", corrected_response)
        for cmd in mcli_commands:
            if cmd not in real_commands:
                # This might be a hallucination
                warning = f"\n\n⚠️ **Note**: 'mcli {cmd}' does not exist. Available commands can be listed with the 'commands' chat command."
                if warning not in corrected_response:
                    corrected_response += warning
                break

        return corrected_response

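    # Minimal usage sketch for the corrector above, assuming an empty command
    # registry so that any `mcli <word>` mention gets flagged (illustrative):
    #
    #     text = "Run `mcli ls` to list files."
    #     fixed = client.validate_and_correct_response(text, [])
    #     # "mcli ls" matches a hallucinated pattern, so it is rewritten with a
    #     # "**[Command Does Not Exist]**" marker and a warning is appended.
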
    def _generate_code_only(self, user_input: str):
        """Generate code only without creating files."""
        # Use the enhanced LLM generation with creation context
        self.generate_llm_response(user_input)

        # Provide additional guidance
        console.print()
        console.print("[bold cyan]💡 Next Steps:[/bold cyan]")
        console.print("1. Copy the generated code to a new Python file")
        console.print("2. Save it in the appropriate MCLI module directory")
        console.print("3. Test the command with: [yellow]mcli <your-command>[/yellow]")
        console.print("4. Use [yellow]mcli commands list[/yellow] to verify it's available")
        console.print()
        console.print(
            "[dim]Tip: Commands are automatically discovered when placed in the correct directories[/dim]"
        )

    def _create_complete_command(self, user_input: str):
        """Create a complete working command with full automation."""
        import os
        import re
        from pathlib import Path

        console.print("[bold blue]🤖 Starting automated command creation...[/bold blue]")
        console.print()

        # Step 1: Generate code with AI
        console.print("1. [cyan]Generating command code...[/cyan]")
        code_response = self._get_command_code_from_ai(user_input)

        if not code_response:
            console.print("[red]❌ Failed to generate code. Falling back to code-only mode.[/red]")
            self._generate_code_only(user_input)
            return

        # Step 2: Extract command info and code
        command_info = self._parse_command_response(code_response)
        if not command_info:
            console.print(
                "[red]❌ Could not parse command information. Showing generated code:[/red]"
            )
            console.print(code_response)
            return

        # Step 3: Create the file
        console.print(f"2. [cyan]Creating command file: {command_info['filename']}[/cyan]")
        file_path = self._create_command_file(command_info)

        if not file_path:
            console.print("[red]❌ Failed to create command file.[/red]")
            return

        # Step 4: Test the command
        console.print(f"3. [cyan]Testing command: {command_info['name']}[/cyan]")
        test_result = self._test_command(command_info["name"])

        # Step 5: Show results
        console.print()
        if test_result:
            console.print("[bold green]✅ Command created successfully![/bold green]")
            console.print(f"📁 File: [green]{file_path}[/green]")
            console.print(f"🚀 Usage: [yellow]mcli {command_info['name']} --help[/yellow]")
            console.print(f"📋 Test: [yellow]mcli {command_info['name']}[/yellow]")
        else:
            console.print("[yellow]⚠️ Command created but may need debugging[/yellow]")
            console.print(f"📁 File: [yellow]{file_path}[/yellow]")
            console.print("💡 Check the file and test manually")

        console.print()
        console.print("[dim]Command is now available in MCLI![/dim]")

    def _get_command_code_from_ai(self, user_input: str) -> str:
        """Get command code from AI with specific formatting requirements."""
        # Enhanced prompt for structured code generation
        try:
            commands = self.daemon.list_commands()
        except Exception:
            commands = []

        command_context = (
            "\n".join(
                f"Command: {cmd['name']}\nDescription: {cmd.get('description', '')}"
                for cmd in commands
            )
            if commands
            else "(No command context available)"
        )

        prompt = f"""You are creating a complete MCLI command. Generate ONLY the Python code with this exact structure:

COMMAND_NAME: [single word command name]
FILENAME: [snake_case_filename.py]
DESCRIPTION: [brief description]
CODE:
```python
import click

@click.command()
@click.option('--example', help='Example option')
@click.argument('input_arg', required=False)
def command_name(example, input_arg):
    '''Command description here'''
    # Implementation here
    click.echo("Command works!")

if __name__ == '__main__':
    command_name()
```

User request: {user_input}

Available commands for reference: {command_context}

Generate a working command that implements the requested functionality. Use proper Click decorators, error handling, and helpful output."""

        if LLM_PROVIDER == "local":
            try:
                response = ollama.generate(
                    model=MODEL_NAME,
                    prompt=prompt,
                    options={
                        "temperature": 0.3,  # Lower temperature for more consistent code
                    },
                )
                return response.get("response", "")
            except Exception as e:
                logger.error(f"AI code generation error: {e}")

        return None

    def _parse_command_response(self, response: str) -> dict:
        """Parse AI response to extract command information."""
        import re

        # Extract command name
        name_match = re.search(r"COMMAND_NAME:\s*([a-zA-Z_][a-zA-Z0-9_-]*)", response)
        if not name_match:
            return None

        # Extract filename
        filename_match = re.search(r"FILENAME:\s*([a-zA-Z_][a-zA-Z0-9_.-]*\.py)", response)
        filename = filename_match.group(1) if filename_match else f"{name_match.group(1)}.py"

        # Extract description
        desc_match = re.search(r"DESCRIPTION:\s*(.+)", response)
        description = desc_match.group(1).strip() if desc_match else "Auto-generated command"

        # Extract code
        code_match = re.search(r"```python\n(.*?)\n```", response, re.DOTALL)
        if not code_match:
            # Try without python specifier
            code_match = re.search(r"```\n(.*?)\n```", response, re.DOTALL)

        if not code_match:
            return None

        return {
            "name": name_match.group(1),
            "filename": filename,
            "description": description,
            "code": code_match.group(1).strip(),
        }

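    # Illustrative round-trip for the parser above, assuming a well-formed
    # reply in the structured format that _get_command_code_from_ai requests:
    #
    #     reply = (
    #         "COMMAND_NAME: greet\n"
    #         "FILENAME: greet.py\n"
    #         "DESCRIPTION: Print a greeting\n"
    #         "CODE:\n```python\nimport click\n\n@click.command()\ndef greet():\n"
    #         "    click.echo('hi')\n```"
    #     )
    #     info = client._parse_command_response(reply)
    #     # info == {"name": "greet", "filename": "greet.py",
    #     #          "description": "Print a greeting", "code": "import click\n..."}
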
    def _create_command_file(self, command_info: dict) -> str:
        """Create the command file in the appropriate directory."""
        from pathlib import Path

        # Choose directory based on command type
        base_dir = Path(__file__).parent.parent.parent  # mcli src directory

        # Create in public directory for user-generated commands
        commands_dir = base_dir / "mcli" / "public" / "commands"
        commands_dir.mkdir(parents=True, exist_ok=True)

        file_path = commands_dir / command_info["filename"]

        try:
            with open(file_path, "w") as f:
                f.write(command_info["code"])
            return str(file_path)
        except Exception as e:
            logger.error(f"Failed to create command file: {e}")
            return None

    def _test_command(self, command_name: str) -> bool:
        """Test if the command works by trying to import and run help."""
        try:
            # Try to run the command help to see if it's recognized
            import subprocess

            result = subprocess.run(
                ["mcli", "commands", "list"], capture_output=True, text=True, timeout=10
            )
            # Check if our command appears in the output
            return command_name in result.stdout
        except Exception as e:
            logger.debug(f"Command test failed: {e}")
            return False

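    # The check above only confirms the new command's name appears in
    # `mcli commands list`; it does not execute the command. A stricter
    # (hypothetical) smoke test could invoke the command's own help screen:
    #
    #     result = subprocess.run(["mcli", command_name, "--help"],
    #                             capture_output=True, text=True, timeout=10)
    #     ok = result.returncode == 0
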
    def is_system_control_request(self, user_input: str) -> bool:
        """Check if user input is requesting system control functionality"""
        lower_input = user_input.lower()

        # System control keywords
        system_keywords = [
            "open",
            "close",
            "launch",
            "quit",
            "textedit",
            "text edit",
            "screenshot",
            "screen capture",
            "calculator",
            "safari",
            "finder",
            "take screenshot",
            "write",
            "type",
            "save as",
            "file",
            "application",
            # System information keywords
            "system time",
            "what time",
            "current time",
            "system info",
            "system information",
            "system specs",
            "hardware info",
            "memory usage",
            "ram usage",
            "how much memory",
            "how much ram",
            "disk usage",
            "disk space",
            "storage space",
            "how much space",
            "clear cache",
            "clean cache",
            "clear system cache",
            "system cache",
            # Navigation and shell keywords
            "navigate to",
            "go to",
            "change to",
            "cd to",
            "move to",
            "list",
            "show files",
            "ls",
            "dir",
            "what's in",
            "clean simulator",
            "simulator data",
            "clean ios",
            "clean watchos",
            "run command",
            "execute",
            "shell",
            "terminal",
            "where am i",
            "current directory",
            "pwd",
            "current path",
        ]

        # Check for system control patterns
        system_patterns = [
            r"\bopen\s+\w+",  # "open something"
            r"\bclose\s+\w+",  # "close something"
            r"\btake\s+screenshot",  # "take screenshot"
            r"\bwrite\s+.*\bin\s+\w+",  # "write something in app"
            r"\bopen\s+.*\band\s+write",  # "open app and write"
            r"\btextedit",  # any textedit mention
            r"\btext\s+edit",  # "text edit"
            # System information patterns
            r"\bwhat\s+time",  # "what time"
            r"\bsystem\s+time",  # "system time"
            r"\bcurrent\s+time",  # "current time"
            r"\bsystem\s+info",  # "system info"
            r"\bsystem\s+specs",  # "system specs"
            r"\bhow\s+much\s+(ram|memory)",  # "how much ram/memory"
            r"\bmemory\s+usage",  # "memory usage"
            r"\bram\s+usage",  # "ram usage"
            r"\bdisk\s+usage",  # "disk usage"
            r"\bdisk\s+space",  # "disk space"
            r"\bclear\s+(cache|system)",  # "clear cache" or "clear system"
        ]

        import re

        for pattern in system_patterns:
            if re.search(pattern, lower_input):
                return True

        # Simple keyword check as fallback
        return any(keyword in lower_input for keyword in system_keywords)

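    # The keyword fallback above is deliberately broad, so short tokens such as
    # "list", "file", and "open" classify many ordinary questions as
    # system-control requests. Illustrative calls:
    #
    #     client.is_system_control_request("open Safari")             # True: pattern r"\bopen\s+\w+"
    #     client.is_system_control_request("list the pros and cons")  # True: keyword "list"
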
    def handle_system_control(self, user_input: str):
        """Handle system control requests with intelligent reasoning and suggestions"""
        try:
            console.print("[dim]🤖 Processing system control request...[/dim]")

            # Use the system integration to handle the request
            result = handle_system_request(user_input)

            if result["success"]:
                message = result.get("message", "✅ System operation completed successfully!")
                console.print(f"[green]{message}[/green]")

                # Provide intelligent follow-up suggestions based on the result
                self._provide_intelligent_suggestions(user_input, result)

                # Show output if available
                if result.get("output") and result["output"].strip():
                    output_lines = result["output"].strip().split("\n")
                    console.print("[dim]Output:[/dim]")
                    for line in output_lines[:10]:  # Show first 10 lines
                        console.print(f" {line}")
                    if len(output_lines) > 10:
                        console.print(f" [dim]... ({len(output_lines) - 10} more lines)[/dim]")

                # Show special file paths for screenshots, etc.
                if result.get("screenshot_path"):
                    console.print(
                        f"[cyan]📸 Screenshot saved to: {result['screenshot_path']}[/cyan]"
                    )

            else:
                error_msg = result.get("error", "Unknown error occurred")
                console.print(f"[red]❌ {error_msg}[/red]")

                # Show suggestions if available
                if result.get("suggestion"):
                    console.print(f"[yellow]💡 {result['suggestion']}[/yellow]")

                # Show available functions if this was an unknown request
                if result.get("available_functions"):
                    console.print("[dim]Available system functions:[/dim]")
                    for func in result["available_functions"][:5]:
                        console.print(f" • {func}")

        except Exception as e:
            console.print(f"[red]Error processing system control request: {e}[/red]")
            console.print("[yellow]Examples of system control commands:[/yellow]")
            console.print(" • 'Open TextEdit and write Hello World'")
            console.print(" • 'Take a screenshot'")
            console.print(" • 'Open Calculator'")
            console.print(" • 'Close TextEdit'")

    def _provide_intelligent_suggestions(self, user_input: str, result: dict):
        """Provide intelligent suggestions based on system control results"""
        try:
            lower_input = user_input.lower()
            data = result.get("data", {})

            # Disk usage suggestions
            if "disk" in lower_input or "space" in lower_input:
                self._suggest_disk_cleanup(data)

            # Memory usage suggestions
            elif "memory" in lower_input or "ram" in lower_input:
                self._suggest_memory_optimization(data)

            # System info suggestions
            elif "system" in lower_input and ("info" in lower_input or "specs" in lower_input):
                self._suggest_system_actions(data)

            # Time-based suggestions
            elif "time" in lower_input:
                self._suggest_time_actions(data)

        except Exception:
            # Don't let suggestion errors break the main flow
            pass

    def _suggest_disk_cleanup(self, disk_data: dict):
        """Suggest disk cleanup actions based on usage"""
        if not disk_data:
            return

        suggestions = []

        # Check main disk usage
        if disk_data.get("total_disk_gb", 0) > 0:
            usage_pct = (disk_data.get("total_used_gb", 0) / disk_data["total_disk_gb"]) * 100

            if usage_pct > 85:
                suggestions.append("Your disk is getting full! I can help you clear system caches.")
                suggestions.append("Try: 'clear system caches' to free up space")
            elif usage_pct > 70:
                suggestions.append("You're using quite a bit of disk space. Consider cleaning up.")
                suggestions.append("I can help with: 'clear system caches'")

        # Check for large simulator volumes
        partitions = disk_data.get("partitions", [])
        simulator_partitions = [p for p in partitions if "CoreSimulator" in p.get("mountpoint", "")]

        if simulator_partitions:
            total_sim_space = sum(p.get("used_gb", 0) for p in simulator_partitions)
            if total_sim_space > 50:  # More than 50GB in simulators
                suggestions.append(
                    f"📱 You have {total_sim_space:.1f}GB in iOS/watchOS simulators."
                )
                suggestions.append("Consider cleaning old simulator data if you don't need it.")

        # Show suggestions
        if suggestions:
            console.print("\n[cyan]💡 Suggestions:[/cyan]")
            for suggestion in suggestions:
                console.print(f" {suggestion}")

    def _suggest_memory_optimization(self, memory_data: dict):
        """Suggest memory optimization actions"""
        if not memory_data:
            return

        suggestions = []
        vm = memory_data.get("virtual_memory", {})
        swap = memory_data.get("swap_memory", {})

        if vm.get("usage_percent", 0) > 85:
            suggestions.append("Your memory usage is quite high!")
            suggestions.append("Consider closing unused applications to free up RAM.")

        if swap.get("usage_percent", 0) > 70:
            suggestions.append("High swap usage detected - your system is using disk as memory.")
            suggestions.append("This can slow things down. Try closing memory-intensive apps.")

        # Suggest system monitoring
        suggestions.append("Want to monitor your system? Try: 'show system specs'")

        if suggestions:
            console.print("\n[cyan]💡 Memory Tips:[/cyan]")
            for suggestion in suggestions:
                console.print(f" {suggestion}")

    def _suggest_system_actions(self, system_data: dict):
        """Suggest system-related actions"""
        if not system_data:
            return

        suggestions = []
        cpu = system_data.get("cpu", {})
        memory = system_data.get("memory", {})

        # CPU suggestions
        cpu_usage = cpu.get("cpu_usage_percent", 0)
        if cpu_usage > 80:
            suggestions.append(
                f"CPU usage is high ({cpu_usage}%). Check what's running with Activity Monitor."
            )

        # Memory suggestions
        memory_pct = memory.get("usage_percent", 0)
        if memory_pct > 80:
            suggestions.append("Memory usage is high. Consider closing unused applications.")

        # Uptime suggestions
        uptime_hours = system_data.get("uptime_hours", 0)
        if uptime_hours > 72:  # More than 3 days
            suggestions.append(f"Your system has been up for {uptime_hours:.1f} hours.")
            suggestions.append("Consider restarting to refresh system performance.")

        # General suggestions
        suggestions.append("I can help you with:")
        suggestions.append("• 'how much RAM do I have?' - Check memory usage")
        suggestions.append("• 'how much disk space do I have?' - Check storage")
        suggestions.append("• 'clear system caches' - Free up space")

        if suggestions:
            console.print("\n[cyan]💡 System Insights:[/cyan]")
            for suggestion in suggestions:
                console.print(f" {suggestion}")

    def _suggest_time_actions(self, time_data: dict):
        """Suggest time-related actions"""
        if not time_data:
            return

        suggestions = []
        current_time = time_data.get("current_time", "")

        if current_time:
            import datetime

            try:
                # Parse the time to get hour
                time_obj = datetime.datetime.strptime(current_time, "%Y-%m-%d %H:%M:%S")
                hour = time_obj.hour

                if 22 <= hour or hour <= 6:  # Late night/early morning
                    suggestions.append("🌙 It's quite late! Consider taking a break.")
                elif 9 <= hour <= 17:  # Work hours
                    suggestions.append("⏰ It's work time! Stay productive.")
                elif 17 < hour < 22:  # Evening
                    suggestions.append("🌆 Good evening! Wrapping up for the day?")

            except Exception:
                # Ignore timestamps that don't match the expected format
                pass

        suggestions.append("I can also help you schedule tasks with the workflow system!")

        if suggestions:
            console.print("\n[cyan]💡 Time Tips:[/cyan]")
            for suggestion in suggestions:
                console.print(f" {suggestion}")

    def _is_system_help_request(self, query: str) -> bool:
        """Detect if this is a request for system help or cleanup"""
        lower_query = query.lower()

        help_patterns = [
            r"help.*free.*space",
            r"help.*clean.*up",
            r"help.*disk.*space",
            r"help.*memory",
            r"can you.*free",
            r"can you.*clean",
            r"can you.*help.*space",
            r"can you help.*free",
            r"yikes.*help",
            r"how.*clean",
            r"how.*free.*space",
            r"what.*clean",
            r"what.*free.*space",
            r"help me.*free",
            r"help me.*clean",
        ]

        import re

        for pattern in help_patterns:
            if re.search(pattern, lower_query):
                return True

        return False

    def _handle_system_help_request(self, query: str):
        """Handle requests for system help and cleanup"""
        lower_query = query.lower()

        console.print("[dim]🤖 I can definitely help you with system cleanup![/dim]")

        # Provide contextual help based on the query
        if "space" in lower_query or "disk" in lower_query:
            console.print("\n[green]💽 Here's what I can help you with for disk space:[/green]")
            console.print(
                "• [cyan]'clear system caches'[/cyan] - Clear system cache files and temporary data"
            )
            console.print(
                "• [cyan]'how much disk space do I have?'[/cyan] - Get detailed disk usage breakdown"
            )
            console.print(
                "• I can also identify large iOS simulator files that might be taking up space"
            )

            console.print(
                "\n[yellow]💡 Quick tip:[/yellow] Try 'clear system caches' to start freeing up space!"
            )

        elif "memory" in lower_query or "ram" in lower_query:
            console.print("\n[green]💾 Here's what I can help you with for memory:[/green]")
            console.print(
                "• [cyan]'how much RAM do I have?'[/cyan] - Check memory usage and get recommendations"
            )
            console.print(
                "• [cyan]'show system specs'[/cyan] - Get full system overview including memory"
            )
            console.print(
                "• I can identify if you need to close applications or restart your system"
            )

        else:
            # General system help
            console.print("\n[green]🛠️ I can help you with various system tasks:[/green]")
            console.print("• [cyan]'clear system caches'[/cyan] - Free up disk space")
            console.print("• [cyan]'how much disk space do I have?'[/cyan] - Check storage usage")
            console.print("• [cyan]'how much RAM do I have?'[/cyan] - Check memory usage")
            console.print("• [cyan]'show system specs'[/cyan] - Get full system overview")
            console.print("• [cyan]'open Calculator'[/cyan] - Open applications")
            console.print("• [cyan]'take screenshot'[/cyan] - Take and save screenshots")

        console.print(
            "\n[dim]Just ask me to do any of these tasks and I'll handle them for you![/dim]"
        )

    def _load_scheduled_jobs(self):
        """Load and start monitoring existing scheduled jobs"""
        try:
            # Lazy import to avoid circular dependencies
            from mcli.workflow.scheduler.job import JobStatus
            from mcli.workflow.scheduler.persistence import JobStorage

            job_storage = JobStorage()
            jobs = job_storage.load_jobs()

            active_count = 0
            for job_data in jobs:
                if job_data.get("status") in [JobStatus.PENDING.value, JobStatus.RUNNING.value]:
                    active_count += 1

            if active_count > 0:
                console.print(f"[dim]📅 {active_count} scheduled jobs loaded[/dim]")

        except Exception:
            # Silently handle import/loading errors at startup
            pass

    def _is_job_management_request(self, query: str) -> bool:
        """Detect if this is a job/schedule management request"""
        lower_query = query.lower()

        job_patterns = [
            r"schedule.*",
            r"every.*",
            r"daily.*",
            r"weekly.*",
            r"remind.*",
            r"job.*",
            r"task.*",
            r"my.*jobs",
            r"what.*running",
            r"status.*",
            r"cancel.*",
            r"stop.*job",
            r"list.*jobs",
            r"show.*jobs",
            r"cron.*",
            r"at.*(?:am|pm)",  # grouped alternation; see note after this method
            r"in.*(?:minutes|hours|days)",  # grouped alternation; see note after this method
            r"run.*cron.*test",
            r"cron.*test",
            r"show.*failed.*jobs",
            r"failed.*jobs",
            r"job.*details",
            r"job.*completion",
            r"completion.*details",
            r"performance.*metrics",
            r"execution.*history",
            r".*completion.*",
        ]

        import re

        for pattern in job_patterns:
            if re.search(pattern, lower_query):
                return True

        return False

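    # Note on the grouped patterns above: `|` has the lowest precedence in a
    # Python regex, so an ungrouped form such as r"at.*am|pm" parses as
    # (at.*am)|(pm) and fires on any query containing "pm" at all. Illustration:
    #
    #     import re
    #     bool(re.search(r"at.*am|pm", "show me the rpm stats"))      # True  ("pm" inside "rpm")
    #     bool(re.search(r"at.*(?:am|pm)", "show me the rpm stats"))  # False
    #     bool(re.search(r"at.*(?:am|pm)", "run backup at 11pm"))     # True
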
    def _handle_job_management(self, query: str):
        """Handle job scheduling and management requests"""
        lower_query = query.lower()

        console.print("[dim]🤖 Processing job management request...[/dim]")

        try:
            # Run cron test
            if any(phrase in lower_query for phrase in ["run cron test", "cron test"]):
                self._handle_cron_test(query)
                return

            # Show failed jobs
            if any(phrase in lower_query for phrase in ["show failed jobs", "failed jobs"]):
                self._show_failed_jobs()
                return

            # Show job details/completion analysis
            if any(
                phrase in lower_query
                for phrase in [
                    "job details",
                    "job completion",
                    "completion details",
                    "performance metrics",
                    "execution history",
                ]
            ):
                self._show_job_completion_details()
                return

            # List jobs
            if any(
                phrase in lower_query
                for phrase in ["list jobs", "show jobs", "my jobs", "what's running", "status"]
            ):
                self._show_agent_status()
                return

            # Cancel/stop job
            if any(phrase in lower_query for phrase in ["cancel", "stop", "remove"]):
                self._handle_job_cancellation(query)
                return

            # Schedule new job
            if any(
                phrase in lower_query
                for phrase in ["schedule", "every", "daily", "weekly", "remind", "at"]
            ):
                self._handle_job_scheduling(query)
                return

            # General job help
            console.print("[green]📅 I can help you with job scheduling and monitoring![/green]")
            console.print("\n[cyan]🕒 Cron & Job Management:[/cyan]")
            console.print("• [yellow]'run cron test'[/yellow] - Validate cron system functionality")
            console.print("• [yellow]'list my jobs'[/yellow] - Show all scheduled tasks")
            console.print("• [yellow]'what's my status?'[/yellow] - Agent status overview")
            console.print("• [yellow]'show failed jobs'[/yellow] - Analyze job failures")
            console.print("• [yellow]'job completion details'[/yellow] - Performance metrics")
            console.print("\n[cyan]📋 Job Scheduling:[/cyan]")
            console.print(
                "• [yellow]'schedule system cleanup daily at 2am'[/yellow] - Schedule recurring tasks"
            )
            console.print(
                "• [yellow]'remind me to check disk space every week'[/yellow] - Set reminders"
            )
            console.print("• [yellow]'cancel job cleanup'[/yellow] - Remove scheduled tasks")

        except Exception as e:
            console.print(f"[red]❌ Error handling job request: {e}[/red]")

    def _show_agent_status(self):
        """Show comprehensive agent status including jobs, system state, and context"""
        console.print("\n[bold cyan]🤖 Personal Assistant Status Report[/bold cyan]")

        # Jobs and schedules
        try:
            from mcli.workflow.scheduler.job import JobStatus, ScheduledJob
            from mcli.workflow.scheduler.persistence import JobStorage

            job_storage = JobStorage()
            jobs = job_storage.load_jobs()

            # Convert ScheduledJob objects to dictionaries for easier processing
            job_dicts = []
            active_jobs = []
            completed_jobs = []
            failed_jobs = []

            for job in jobs:
                if hasattr(job, "to_dict"):
                    job_dict = job.to_dict()
                else:
                    # If it's already a dict or has dict-like access
                    job_dict = {
                        "name": getattr(job, "name", "Unknown"),
                        "status": (
                            getattr(job, "status", JobStatus.PENDING).value
                            if hasattr(job, "status")
                            else "pending"
                        ),
                        "cron_expression": getattr(job, "cron_expression", "Unknown"),
                        "next_run": getattr(job, "next_run", None),
                    }
                job_dicts.append(job_dict)

                status = job_dict.get("status")
                if status in [JobStatus.PENDING.value, JobStatus.RUNNING.value]:
                    active_jobs.append(job_dict)
                elif status == JobStatus.COMPLETED.value:
                    completed_jobs.append(job_dict)
                elif status == JobStatus.FAILED.value:
                    failed_jobs.append(job_dict)

        except Exception:
            jobs = []
            active_jobs = []
            completed_jobs = []
            failed_jobs = []

        console.print(f"\n[green]📅 Scheduled Jobs:[/green]")
        if active_jobs:
            # active_jobs holds the dicts built above, so read fields directly
            for job_data in active_jobs[:5]:  # Show first 5
                name = job_data.get("name", "Unknown")
                cron = job_data.get("cron_expression", "Unknown")
                status = job_data.get("status", "pending")
                console.print(f" • [cyan]{name}[/cyan] - {cron} ({status})")
                if job_data.get("next_run"):
                    console.print(f" Next run: {job_data['next_run']}")
        else:
            console.print(" No active scheduled jobs")

        if len(active_jobs) > 5:
            console.print(f" ... and {len(active_jobs) - 5} more active jobs")

        # Recent activity
        if completed_jobs or failed_jobs:
            console.print(f"\n[blue]📊 Recent Activity:[/blue]")
            if completed_jobs:
                console.print(f" ✅ {len(completed_jobs)} completed jobs")
            if failed_jobs:
                console.print(f" ❌ {len(failed_jobs)} failed jobs")

        # System context - get current system state
        try:
            from mcli.chat.system_controller import system_controller

            # Quick system overview
            memory_result = system_controller.get_memory_usage()
            disk_result = system_controller.get_disk_usage()

            console.print(f"\n[yellow]💻 System Context:[/yellow]")

            if memory_result.get("success"):
                mem_data = memory_result["data"]["virtual_memory"]
                console.print(
                    f" 💾 Memory: {mem_data['usage_percent']:.1f}% used ({mem_data['used_gb']:.1f}GB/{mem_data['total_gb']:.1f}GB)"
                )

            if disk_result.get("success"):
                disk_data = disk_result["data"]
                if disk_data.get("total_disk_gb", 0) > 0:
                    usage_pct = (
                        disk_data.get("total_used_gb", 0) / disk_data["total_disk_gb"]
                    ) * 100
                    console.print(
                        f" 💽 Disk: {usage_pct:.1f}% used ({disk_data.get('total_free_gb', 0):.1f}GB free)"
                    )

        except Exception:
            console.print(f"\n[yellow]💻 System Context: Unable to get current status[/yellow]")

        # Agent capabilities reminder
        console.print(f"\n[magenta]🛠️ I can help you with:[/magenta]")
        console.print(" • System monitoring and cleanup")
        console.print(" • Application control and automation")
        console.print(" • Scheduled tasks and reminders")
        console.print(" • File management and organization")
        console.print(" • Process monitoring and management")

        console.print(
            "\n[dim]Ask me to schedule tasks, check system status, or automate any routine![/dim]"
        )

    def _handle_job_scheduling(self, query: str):
        """Handle requests to schedule new jobs"""
        console.print("[green]📅 Let me help you schedule that task![/green]")

        # For now, provide guidance on scheduling
        # TODO: Implement natural language job scheduling parser
        console.print("\n[cyan]Here are some scheduling examples:[/cyan]")
        console.print("• [yellow]'schedule system cleanup daily at 2am'[/yellow]")
        console.print("• [yellow]'remind me to check disk space every Monday'[/yellow]")
        console.print("• [yellow]'run backup every day at 11pm'[/yellow]")
        console.print("• [yellow]'check memory usage every 2 hours'[/yellow]")

        console.print("\n[blue]💡 I can schedule:[/blue]")
        console.print(" • System maintenance tasks")
        console.print(" • File cleanup operations")
        console.print(" • Health checks and monitoring")
        console.print(" • Automated backups")
        console.print(" • Custom reminders")

        console.print(
            "\n[dim]The job scheduler is ready - just tell me what you'd like to automate![/dim]"
        )

    def _handle_job_cancellation(self, query: str):
        """Handle requests to cancel jobs"""
        try:
            from mcli.workflow.scheduler.job import JobStatus
            from mcli.workflow.scheduler.persistence import JobStorage

            job_storage = JobStorage()
            jobs = job_storage.load_jobs()

            active_jobs = []
            for job in jobs:
                if hasattr(job, "status") and job.status.value in [
                    JobStatus.PENDING.value,
                    JobStatus.RUNNING.value,
                ]:
                    active_jobs.append(job)

        except Exception:
            jobs = []
            active_jobs = []

        if not active_jobs:
            console.print("[yellow]📅 No active jobs to cancel[/yellow]")
            return

        console.print("[blue]📅 Active jobs that can be cancelled:[/blue]")
        # active_jobs holds job objects, so read attributes directly
        for i, job in enumerate(active_jobs, 1):
            console.print(f" {i}. [cyan]{job.name}[/cyan] - {job.cron_expression}")

        console.print("\n[dim]To cancel a specific job, use: 'cancel job [name]'[/dim]")

    def _show_startup_status(self):
        """Show proactive status update when assistant starts"""
        try:
            # Quick system check
            from mcli.chat.system_controller import system_controller

            # Get basic system state
            memory_result = system_controller.get_memory_usage()

            # Check for active jobs
            try:
                from mcli.workflow.scheduler.job import JobStatus
                from mcli.workflow.scheduler.persistence import JobStorage

                job_storage = JobStorage()
                jobs = job_storage.load_jobs()

                # Count active jobs
                active_jobs = []
                for job in jobs:
                    if hasattr(job, "status") and job.status.value in [
                        JobStatus.PENDING.value,
                        JobStatus.RUNNING.value,
                    ]:
                        active_jobs.append(job)

            except Exception:
                jobs = []
                active_jobs = []

            status_items = []

            # Memory alert if high
            if memory_result.get("success"):
                mem_data = memory_result["data"]["virtual_memory"]
                if mem_data.get("usage_percent", 0) > 85:
                    status_items.append(f"⚠️ High memory usage: {mem_data['usage_percent']:.1f}%")
                elif mem_data.get("usage_percent", 0) > 75:
                    status_items.append(f"💾 Memory: {mem_data['usage_percent']:.1f}% used")

            # Active jobs
            if active_jobs:
                status_items.append(f"📅 {len(active_jobs)} scheduled jobs running")

            # Recent failures (check for failed jobs in last 24 hours)
            # jobs holds job objects, so check the status attribute directly
            failed_jobs = [
                j for j in jobs if hasattr(j, "status") and j.status == JobStatus.FAILED
            ]
            if failed_jobs:
                status_items.append(f"❌ {len(failed_jobs)} recent job failures")

            # Show status if there's something to report
            if status_items:
                console.print("\n[cyan]🤖 Assistant Status:[/cyan]")
                for item in status_items[:3]:  # Show max 3 items
                    console.print(f" {item}")

                if len(active_jobs) > 0 or len(failed_jobs) > 0:
                    console.print(" [dim]Say 'what's my status?' for detailed report[/dim]")
            else:
                console.print("\n[green]🤖 All systems running smoothly![/green]")

        except Exception:
            console.print(f"\n[green]🤖 All systems running smoothly![/green]")

    def _handle_cron_test(self, query: str):
        """Handle cron test execution requests"""
        console.print("[green]🕒 Running MCLI Cron Validation Test...[/green]")

        try:
            import subprocess
            import sys

            # Determine test type from query
            is_verbose = "verbose" in query.lower() or "detailed" in query.lower()
            is_quick = "quick" in query.lower() or "fast" in query.lower()

            # Build command
            cmd = [sys.executable, "-m", "mcli.app.cron_test_cmd"]

            if is_quick:
                cmd.append("--quick")
            if is_verbose:
                cmd.append("--verbose")
            cmd.append("--cleanup")  # Always cleanup in chat mode

            # Run the cron test
            console.print("[dim]Executing cron validation...[/dim]")
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=120)

            if result.returncode == 0:
                # Show successful output
                console.print(result.stdout)
            else:
                console.print(f"[red]❌ Cron test failed:[/red]")
                console.print(result.stderr if result.stderr else result.stdout)

        except subprocess.TimeoutExpired:
            console.print("[red]❌ Cron test timed out after 2 minutes[/red]")
        except Exception as e:
            console.print(f"[red]❌ Failed to run cron test: {e}[/red]")
            console.print("\n[yellow]💡 Try running directly:[/yellow]")
            console.print(" mcli cron-test --quick --verbose")

    def _show_failed_jobs(self):
        """Show detailed information about failed jobs"""
        console.print("[red]❌ Analyzing Failed Jobs...[/red]")

        try:
            from mcli.workflow.scheduler.job import JobStatus
            from mcli.workflow.scheduler.persistence import JobStorage

            storage = JobStorage()
            all_jobs = storage.load_jobs()

            # Find failed jobs
            failed_jobs = [
                job for job in all_jobs if hasattr(job, "status") and job.status == JobStatus.FAILED
            ]

            if not failed_jobs:
                console.print("[green]✅ No failed jobs found![/green]")
                console.print("All scheduled jobs are running successfully.")
                return

            console.print(f"[yellow]Found {len(failed_jobs)} failed jobs:[/yellow]")

            for i, job in enumerate(failed_jobs[:10], 1):  # Show max 10 failed jobs
                console.print(f"\n[red]{i}. {job.name}[/red]")
                console.print(f" Status: {job.status.value}")
                console.print(f" Type: {job.job_type.value}")
                console.print(f" Schedule: {job.cron_expression}")

                if hasattr(job, "last_error") and job.last_error:
                    error_preview = (
                        job.last_error[:100] + "..."
                        if len(job.last_error) > 100
                        else job.last_error
                    )
                    console.print(f" Error: {error_preview}")

                if hasattr(job, "run_count"):
                    console.print(f" Attempts: {job.run_count}")

                if hasattr(job, "last_run") and job.last_run:
                    console.print(f" Last attempt: {job.last_run.strftime('%Y-%m-%d %H:%M:%S')}")

            if len(failed_jobs) > 10:
                console.print(f"\n[dim]... and {len(failed_jobs) - 10} more failed jobs[/dim]")

            # Show helpful actions
            console.print(f"\n[cyan]💡 Recommended Actions:[/cyan]")
            console.print("• Check job commands and file paths")
            console.print("• Verify system permissions")
            console.print("• Review error messages above")
            console.print("• Try: 'cancel job <name>' to remove problematic jobs")

        except Exception as e:
            console.print(f"[red]❌ Failed to analyze jobs: {e}[/red]")

    def _show_job_completion_details(self):
        """Show comprehensive job completion analysis"""
        console.print("[blue]📊 Job Completion Analysis...[/blue]")

        try:
            from datetime import datetime, timedelta

            from mcli.workflow.scheduler.job import JobStatus
            from mcli.workflow.scheduler.persistence import JobStorage

            storage = JobStorage()
            all_jobs = storage.load_jobs()

            if not all_jobs:
                console.print("[yellow]No jobs found in the system.[/yellow]")
                return

            # Categorize jobs
            completed_jobs = []
            running_jobs = []
            failed_jobs = []
            pending_jobs = []

            for job in all_jobs:
                if hasattr(job, "status"):
                    if job.status == JobStatus.COMPLETED:
                        completed_jobs.append(job)
                    elif job.status == JobStatus.RUNNING:
                        running_jobs.append(job)
                    elif job.status == JobStatus.FAILED:
                        failed_jobs.append(job)
                    else:
                        pending_jobs.append(job)

            # Status summary
            console.print(f"\n[green]📈 Job Status Summary:[/green]")
            console.print(f" ✅ Completed: {len(completed_jobs)}")
            console.print(f" 🔄 Running: {len(running_jobs)}")
            console.print(f" ❌ Failed: {len(failed_jobs)}")
            console.print(f" ⏳ Pending: {len(pending_jobs)}")

            # Performance metrics
            total_runs = sum(getattr(job, "run_count", 0) for job in all_jobs)
            total_successes = sum(getattr(job, "success_count", 0) for job in all_jobs)
            total_failures = sum(getattr(job, "failure_count", 0) for job in all_jobs)

            if total_runs > 0:
                success_rate = total_successes / total_runs * 100
                console.print(f"\n[cyan]⚡ Performance Metrics:[/cyan]")
                console.print(f" Total Executions: {total_runs}")
                console.print(f" Success Rate: {success_rate:.1f}%")
                console.print(f" Successful: {total_successes}")
                console.print(f" Failed: {total_failures}")

            # Recent completions (last 5)
            recent_completed = sorted(
                [job for job in completed_jobs if hasattr(job, "last_run") and job.last_run],
                key=lambda j: j.last_run,
                reverse=True,
            )[:5]

            if recent_completed:
                console.print(f"\n[green]🎯 Recent Completions:[/green]")
                for job in recent_completed:
                    runtime = (
                        f" ({job.runtime_seconds:.2f}s)"
                        if hasattr(job, "runtime_seconds") and job.runtime_seconds > 0
                        else ""
                    )
                    console.print(f" • {job.name}: {job.last_run.strftime('%H:%M:%S')}{runtime}")

            # Job type breakdown
            job_types = {}
            for job in all_jobs:
                job_type = job.job_type.value if hasattr(job, "job_type") else "unknown"
                job_types[job_type] = job_types.get(job_type, 0) + 1

            if job_types:
                console.print(f"\n[blue]📋 Job Types:[/blue]")
                for job_type, count in sorted(job_types.items()):
                    console.print(f" {job_type}: {count} jobs")

            # Helpful commands
            console.print(f"\n[yellow]🔧 Available Commands:[/yellow]")
            console.print("• 'show failed jobs' - Analyze job failures")
            console.print("• 'run cron test' - Validate cron system")
            console.print("• 'cancel job <name>' - Remove specific job")

        except Exception as e:
            console.print(f"[red]❌ Failed to analyze job completions: {e}[/red]")

if __name__ == "__main__":
    client = ChatClient()
    client.start_interactive_session()