mcli-framework 7.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mcli-framework might be problematic. Click here for more details.

Files changed (186)
  1. mcli/app/chat_cmd.py +42 -0
  2. mcli/app/commands_cmd.py +226 -0
  3. mcli/app/completion_cmd.py +216 -0
  4. mcli/app/completion_helpers.py +288 -0
  5. mcli/app/cron_test_cmd.py +697 -0
  6. mcli/app/logs_cmd.py +419 -0
  7. mcli/app/main.py +492 -0
  8. mcli/app/model/model.py +1060 -0
  9. mcli/app/model_cmd.py +227 -0
  10. mcli/app/redis_cmd.py +269 -0
  11. mcli/app/video/video.py +1114 -0
  12. mcli/app/visual_cmd.py +303 -0
  13. mcli/chat/chat.py +2409 -0
  14. mcli/chat/command_rag.py +514 -0
  15. mcli/chat/enhanced_chat.py +652 -0
  16. mcli/chat/system_controller.py +1010 -0
  17. mcli/chat/system_integration.py +1016 -0
  18. mcli/cli.py +25 -0
  19. mcli/config.toml +20 -0
  20. mcli/lib/api/api.py +586 -0
  21. mcli/lib/api/daemon_client.py +203 -0
  22. mcli/lib/api/daemon_client_local.py +44 -0
  23. mcli/lib/api/daemon_decorator.py +217 -0
  24. mcli/lib/api/mcli_decorators.py +1032 -0
  25. mcli/lib/auth/auth.py +85 -0
  26. mcli/lib/auth/aws_manager.py +85 -0
  27. mcli/lib/auth/azure_manager.py +91 -0
  28. mcli/lib/auth/credential_manager.py +192 -0
  29. mcli/lib/auth/gcp_manager.py +93 -0
  30. mcli/lib/auth/key_manager.py +117 -0
  31. mcli/lib/auth/mcli_manager.py +93 -0
  32. mcli/lib/auth/token_manager.py +75 -0
  33. mcli/lib/auth/token_util.py +1011 -0
  34. mcli/lib/config/config.py +47 -0
  35. mcli/lib/discovery/__init__.py +1 -0
  36. mcli/lib/discovery/command_discovery.py +274 -0
  37. mcli/lib/erd/erd.py +1345 -0
  38. mcli/lib/erd/generate_graph.py +453 -0
  39. mcli/lib/files/files.py +76 -0
  40. mcli/lib/fs/fs.py +109 -0
  41. mcli/lib/lib.py +29 -0
  42. mcli/lib/logger/logger.py +611 -0
  43. mcli/lib/performance/optimizer.py +409 -0
  44. mcli/lib/performance/rust_bridge.py +502 -0
  45. mcli/lib/performance/uvloop_config.py +154 -0
  46. mcli/lib/pickles/pickles.py +50 -0
  47. mcli/lib/search/cached_vectorizer.py +479 -0
  48. mcli/lib/services/data_pipeline.py +460 -0
  49. mcli/lib/services/lsh_client.py +441 -0
  50. mcli/lib/services/redis_service.py +387 -0
  51. mcli/lib/shell/shell.py +137 -0
  52. mcli/lib/toml/toml.py +33 -0
  53. mcli/lib/ui/styling.py +47 -0
  54. mcli/lib/ui/visual_effects.py +634 -0
  55. mcli/lib/watcher/watcher.py +185 -0
  56. mcli/ml/api/app.py +215 -0
  57. mcli/ml/api/middleware.py +224 -0
  58. mcli/ml/api/routers/admin_router.py +12 -0
  59. mcli/ml/api/routers/auth_router.py +244 -0
  60. mcli/ml/api/routers/backtest_router.py +12 -0
  61. mcli/ml/api/routers/data_router.py +12 -0
  62. mcli/ml/api/routers/model_router.py +302 -0
  63. mcli/ml/api/routers/monitoring_router.py +12 -0
  64. mcli/ml/api/routers/portfolio_router.py +12 -0
  65. mcli/ml/api/routers/prediction_router.py +267 -0
  66. mcli/ml/api/routers/trade_router.py +12 -0
  67. mcli/ml/api/routers/websocket_router.py +76 -0
  68. mcli/ml/api/schemas.py +64 -0
  69. mcli/ml/auth/auth_manager.py +425 -0
  70. mcli/ml/auth/models.py +154 -0
  71. mcli/ml/auth/permissions.py +302 -0
  72. mcli/ml/backtesting/backtest_engine.py +502 -0
  73. mcli/ml/backtesting/performance_metrics.py +393 -0
  74. mcli/ml/cache.py +400 -0
  75. mcli/ml/cli/main.py +398 -0
  76. mcli/ml/config/settings.py +394 -0
  77. mcli/ml/configs/dvc_config.py +230 -0
  78. mcli/ml/configs/mlflow_config.py +131 -0
  79. mcli/ml/configs/mlops_manager.py +293 -0
  80. mcli/ml/dashboard/app.py +532 -0
  81. mcli/ml/dashboard/app_integrated.py +738 -0
  82. mcli/ml/dashboard/app_supabase.py +560 -0
  83. mcli/ml/dashboard/app_training.py +615 -0
  84. mcli/ml/dashboard/cli.py +51 -0
  85. mcli/ml/data_ingestion/api_connectors.py +501 -0
  86. mcli/ml/data_ingestion/data_pipeline.py +567 -0
  87. mcli/ml/data_ingestion/stream_processor.py +512 -0
  88. mcli/ml/database/migrations/env.py +94 -0
  89. mcli/ml/database/models.py +667 -0
  90. mcli/ml/database/session.py +200 -0
  91. mcli/ml/experimentation/ab_testing.py +845 -0
  92. mcli/ml/features/ensemble_features.py +607 -0
  93. mcli/ml/features/political_features.py +676 -0
  94. mcli/ml/features/recommendation_engine.py +809 -0
  95. mcli/ml/features/stock_features.py +573 -0
  96. mcli/ml/features/test_feature_engineering.py +346 -0
  97. mcli/ml/logging.py +85 -0
  98. mcli/ml/mlops/data_versioning.py +518 -0
  99. mcli/ml/mlops/experiment_tracker.py +377 -0
  100. mcli/ml/mlops/model_serving.py +481 -0
  101. mcli/ml/mlops/pipeline_orchestrator.py +614 -0
  102. mcli/ml/models/base_models.py +324 -0
  103. mcli/ml/models/ensemble_models.py +675 -0
  104. mcli/ml/models/recommendation_models.py +474 -0
  105. mcli/ml/models/test_models.py +487 -0
  106. mcli/ml/monitoring/drift_detection.py +676 -0
  107. mcli/ml/monitoring/metrics.py +45 -0
  108. mcli/ml/optimization/portfolio_optimizer.py +834 -0
  109. mcli/ml/preprocessing/data_cleaners.py +451 -0
  110. mcli/ml/preprocessing/feature_extractors.py +491 -0
  111. mcli/ml/preprocessing/ml_pipeline.py +382 -0
  112. mcli/ml/preprocessing/politician_trading_preprocessor.py +569 -0
  113. mcli/ml/preprocessing/test_preprocessing.py +294 -0
  114. mcli/ml/scripts/populate_sample_data.py +200 -0
  115. mcli/ml/tasks.py +400 -0
  116. mcli/ml/tests/test_integration.py +429 -0
  117. mcli/ml/tests/test_training_dashboard.py +387 -0
  118. mcli/public/oi/oi.py +15 -0
  119. mcli/public/public.py +4 -0
  120. mcli/self/self_cmd.py +1246 -0
  121. mcli/workflow/daemon/api_daemon.py +800 -0
  122. mcli/workflow/daemon/async_command_database.py +681 -0
  123. mcli/workflow/daemon/async_process_manager.py +591 -0
  124. mcli/workflow/daemon/client.py +530 -0
  125. mcli/workflow/daemon/commands.py +1196 -0
  126. mcli/workflow/daemon/daemon.py +905 -0
  127. mcli/workflow/daemon/daemon_api.py +59 -0
  128. mcli/workflow/daemon/enhanced_daemon.py +571 -0
  129. mcli/workflow/daemon/process_cli.py +244 -0
  130. mcli/workflow/daemon/process_manager.py +439 -0
  131. mcli/workflow/daemon/test_daemon.py +275 -0
  132. mcli/workflow/dashboard/dashboard_cmd.py +113 -0
  133. mcli/workflow/docker/docker.py +0 -0
  134. mcli/workflow/file/file.py +100 -0
  135. mcli/workflow/gcloud/config.toml +21 -0
  136. mcli/workflow/gcloud/gcloud.py +58 -0
  137. mcli/workflow/git_commit/ai_service.py +328 -0
  138. mcli/workflow/git_commit/commands.py +430 -0
  139. mcli/workflow/lsh_integration.py +355 -0
  140. mcli/workflow/model_service/client.py +594 -0
  141. mcli/workflow/model_service/download_and_run_efficient_models.py +288 -0
  142. mcli/workflow/model_service/lightweight_embedder.py +397 -0
  143. mcli/workflow/model_service/lightweight_model_server.py +714 -0
  144. mcli/workflow/model_service/lightweight_test.py +241 -0
  145. mcli/workflow/model_service/model_service.py +1955 -0
  146. mcli/workflow/model_service/ollama_efficient_runner.py +425 -0
  147. mcli/workflow/model_service/pdf_processor.py +386 -0
  148. mcli/workflow/model_service/test_efficient_runner.py +234 -0
  149. mcli/workflow/model_service/test_example.py +315 -0
  150. mcli/workflow/model_service/test_integration.py +131 -0
  151. mcli/workflow/model_service/test_new_features.py +149 -0
  152. mcli/workflow/openai/openai.py +99 -0
  153. mcli/workflow/politician_trading/commands.py +1790 -0
  154. mcli/workflow/politician_trading/config.py +134 -0
  155. mcli/workflow/politician_trading/connectivity.py +490 -0
  156. mcli/workflow/politician_trading/data_sources.py +395 -0
  157. mcli/workflow/politician_trading/database.py +410 -0
  158. mcli/workflow/politician_trading/demo.py +248 -0
  159. mcli/workflow/politician_trading/models.py +165 -0
  160. mcli/workflow/politician_trading/monitoring.py +413 -0
  161. mcli/workflow/politician_trading/scrapers.py +966 -0
  162. mcli/workflow/politician_trading/scrapers_california.py +412 -0
  163. mcli/workflow/politician_trading/scrapers_eu.py +377 -0
  164. mcli/workflow/politician_trading/scrapers_uk.py +350 -0
  165. mcli/workflow/politician_trading/scrapers_us_states.py +438 -0
  166. mcli/workflow/politician_trading/supabase_functions.py +354 -0
  167. mcli/workflow/politician_trading/workflow.py +852 -0
  168. mcli/workflow/registry/registry.py +180 -0
  169. mcli/workflow/repo/repo.py +223 -0
  170. mcli/workflow/scheduler/commands.py +493 -0
  171. mcli/workflow/scheduler/cron_parser.py +238 -0
  172. mcli/workflow/scheduler/job.py +182 -0
  173. mcli/workflow/scheduler/monitor.py +139 -0
  174. mcli/workflow/scheduler/persistence.py +324 -0
  175. mcli/workflow/scheduler/scheduler.py +679 -0
  176. mcli/workflow/sync/sync_cmd.py +437 -0
  177. mcli/workflow/sync/test_cmd.py +314 -0
  178. mcli/workflow/videos/videos.py +242 -0
  179. mcli/workflow/wakatime/wakatime.py +11 -0
  180. mcli/workflow/workflow.py +37 -0
  181. mcli_framework-7.0.0.dist-info/METADATA +479 -0
  182. mcli_framework-7.0.0.dist-info/RECORD +186 -0
  183. mcli_framework-7.0.0.dist-info/WHEEL +5 -0
  184. mcli_framework-7.0.0.dist-info/entry_points.txt +7 -0
  185. mcli_framework-7.0.0.dist-info/licenses/LICENSE +21 -0
  186. mcli_framework-7.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,652 @@
1
+ """
2
+ Enhanced MCLI Chat Assistant with Self-Referential Capabilities and RAG-based Command Search
3
+ """
4
+
5
+ import asyncio
6
+ import json
7
+ import os
8
+ import re
9
+ import readline
10
+ from datetime import datetime
11
+ from typing import Any, Dict, List, Optional
12
+
13
+ import requests
14
+ from rich.console import Console
15
+ from rich.panel import Panel
16
+ from rich.table import Table
17
+ from rich.text import Text
18
+
19
+ from mcli.chat.command_rag import get_command_rag_system
20
+ from mcli.chat.system_integration import handle_system_request
21
+ from mcli.lib.api.daemon_client import get_daemon_client
22
+ from mcli.lib.logger.logger import get_logger
23
+ from mcli.lib.toml.toml import read_from_toml
24
+ from mcli.lib.ui.styling import console
25
+
26
# Module-wide logger.
logger = get_logger(__name__)

# Best-effort load of the "llm" section from config.toml; any failure
# (missing file, bad TOML, absent section) leaves an empty config so the
# chat client can still start with defaults.
CONFIG_PATH = "config.toml"
try:
    config = read_from_toml(CONFIG_PATH, "llm") or {}
except Exception:
    config = {}
35
+
36
# Enhanced system prompt for self-referential capabilities.
# Injected as the "system_prompt" config key by _configure_model_settings and
# sent as the system message for both remote providers.
ENHANCED_SYSTEM_PROMPT = """You are the MCLI Personal Assistant, an intelligent self-aware agent that helps manage your computer and tasks.

## Core Capabilities
I am a true personal assistant with deep knowledge of the MCLI system and these capabilities:
- **System Management**: Memory, disk, applications, cleanup, performance optimization
- **Job Scheduling**: Cron jobs, reminders, recurring tasks, automation workflows
- **Process Management**: Background services, daemon control, Redis cache management
- **File Operations**: Organization, search, batch processing, format conversion
- **AI Integration**: Local and remote models, chat assistance, command reasoning
- **Performance**: Rust extensions, Redis caching, async operations, monitoring

## Self-Awareness & Command Knowledge
I have complete knowledge of all available MCLI commands and can:
- **Search Commands**: Find relevant commands using semantic similarity
- **Suggest Actions**: Recommend specific commands with proper options and parameters
- **Provide Examples**: Show exact command syntax and usage patterns
- **Explain Capabilities**: Detail what each command does and when to use it
- **Guide Workflows**: Chain commands together for complex automation tasks

## Current System Status
I maintain awareness of:
- All available MCLI commands and their parameters
- Currently scheduled jobs and their status
- System health and resource usage (CPU, memory, disk)
- Background services (Redis, daemon processes)
- Recent activities and completed tasks
- Performance optimization status (Rust extensions, caching)

## Communication Style
- **Actionable**: Always suggest specific MCLI commands when relevant
- **Precise**: Use exact command syntax with proper options
- **Contextual**: Consider user's current task and system state
- **Proactive**: Suggest optimizations and automations
- **Self-Referential**: Leverage my knowledge of MCLI capabilities

When users ask about tasks, I should:
1. Analyze their intent using command knowledge
2. Suggest specific MCLI commands with exact syntax
3. Provide working examples they can run immediately
4. Explain the reasoning behind my suggestions
5. Offer related commands for extended workflows

I can execute system commands and searches to provide real-time information and suggestions.
"""
81
+
82
+
83
class EnhancedChatClient:
    """Enhanced chat client with RAG-based command search and self-referential capabilities"""

    def __init__(self, use_remote: bool = False, model_override: str = None):
        # Core collaborators and session state.
        self.daemon = get_daemon_client()
        self.history = []
        self.session_active = True
        self.use_remote = use_remote
        self.model_override = model_override
        self.console = Console()

        # RAG system for command search (lazily initialized on first message).
        self.rag_system = None

        # Conversation and system context tracking.
        self.conversation_context = []
        self.system_context = {}
        self.user_preferences = {}
        self.simple_command_lookup = {}

        self._configure_model_settings()
        self._ensure_daemon_running()

        # Best-effort synchronous command discovery; the client degrades
        # gracefully to an empty lookup when discovery fails.
        try:
            from mcli.lib.discovery.command_discovery import ClickCommandDiscovery

            found = ClickCommandDiscovery().discover_all_commands()
            self.commands_list = found  # keep the full list for richer searching

            # Index every command under both its short and dotted name.
            lookup = {}
            for command in found:
                lookup[command.name] = command
                lookup[command.full_name] = command
            self.simple_command_lookup = lookup

            logger.info(f"Initialized with {len(found)} commands")
        except Exception as e:
            logger.debug(f"Command discovery failed: {e}")
            self.simple_command_lookup = {}
            self.commands_list = []
126
+
127
+ async def _initialize_rag_system(self):
128
+ """Initialize the RAG system for command search"""
129
+ try:
130
+ # Use a simplified command discovery for now to avoid hanging
131
+ from mcli.lib.discovery.command_discovery import ClickCommandDiscovery
132
+
133
+ discovery = ClickCommandDiscovery()
134
+ commands = discovery.discover_all_commands()
135
+
136
+ # Create a comprehensive command lookup for basic functionality
137
+ self.simple_command_lookup = {}
138
+ self.commands_list = commands # Keep full list for better searching
139
+
140
+ # Index by name, full_name, and description keywords
141
+ for cmd in commands:
142
+ self.simple_command_lookup[cmd.name] = cmd
143
+ self.simple_command_lookup[cmd.full_name] = cmd
144
+ logger.info(f"Simple command discovery initialized with {len(commands)} commands")
145
+
146
+ # Skip the full RAG system for now to avoid hanging
147
+ self.rag_system = None
148
+
149
+ except Exception as e:
150
+ logger.error(f"Failed to initialize command discovery: {e}")
151
+ self.rag_system = None
152
+ self.simple_command_lookup = {}
153
+
154
+ def _configure_model_settings(self):
155
+ """Configure model settings with enhanced system prompt"""
156
+ global config
157
+
158
+ if not self.use_remote:
159
+ config.update(
160
+ {
161
+ "provider": "local",
162
+ "model": self.model_override or "prajjwal1/bert-tiny",
163
+ "ollama_base_url": "http://localhost:8080",
164
+ "system_prompt": ENHANCED_SYSTEM_PROMPT,
165
+ }
166
+ )
167
+ else:
168
+ if config.get("openai_api_key") or config.get("anthropic_api_key"):
169
+ config["system_prompt"] = ENHANCED_SYSTEM_PROMPT
170
+ else:
171
+ self.console.print(
172
+ "āš ļø No API keys found. Switching to local models.", style="yellow"
173
+ )
174
+ self.use_remote = False
175
+ self._configure_model_settings()
176
+
177
+ def _ensure_daemon_running(self):
178
+ """Ensure the daemon is running for system integration"""
179
+ try:
180
+ if not self.daemon.health_check():
181
+ self.daemon.start_daemon()
182
+ except Exception as e:
183
+ logger.debug(f"Daemon check failed: {e}")
184
+
185
+ async def _enrich_message_with_context(self, user_message: str) -> str:
186
+ """Enrich user message with relevant command context and system information"""
187
+ enriched_parts = [user_message]
188
+
189
+ # Initialize RAG system if not already done
190
+ if self.rag_system is None:
191
+ await self._initialize_rag_system()
192
+
193
+ # Skip command enrichment in the context - let the AI response handle it
194
+ try:
195
+ # Add system status only
196
+ system_status = await self._get_system_status()
197
+ if system_status:
198
+ enriched_parts.append(f"\n--- SYSTEM STATUS ---")
199
+ enriched_parts.append(system_status)
200
+
201
+ except Exception as e:
202
+ logger.debug(f"System status failed: {e}")
203
+
204
+ return "\n".join(enriched_parts)
205
+
206
+ async def _get_system_status(self) -> str:
207
+ """Get current system status for context"""
208
+ status_parts = []
209
+
210
+ try:
211
+ # Redis status
212
+ import subprocess
213
+
214
+ result = subprocess.run(
215
+ ["python", "-m", "mcli", "redis", "status"],
216
+ capture_output=True,
217
+ text=True,
218
+ timeout=5,
219
+ )
220
+ if "RUNNING" in result.stdout:
221
+ status_parts.append("Redis cache: Active")
222
+ else:
223
+ status_parts.append("Redis cache: Inactive")
224
+ except:
225
+ pass
226
+
227
+ try:
228
+ # Performance status
229
+ from mcli.lib.performance.rust_bridge import check_rust_extensions
230
+
231
+ rust_status = check_rust_extensions()
232
+ if rust_status["available"]:
233
+ status_parts.append(
234
+ f"Rust extensions: Active ({sum(rust_status.values()) - 1}/4 components)"
235
+ )
236
+ else:
237
+ status_parts.append("Rust extensions: Inactive")
238
+ except:
239
+ pass
240
+
241
+ return " | ".join(status_parts) if status_parts else "System status unavailable"
242
+
243
+ def start_interactive_session(self):
244
+ """Start enhanced interactive chat session"""
245
+ self.console.print("\nšŸ¤– MCLI Enhanced Chat Assistant", style="bold cyan")
246
+ self.console.print(
247
+ "I can help you discover and use MCLI commands through intelligent search!",
248
+ style="cyan",
249
+ )
250
+ self.console.print(
251
+ "Type 'help' for assistance, 'commands' to search commands, or 'quit' to exit.\n"
252
+ )
253
+
254
+ while self.session_active:
255
+ try:
256
+ user_input = input("šŸ’¬ You: ").strip()
257
+
258
+ if not user_input:
259
+ continue
260
+
261
+ # Handle special commands
262
+ if user_input.lower() in ["quit", "exit", "bye"]:
263
+ self._handle_quit()
264
+ break
265
+ elif user_input.lower() == "help":
266
+ self._show_help()
267
+ continue
268
+ elif user_input.lower().startswith("commands"):
269
+ asyncio.run(self._handle_command_search(user_input))
270
+ continue
271
+ elif user_input.lower() == "status":
272
+ asyncio.run(self._show_system_status())
273
+ continue
274
+ elif user_input.lower() == "clear":
275
+ os.system("clear" if os.name == "posix" else "cls")
276
+ continue
277
+
278
+ # Process the message with RAG enhancement
279
+ asyncio.run(self._process_enhanced_message(user_input))
280
+
281
+ except KeyboardInterrupt:
282
+ self.console.print("\n\nšŸ‘‹ Goodbye!", style="cyan")
283
+ break
284
+ except EOFError:
285
+ break
286
+ except Exception as e:
287
+ self.console.print(f"āŒ Error: {e}", style="red")
288
+
289
+ async def _process_enhanced_message(self, user_message: str):
290
+ """Process message with RAG enhancement"""
291
+ try:
292
+ # Enrich message with command context
293
+ enriched_message = await self._enrich_message_with_context(user_message)
294
+
295
+ # Add to conversation history
296
+ self.conversation_context.append(
297
+ {
298
+ "user": user_message,
299
+ "timestamp": datetime.now().isoformat(),
300
+ "enriched": len(enriched_message) > len(user_message),
301
+ }
302
+ )
303
+
304
+ # Get AI response
305
+ response = await self._get_ai_response(enriched_message)
306
+
307
+ if response:
308
+ self._display_response(response)
309
+
310
+ # Extract and highlight any MCLI commands in the response
311
+ await self._highlight_mcli_commands(response)
312
+ else:
313
+ self.console.print("āŒ Sorry, I couldn't process that request.", style="red")
314
+
315
+ except Exception as e:
316
+ logger.error(f"Message processing failed: {e}")
317
+ self.console.print(f"āŒ Error processing message: {e}", style="red")
318
+
319
+ async def _get_ai_response(self, message: str) -> Optional[str]:
320
+ """Get AI response using configured provider"""
321
+ try:
322
+ if self.use_remote and config.get("openai_api_key"):
323
+ return await self._get_openai_response(message)
324
+ elif self.use_remote and config.get("anthropic_api_key"):
325
+ return await self._get_anthropic_response(message)
326
+ else:
327
+ return await self._get_local_response(message)
328
+ except Exception as e:
329
+ logger.error(f"AI response failed: {e}")
330
+ return None
331
+
332
+ async def _get_openai_response(self, message: str) -> str:
333
+ """Get response from OpenAI"""
334
+ try:
335
+ import openai
336
+
337
+ openai.api_key = config.get("openai_api_key")
338
+
339
+ response = openai.chat.completions.create(
340
+ model=config.get("model", "gpt-3.5-turbo"),
341
+ messages=[
342
+ {
343
+ "role": "system",
344
+ "content": config.get("system_prompt", ENHANCED_SYSTEM_PROMPT),
345
+ },
346
+ {"role": "user", "content": message},
347
+ ],
348
+ temperature=config.get("temperature", 0.7),
349
+ max_tokens=1000,
350
+ )
351
+
352
+ return response.choices[0].message.content
353
+ except Exception as e:
354
+ logger.error(f"OpenAI request failed: {e}")
355
+ return None
356
+
357
+ async def _get_anthropic_response(self, message: str) -> str:
358
+ """Get response from Anthropic"""
359
+ try:
360
+ import anthropic
361
+
362
+ client = anthropic.Anthropic(api_key=config.get("anthropic_api_key"))
363
+
364
+ response = client.messages.create(
365
+ model=config.get("model", "claude-3-sonnet-20240229"),
366
+ max_tokens=1000,
367
+ temperature=config.get("temperature", 0.7),
368
+ system=config.get("system_prompt", ENHANCED_SYSTEM_PROMPT),
369
+ messages=[{"role": "user", "content": message}],
370
+ )
371
+
372
+ return response.content[0].text
373
+ except Exception as e:
374
+ logger.error(f"Anthropic request failed: {e}")
375
+ return None
376
+
377
+ async def _get_local_response(self, message: str) -> str:
378
+ """Get response from local lightweight model"""
379
+ try:
380
+ # Use improved command matching for local responses
381
+ if hasattr(self, "commands_list") and self.commands_list:
382
+ user_lower = message.lower()
383
+ user_words = [word for word in user_lower.split() if len(word) > 2]
384
+ matching_commands = []
385
+
386
+ # Search through all commands for better matching
387
+ for cmd in self.commands_list:
388
+ score = 0
389
+ matched_reasons = []
390
+
391
+ # High priority: Check for exact phrase matches in full name
392
+ cmd_name_clean = cmd.full_name.lower().replace(".", " ")
393
+ if user_lower in cmd_name_clean:
394
+ score += 10
395
+ matched_reasons.append(f"exact phrase match")
396
+
397
+ # High priority: Check if user input matches command structure
398
+ user_parts = user_lower.split()
399
+ cmd_parts = cmd.full_name.lower().split(".")
400
+
401
+ # Check if all user words appear in the command path
402
+ if len(user_parts) >= 2 and all(
403
+ any(user_word in cmd_part for cmd_part in cmd_parts)
404
+ for user_word in user_parts
405
+ ):
406
+ score += 8
407
+ matched_reasons.append(f"matches command structure")
408
+
409
+ # Medium priority: Individual word matches in name
410
+ for word in user_words:
411
+ if word in cmd.full_name.lower():
412
+ score += 2
413
+ matched_reasons.append(f"name contains '{word}'")
414
+
415
+ # Lower priority: Check description for keywords
416
+ for word in user_words:
417
+ if word in cmd.description.lower():
418
+ score += 1
419
+ matched_reasons.append(f"description contains '{word}'")
420
+
421
+ # Special keyword matching
422
+ keyword_map = {
423
+ "video": ["video", "mp4", "avi", "mov", "ffmpeg", "frames"],
424
+ "redis": ["redis", "cache"],
425
+ "file": ["file", "convert", "pdf", "oxps"],
426
+ "daemon": ["daemon", "service", "api"],
427
+ "workflow": ["workflow", "automation"],
428
+ "system": ["system", "status", "monitor"],
429
+ }
430
+
431
+ for query_word in user_words:
432
+ for category, keywords in keyword_map.items():
433
+ if query_word in keywords:
434
+ for keyword in keywords:
435
+ if (
436
+ keyword in cmd.full_name.lower()
437
+ or keyword in cmd.description.lower()
438
+ ):
439
+ score += 3
440
+ matched_reasons.append(f"matches {category} category")
441
+ break
442
+
443
+ if score > 0:
444
+ matching_commands.append((cmd, score, matched_reasons))
445
+
446
+ # Sort by score and remove duplicates
447
+ matching_commands.sort(key=lambda x: x[1], reverse=True)
448
+
449
+ # Remove duplicates by full_name
450
+ seen = set()
451
+ unique_commands = []
452
+ for cmd, score, reasons in matching_commands:
453
+ if cmd.full_name not in seen:
454
+ seen.add(cmd.full_name)
455
+ unique_commands.append((cmd, score, reasons))
456
+
457
+ # Build response
458
+ if unique_commands:
459
+ response_parts = ["I found these relevant MCLI commands for you:\n"]
460
+ for i, (cmd, score, reasons) in enumerate(unique_commands[:5], 1):
461
+ response_parts.append(f"{i}. **mcli {cmd.full_name.replace('.', ' ')}**")
462
+ response_parts.append(f" {cmd.description}")
463
+ response_parts.append(f" (Score: {score} - {', '.join(reasons[:2])})\n")
464
+
465
+ response_parts.append("You can get more help with: `mcli <command> --help`")
466
+ return "\n".join(response_parts)
467
+ else:
468
+ # Suggest broader search
469
+ return (
470
+ f"I didn't find specific commands for '{message}', but I can help you explore!\n\n"
471
+ f"Try these approaches:\n"
472
+ f"• Use 'commands' to browse all available commands\n"
473
+ f"• Ask about specific topics like 'video processing', 'file conversion', 'system monitoring'\n"
474
+ f"• I have {len(self.commands_list)} commands available across categories like workflow, redis, files, and more!"
475
+ )
476
+
477
+ return "I'm ready to help! You can ask me about MCLI commands or use 'commands' to explore available options."
478
+
479
+ except Exception as e:
480
+ logger.error(f"Local response failed: {e}")
481
+ return f"I'm here to help with MCLI commands. Try asking about specific tasks like 'video processing' or 'file conversion'."
482
+
483
+ def _display_response(self, response: str):
484
+ """Display AI response with enhanced formatting"""
485
+ # Create a panel for the response
486
+ panel = Panel(
487
+ response,
488
+ title="šŸ¤– MCLI Assistant",
489
+ title_align="left",
490
+ border_style="cyan",
491
+ padding=(1, 2),
492
+ )
493
+ self.console.print(panel)
494
+
495
+ async def _highlight_mcli_commands(self, response: str):
496
+ """Extract and highlight MCLI commands from response"""
497
+ # Look for command patterns in the response
498
+ command_pattern = r"`mcli\s+([^`]+)`|mcli\s+([\w\s\-\.]+)"
499
+ matches = re.findall(command_pattern, response, re.IGNORECASE)
500
+
501
+ if matches:
502
+ self.console.print("\nšŸ’” **Detected Commands:**", style="yellow")
503
+ for match in matches[:3]: # Show up to 3 commands
504
+ command = match[0] or match[1]
505
+ if command.strip():
506
+ self.console.print(f" • `mcli {command.strip()}`", style="green")
507
+
508
+ async def _handle_command_search(self, query: str):
509
+ """Handle command search queries"""
510
+ search_term = query[8:].strip() if len(query) > 8 else "" # Remove "commands"
511
+
512
+ if not self.rag_system:
513
+ self.console.print(
514
+ "āŒ Command search not available (RAG system not initialized)", style="red"
515
+ )
516
+ return
517
+
518
+ if not search_term:
519
+ # Show all categories
520
+ capabilities = self.rag_system.get_system_capabilities()
521
+ self._show_command_categories(capabilities)
522
+ else:
523
+ # Search for specific commands
524
+ await self._search_and_display_commands(search_term)
525
+
526
+ def _show_command_categories(self, capabilities: Dict[str, Any]):
527
+ """Show command categories"""
528
+ table = Table(title="šŸ“‹ MCLI Command Categories")
529
+ table.add_column("Category", style="cyan")
530
+ table.add_column("Commands", justify="right", style="magenta")
531
+ table.add_column("Examples", style="green")
532
+
533
+ for category, info in capabilities["categories"].items():
534
+ examples = ", ".join([cmd["name"].split(".")[-1] for cmd in info["commands"][:2]])
535
+ if len(info["commands"]) > 2:
536
+ examples += f", ... (+{len(info['commands']) - 2} more)"
537
+
538
+ table.add_row(category, str(info["count"]), examples)
539
+
540
+ self.console.print(table)
541
+ self.console.print(
542
+ "\nšŸ’” Use 'commands <search_term>' to find specific commands", style="yellow"
543
+ )
544
+
545
+ async def _search_and_display_commands(self, search_term: str):
546
+ """Search and display matching commands"""
547
+ try:
548
+ results = await self.rag_system.search_commands(search_term, limit=8)
549
+
550
+ if not results:
551
+ self.console.print(f"āŒ No commands found matching: {search_term}", style="red")
552
+ return
553
+
554
+ self.console.print(f"\nšŸ” **Commands matching '{search_term}':**\n")
555
+
556
+ for i, (context, score) in enumerate(results, 1):
557
+ cmd = context.command
558
+ # Create command display
559
+ self.console.print(f"**{i}. {cmd.full_name.replace('.', ' ')}**", style="cyan")
560
+ self.console.print(f" {cmd.description}", style="white")
561
+ self.console.print(f" Category: {context.category}", style="yellow")
562
+
563
+ if context.examples:
564
+ self.console.print(f" Example: `{context.examples[0]}`", style="green")
565
+
566
+ self.console.print(f" Relevance: {score:.2f}\n", style="dim")
567
+
568
+ except Exception as e:
569
+ self.console.print(f"āŒ Search failed: {e}", style="red")
570
+
571
+ async def _show_system_status(self):
572
+ """Show comprehensive system status"""
573
+ try:
574
+ status_info = await self._get_system_status()
575
+
576
+ # Create status panel
577
+ status_panel = Panel(status_info, title="šŸ“Š System Status", border_style="green")
578
+ self.console.print(status_panel)
579
+
580
+ # Show RAG system status
581
+ if self.rag_system:
582
+ capabilities = self.rag_system.get_system_capabilities()
583
+ self.console.print(
584
+ f"\n🧠 Command Knowledge: {capabilities['total_commands']} commands indexed"
585
+ )
586
+ else:
587
+ self.console.print("\nāš ļø Command search system not available", style="yellow")
588
+
589
+ except Exception as e:
590
+ self.console.print(f"āŒ Status check failed: {e}", style="red")
591
+
592
    def _show_help(self):
        """Show enhanced help information"""
        # Static usage guide; .strip() below trims the leading/trailing
        # newlines introduced by the triple-quoted literal before display.
        help_text = """
šŸ¤– **MCLI Enhanced Chat Assistant Help**

**Basic Commands:**
• Just chat naturally - I'll suggest relevant MCLI commands
• `help` - Show this help message
• `commands` - Browse all available command categories
• `commands <search>` - Search for specific commands
• `status` - Show system and performance status
• `clear` - Clear the screen
• `quit` - Exit the chat

**What I Can Do:**
✨ **Command Discovery**: Find the right MCLI command for any task
šŸ” **Semantic Search**: Search commands by description or intent
šŸ’” **Smart Suggestions**: Get contextual command recommendations
šŸ“– **Usage Examples**: See exact command syntax and parameters
⚔ **Performance Aware**: Know about Rust extensions and Redis status
šŸ”— **Workflow Building**: Chain commands together for complex tasks

**Example Queries:**
• "How do I start Redis cache?"
• "Show me file conversion commands"
• "I need to schedule a recurring task"
• "What performance optimizations are available?"
• "Help me automate video processing"

**Tips:**
• Be specific about what you want to accomplish
• Ask for examples if you need exact command syntax
• Use 'commands <keyword>' to explore specific areas
• I can see all available MCLI commands and their capabilities!
"""

        help_panel = Panel(help_text.strip(), title="ā“ Help & Usage Guide", border_style="blue")
        self.console.print(help_panel)
630
+
631
+ def _handle_quit(self):
632
+ """Handle quit command"""
633
+ self.session_active = False
634
+ self.console.print("\nšŸŽÆ **Session Summary:**")
635
+ if self.conversation_context:
636
+ self.console.print(f" • Processed {len(self.conversation_context)} messages")
637
+ enriched_count = sum(1 for ctx in self.conversation_context if ctx.get("enriched"))
638
+ if enriched_count:
639
+ self.console.print(f" • Enhanced {enriched_count} messages with command context")
640
+
641
+ self.console.print("\nšŸ‘‹ **Thank you for using MCLI Enhanced Chat!**", style="cyan")
642
+ self.console.print(
643
+ "šŸ’” Remember: You can always run `mcli chat` to start a new session.", style="yellow"
644
+ )
645
+
646
+
647
# Compatibility function for existing interface
def create_enhanced_chat_client(
    use_remote: bool = False, model_override: Optional[str] = None
) -> EnhancedChatClient:
    """Create enhanced chat client instance.

    Args:
        use_remote: Use a remote LLM provider instead of local models.
        model_override: Explicit model name; None selects the default.
            (Fixed: was annotated `str = None`, an implicit Optional
            disallowed by PEP 484.)

    Returns:
        A newly constructed EnhancedChatClient.
    """
    return EnhancedChatClient(use_remote=use_remote, model_override=model_override)