mcli-framework 7.0.0 (mcli_framework-7.0.0-py3-none-any.whl)

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.

Potentially problematic release: this version of mcli-framework might be problematic.

Files changed (186)
  1. mcli/app/chat_cmd.py +42 -0
  2. mcli/app/commands_cmd.py +226 -0
  3. mcli/app/completion_cmd.py +216 -0
  4. mcli/app/completion_helpers.py +288 -0
  5. mcli/app/cron_test_cmd.py +697 -0
  6. mcli/app/logs_cmd.py +419 -0
  7. mcli/app/main.py +492 -0
  8. mcli/app/model/model.py +1060 -0
  9. mcli/app/model_cmd.py +227 -0
  10. mcli/app/redis_cmd.py +269 -0
  11. mcli/app/video/video.py +1114 -0
  12. mcli/app/visual_cmd.py +303 -0
  13. mcli/chat/chat.py +2409 -0
  14. mcli/chat/command_rag.py +514 -0
  15. mcli/chat/enhanced_chat.py +652 -0
  16. mcli/chat/system_controller.py +1010 -0
  17. mcli/chat/system_integration.py +1016 -0
  18. mcli/cli.py +25 -0
  19. mcli/config.toml +20 -0
  20. mcli/lib/api/api.py +586 -0
  21. mcli/lib/api/daemon_client.py +203 -0
  22. mcli/lib/api/daemon_client_local.py +44 -0
  23. mcli/lib/api/daemon_decorator.py +217 -0
  24. mcli/lib/api/mcli_decorators.py +1032 -0
  25. mcli/lib/auth/auth.py +85 -0
  26. mcli/lib/auth/aws_manager.py +85 -0
  27. mcli/lib/auth/azure_manager.py +91 -0
  28. mcli/lib/auth/credential_manager.py +192 -0
  29. mcli/lib/auth/gcp_manager.py +93 -0
  30. mcli/lib/auth/key_manager.py +117 -0
  31. mcli/lib/auth/mcli_manager.py +93 -0
  32. mcli/lib/auth/token_manager.py +75 -0
  33. mcli/lib/auth/token_util.py +1011 -0
  34. mcli/lib/config/config.py +47 -0
  35. mcli/lib/discovery/__init__.py +1 -0
  36. mcli/lib/discovery/command_discovery.py +274 -0
  37. mcli/lib/erd/erd.py +1345 -0
  38. mcli/lib/erd/generate_graph.py +453 -0
  39. mcli/lib/files/files.py +76 -0
  40. mcli/lib/fs/fs.py +109 -0
  41. mcli/lib/lib.py +29 -0
  42. mcli/lib/logger/logger.py +611 -0
  43. mcli/lib/performance/optimizer.py +409 -0
  44. mcli/lib/performance/rust_bridge.py +502 -0
  45. mcli/lib/performance/uvloop_config.py +154 -0
  46. mcli/lib/pickles/pickles.py +50 -0
  47. mcli/lib/search/cached_vectorizer.py +479 -0
  48. mcli/lib/services/data_pipeline.py +460 -0
  49. mcli/lib/services/lsh_client.py +441 -0
  50. mcli/lib/services/redis_service.py +387 -0
  51. mcli/lib/shell/shell.py +137 -0
  52. mcli/lib/toml/toml.py +33 -0
  53. mcli/lib/ui/styling.py +47 -0
  54. mcli/lib/ui/visual_effects.py +634 -0
  55. mcli/lib/watcher/watcher.py +185 -0
  56. mcli/ml/api/app.py +215 -0
  57. mcli/ml/api/middleware.py +224 -0
  58. mcli/ml/api/routers/admin_router.py +12 -0
  59. mcli/ml/api/routers/auth_router.py +244 -0
  60. mcli/ml/api/routers/backtest_router.py +12 -0
  61. mcli/ml/api/routers/data_router.py +12 -0
  62. mcli/ml/api/routers/model_router.py +302 -0
  63. mcli/ml/api/routers/monitoring_router.py +12 -0
  64. mcli/ml/api/routers/portfolio_router.py +12 -0
  65. mcli/ml/api/routers/prediction_router.py +267 -0
  66. mcli/ml/api/routers/trade_router.py +12 -0
  67. mcli/ml/api/routers/websocket_router.py +76 -0
  68. mcli/ml/api/schemas.py +64 -0
  69. mcli/ml/auth/auth_manager.py +425 -0
  70. mcli/ml/auth/models.py +154 -0
  71. mcli/ml/auth/permissions.py +302 -0
  72. mcli/ml/backtesting/backtest_engine.py +502 -0
  73. mcli/ml/backtesting/performance_metrics.py +393 -0
  74. mcli/ml/cache.py +400 -0
  75. mcli/ml/cli/main.py +398 -0
  76. mcli/ml/config/settings.py +394 -0
  77. mcli/ml/configs/dvc_config.py +230 -0
  78. mcli/ml/configs/mlflow_config.py +131 -0
  79. mcli/ml/configs/mlops_manager.py +293 -0
  80. mcli/ml/dashboard/app.py +532 -0
  81. mcli/ml/dashboard/app_integrated.py +738 -0
  82. mcli/ml/dashboard/app_supabase.py +560 -0
  83. mcli/ml/dashboard/app_training.py +615 -0
  84. mcli/ml/dashboard/cli.py +51 -0
  85. mcli/ml/data_ingestion/api_connectors.py +501 -0
  86. mcli/ml/data_ingestion/data_pipeline.py +567 -0
  87. mcli/ml/data_ingestion/stream_processor.py +512 -0
  88. mcli/ml/database/migrations/env.py +94 -0
  89. mcli/ml/database/models.py +667 -0
  90. mcli/ml/database/session.py +200 -0
  91. mcli/ml/experimentation/ab_testing.py +845 -0
  92. mcli/ml/features/ensemble_features.py +607 -0
  93. mcli/ml/features/political_features.py +676 -0
  94. mcli/ml/features/recommendation_engine.py +809 -0
  95. mcli/ml/features/stock_features.py +573 -0
  96. mcli/ml/features/test_feature_engineering.py +346 -0
  97. mcli/ml/logging.py +85 -0
  98. mcli/ml/mlops/data_versioning.py +518 -0
  99. mcli/ml/mlops/experiment_tracker.py +377 -0
  100. mcli/ml/mlops/model_serving.py +481 -0
  101. mcli/ml/mlops/pipeline_orchestrator.py +614 -0
  102. mcli/ml/models/base_models.py +324 -0
  103. mcli/ml/models/ensemble_models.py +675 -0
  104. mcli/ml/models/recommendation_models.py +474 -0
  105. mcli/ml/models/test_models.py +487 -0
  106. mcli/ml/monitoring/drift_detection.py +676 -0
  107. mcli/ml/monitoring/metrics.py +45 -0
  108. mcli/ml/optimization/portfolio_optimizer.py +834 -0
  109. mcli/ml/preprocessing/data_cleaners.py +451 -0
  110. mcli/ml/preprocessing/feature_extractors.py +491 -0
  111. mcli/ml/preprocessing/ml_pipeline.py +382 -0
  112. mcli/ml/preprocessing/politician_trading_preprocessor.py +569 -0
  113. mcli/ml/preprocessing/test_preprocessing.py +294 -0
  114. mcli/ml/scripts/populate_sample_data.py +200 -0
  115. mcli/ml/tasks.py +400 -0
  116. mcli/ml/tests/test_integration.py +429 -0
  117. mcli/ml/tests/test_training_dashboard.py +387 -0
  118. mcli/public/oi/oi.py +15 -0
  119. mcli/public/public.py +4 -0
  120. mcli/self/self_cmd.py +1246 -0
  121. mcli/workflow/daemon/api_daemon.py +800 -0
  122. mcli/workflow/daemon/async_command_database.py +681 -0
  123. mcli/workflow/daemon/async_process_manager.py +591 -0
  124. mcli/workflow/daemon/client.py +530 -0
  125. mcli/workflow/daemon/commands.py +1196 -0
  126. mcli/workflow/daemon/daemon.py +905 -0
  127. mcli/workflow/daemon/daemon_api.py +59 -0
  128. mcli/workflow/daemon/enhanced_daemon.py +571 -0
  129. mcli/workflow/daemon/process_cli.py +244 -0
  130. mcli/workflow/daemon/process_manager.py +439 -0
  131. mcli/workflow/daemon/test_daemon.py +275 -0
  132. mcli/workflow/dashboard/dashboard_cmd.py +113 -0
  133. mcli/workflow/docker/docker.py +0 -0
  134. mcli/workflow/file/file.py +100 -0
  135. mcli/workflow/gcloud/config.toml +21 -0
  136. mcli/workflow/gcloud/gcloud.py +58 -0
  137. mcli/workflow/git_commit/ai_service.py +328 -0
  138. mcli/workflow/git_commit/commands.py +430 -0
  139. mcli/workflow/lsh_integration.py +355 -0
  140. mcli/workflow/model_service/client.py +594 -0
  141. mcli/workflow/model_service/download_and_run_efficient_models.py +288 -0
  142. mcli/workflow/model_service/lightweight_embedder.py +397 -0
  143. mcli/workflow/model_service/lightweight_model_server.py +714 -0
  144. mcli/workflow/model_service/lightweight_test.py +241 -0
  145. mcli/workflow/model_service/model_service.py +1955 -0
  146. mcli/workflow/model_service/ollama_efficient_runner.py +425 -0
  147. mcli/workflow/model_service/pdf_processor.py +386 -0
  148. mcli/workflow/model_service/test_efficient_runner.py +234 -0
  149. mcli/workflow/model_service/test_example.py +315 -0
  150. mcli/workflow/model_service/test_integration.py +131 -0
  151. mcli/workflow/model_service/test_new_features.py +149 -0
  152. mcli/workflow/openai/openai.py +99 -0
  153. mcli/workflow/politician_trading/commands.py +1790 -0
  154. mcli/workflow/politician_trading/config.py +134 -0
  155. mcli/workflow/politician_trading/connectivity.py +490 -0
  156. mcli/workflow/politician_trading/data_sources.py +395 -0
  157. mcli/workflow/politician_trading/database.py +410 -0
  158. mcli/workflow/politician_trading/demo.py +248 -0
  159. mcli/workflow/politician_trading/models.py +165 -0
  160. mcli/workflow/politician_trading/monitoring.py +413 -0
  161. mcli/workflow/politician_trading/scrapers.py +966 -0
  162. mcli/workflow/politician_trading/scrapers_california.py +412 -0
  163. mcli/workflow/politician_trading/scrapers_eu.py +377 -0
  164. mcli/workflow/politician_trading/scrapers_uk.py +350 -0
  165. mcli/workflow/politician_trading/scrapers_us_states.py +438 -0
  166. mcli/workflow/politician_trading/supabase_functions.py +354 -0
  167. mcli/workflow/politician_trading/workflow.py +852 -0
  168. mcli/workflow/registry/registry.py +180 -0
  169. mcli/workflow/repo/repo.py +223 -0
  170. mcli/workflow/scheduler/commands.py +493 -0
  171. mcli/workflow/scheduler/cron_parser.py +238 -0
  172. mcli/workflow/scheduler/job.py +182 -0
  173. mcli/workflow/scheduler/monitor.py +139 -0
  174. mcli/workflow/scheduler/persistence.py +324 -0
  175. mcli/workflow/scheduler/scheduler.py +679 -0
  176. mcli/workflow/sync/sync_cmd.py +437 -0
  177. mcli/workflow/sync/test_cmd.py +314 -0
  178. mcli/workflow/videos/videos.py +242 -0
  179. mcli/workflow/wakatime/wakatime.py +11 -0
  180. mcli/workflow/workflow.py +37 -0
  181. mcli_framework-7.0.0.dist-info/METADATA +479 -0
  182. mcli_framework-7.0.0.dist-info/RECORD +186 -0
  183. mcli_framework-7.0.0.dist-info/WHEEL +5 -0
  184. mcli_framework-7.0.0.dist-info/entry_points.txt +7 -0
  185. mcli_framework-7.0.0.dist-info/licenses/LICENSE +21 -0
  186. mcli_framework-7.0.0.dist-info/top_level.txt +1 -0
mcli/workflow/model_service/ollama_efficient_runner.py
@@ -0,0 +1,425 @@
#!/usr/bin/env python3
"""
Script to download and run efficient models from Ollama using the MCLI model service.

This script uses the Ollama API to pull the most efficient models and then
integrates them with the MCLI model service for local inference.
"""

import json
import os
import subprocess
import sys
import time
from pathlib import Path
from typing import Dict, List, Optional

import click
import requests

# Add the parent directory to the path so we can import the model service
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))

from mcli.workflow.model_service.model_service import ModelManager, ModelService

# Efficient models from Ollama with their model names
EFFICIENT_MODELS = {
    "phi3-mini": {
        "name": "Phi-3 Mini",
        "ollama_name": "phi3-mini",
        "description": "Microsoft's lightweight 3.8B model with excellent reasoning",
        "parameters": "3.8B",
        "efficiency_score": 9.5,
        "accuracy_score": 8.5,
        "tags": ["reasoning", "efficient", "lightweight"],
    },
    "gemma3n-1b": {
        "name": "Gemma3n 1B",
        "ollama_name": "gemma3n:1b",
        "description": "Google's efficient 1B model for everyday devices",
        "parameters": "1B",
        "efficiency_score": 9.8,
        "accuracy_score": 7.5,
        "tags": ["efficient", "small", "fast"],
    },
    "tinyllama-1.1b": {
        "name": "TinyLlama 1.1B",
        "ollama_name": "tinyllama:1.1b",
        "description": "Compact 1.1B model trained on 3 trillion tokens",
        "parameters": "1.1B",
        "efficiency_score": 9.7,
        "accuracy_score": 7.0,
        "tags": ["compact", "fast", "lightweight"],
    },
    "phi4-mini-reasoning": {
        "name": "Phi-4 Mini Reasoning",
        "ollama_name": "phi4-mini-reasoning",
        "description": "Lightweight 3.8B model with advanced reasoning",
        "parameters": "3.8B",
        "efficiency_score": 9.3,
        "accuracy_score": 8.8,
        "tags": ["reasoning", "advanced", "efficient"],
    },
    "llama3.2-1b": {
        "name": "Llama 3.2 1B",
        "ollama_name": "llama3.2:1b",
        "description": "Meta's efficient 1B model with good performance",
        "parameters": "1B",
        "efficiency_score": 9.6,
        "accuracy_score": 7.8,
        "tags": ["meta", "efficient", "balanced"],
    },
}


def check_ollama_installed():
    """Check if Ollama is installed and available on PATH."""
    try:
        # Check if the ollama command exists
        result = subprocess.run(["ollama", "--version"], capture_output=True, text=True, timeout=5)
        if result.returncode == 0:
            print(f"✅ Ollama found: {result.stdout.strip()}")
            return True
        else:
            print("❌ Ollama command failed")
            return False
    except FileNotFoundError:
        print("❌ Ollama not found. Please install Ollama first:")
        print("   Visit: https://ollama.com/download")
        return False
    except subprocess.TimeoutExpired:
        print("❌ Ollama command timed out")
        return False


def check_ollama_server(retries: int = 1):
    """Check if the Ollama server is running, starting it if necessary."""
    try:
        response = requests.get("http://localhost:11434/api/tags", timeout=5)
        if response.status_code == 200:
            print("✅ Ollama server is running")
            return True
        else:
            print("❌ Ollama server not responding")
            return False
    except requests.exceptions.RequestException:
        if retries <= 0:
            # Give up rather than retrying forever
            print("❌ Ollama server still not reachable")
            return False
        print("❌ Ollama server not running. Starting Ollama...")
        try:
            subprocess.Popen(
                ["ollama", "serve"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
            )
            time.sleep(3)
            # Re-check once after giving the server a moment to start
            return check_ollama_server(retries - 1)
        except Exception as e:
            print(f"❌ Failed to start Ollama server: {e}")
            return False


def get_system_info():
    """Get system information for model selection."""
    import psutil

    # Get CPU and memory info
    cpu_count = psutil.cpu_count()
    cpu_freq = psutil.cpu_freq()
    memory_gb = psutil.virtual_memory().total / (1024**3)

    # Check for GPU
    try:
        import torch

        gpu_available = torch.cuda.is_available()
        if gpu_available:
            gpu_name = torch.cuda.get_device_name(0)
            gpu_memory = torch.cuda.get_device_properties(0).total_memory / (1024**3)
        else:
            gpu_name = "None"
            gpu_memory = 0
    except ImportError:
        gpu_available = False
        gpu_name = "PyTorch not available"
        gpu_memory = 0

    return {
        "cpu_count": cpu_count,
        "cpu_freq_mhz": cpu_freq.current if cpu_freq else 0,
        "memory_gb": memory_gb,
        "gpu_available": gpu_available,
        "gpu_name": gpu_name,
        "gpu_memory_gb": gpu_memory,
    }


def recommend_model(system_info: Dict) -> str:
    """Recommend the best model based on system capabilities."""
    print("🔍 Analyzing system capabilities...")
    print(f"   CPU Cores: {system_info['cpu_count']}")
    print(f"   CPU Frequency: {system_info['cpu_freq_mhz']:.0f} MHz")
    print(f"   RAM: {system_info['memory_gb']:.1f} GB")
    print(f"   GPU: {system_info['gpu_name']}")
    print(f"   GPU Memory: {system_info['gpu_memory_gb']:.1f} GB")

    # Recommendation logic based on system capabilities
    if system_info["gpu_available"] and system_info["gpu_memory_gb"] >= 4:
        # Good GPU available
        if system_info["memory_gb"] >= 16:
            return "phi3-mini"  # Best balance for good hardware
        else:
            return "gemma3n-1b"  # More memory efficient
    elif system_info["memory_gb"] >= 8:
        # CPU-only with decent RAM
        return "phi3-mini"
    else:
        # Limited resources
        return "tinyllama-1.1b"


def pull_ollama_model(model_key: str) -> bool:
    """Pull a model from Ollama."""
    model_info = EFFICIENT_MODELS[model_key]

    print(f"\n📥 Pulling {model_info['name']} from Ollama...")
    print(f"   Model: {model_info['ollama_name']}")
    print(f"   Parameters: {model_info['parameters']}")
    print(f"   Efficiency Score: {model_info['efficiency_score']}/10")
    print(f"   Accuracy Score: {model_info['accuracy_score']}/10")

    try:
        # Pull the model using the ollama command
        result = subprocess.run(
            ["ollama", "pull", model_info["ollama_name"]],
            capture_output=True,
            text=True,
            timeout=300,  # 5 minute timeout
        )

        if result.returncode == 0:
            print(f"✅ Successfully pulled {model_info['name']}")
            return True
        else:
            print(f"❌ Failed to pull {model_info['name']}: {result.stderr}")
            return False

    except subprocess.TimeoutExpired:
        print(f"❌ Timeout while pulling {model_info['name']}")
        return False
    except Exception as e:
        print(f"❌ Error pulling {model_info['name']}: {e}")
        return False


def test_ollama_model(model_key: str):
    """Test the Ollama model with sample prompts."""
    model_info = EFFICIENT_MODELS[model_key]

    print(f"\n🧪 Testing {model_info['name']} via Ollama...")

    test_prompts = [
        "Explain quantum computing in simple terms.",
        "Write a Python function to calculate fibonacci numbers.",
        "What are the benefits of renewable energy?",
        "Translate 'Hello, how are you?' to Spanish.",
    ]

    for i, prompt in enumerate(test_prompts, 1):
        print(f"\n📝 Test {i}: {prompt}")

        try:
            # Use the Ollama API to generate a response
            response = requests.post(
                "http://localhost:11434/api/generate",
                json={"model": model_info["ollama_name"], "prompt": prompt, "stream": False},
                timeout=30,
            )

            if response.status_code == 200:
                result = response.json()
                response_text = result.get("response", "")
                # Ollama reports eval_duration in nanoseconds; convert to seconds
                eval_seconds = result.get("eval_duration", 0) / 1e9
                print(f"⏱️ Response time: {eval_seconds:.2f} seconds")
                print(
                    f"🤖 Response: {response_text[:200]}{'...' if len(response_text) > 200 else ''}"
                )
            else:
                print(f"❌ API error: {response.status_code}")

        except Exception as e:
            print(f"❌ Error generating response: {e}")


def list_available_models():
    """List models available in Ollama."""
    try:
        response = requests.get("http://localhost:11434/api/tags", timeout=5)
        if response.status_code == 200:
            models = response.json().get("models", [])
            print("\n📋 Available models in Ollama:")
            for model in models:
                print(f"   - {model['name']} ({model.get('size', 'unknown size')})")
            return models
        else:
            print("❌ Failed to get model list")
            return []
    except Exception as e:
        print(f"❌ Error listing models: {e}")
        return []


def create_mcli_integration_script(model_key: str):
    """Create a script to integrate the Ollama model with MCLI."""
    model_info = EFFICIENT_MODELS[model_key]

    script_content = f'''#!/usr/bin/env python3
"""
Integration script for {model_info['name']} with the MCLI model service.
This script provides a bridge between Ollama and the MCLI model service.
"""

import requests
import json
import time
from typing import Dict, Any

class OllamaMCLIBridge:
    def __init__(self, ollama_model: str, mcli_api_url: str = "http://localhost:8000"):
        self.ollama_model = ollama_model
        self.mcli_api_url = mcli_api_url

    def generate_text(self, prompt: str, max_length: int = 512, temperature: float = 0.7) -> str:
        """Generate text using the Ollama model"""
        try:
            response = requests.post(
                "http://localhost:11434/api/generate",
                json={{
                    "model": self.ollama_model,
                    "prompt": prompt,
                    "stream": False,
                    "options": {{
                        "num_predict": max_length,
                        "temperature": temperature
                    }}
                }},
                timeout=30
            )

            if response.status_code == 200:
                result = response.json()
                return result.get('response', '')
            else:
                raise Exception(f"Ollama API error: {{response.status_code}}")

        except Exception as e:
            raise Exception(f"Error generating text: {{e}}")

    def test_model(self):
        """Test the model with sample prompts"""
        test_prompts = [
            "Explain quantum computing in simple terms.",
            "Write a Python function to calculate fibonacci numbers.",
            "What are the benefits of renewable energy?"
        ]

        print(f"🧪 Testing {{self.ollama_model}}...")

        for i, prompt in enumerate(test_prompts, 1):
            print(f"\\n📝 Test {{i}}: {{prompt}}")

            try:
                start_time = time.time()
                response = self.generate_text(prompt)
                execution_time = time.time() - start_time

                print(f"⏱️ Response time: {{execution_time:.2f}} seconds")
                print(f"🤖 Response: {{response[:200]}}{{'...' if len(response) > 200 else ''}}")

            except Exception as e:
                print(f"❌ Error: {{e}}")

if __name__ == "__main__":
    bridge = OllamaMCLIBridge("{model_info['ollama_name']}")
    bridge.test_model()
'''

    script_path = Path(f"ollama_{model_key}_bridge.py")
    with open(script_path, "w") as f:
        f.write(script_content)

    # Make the generated script executable
    script_path.chmod(0o755)

    print(f"✅ Created integration script: {script_path}")
    return script_path


@click.command()
@click.option(
    "--model",
    type=click.Choice(list(EFFICIENT_MODELS.keys())),
    help="Specific model to download and run",
)
@click.option(
    "--auto", is_flag=True, default=True, help="Automatically select best model for your system"
)
@click.option("--test", is_flag=True, default=True, help="Run test prompts after setup")
@click.option("--list-models", is_flag=True, help="List available models in Ollama")
@click.option("--create-bridge", is_flag=True, help="Create MCLI integration script")
def main(model: Optional[str], auto: bool, test: bool, list_models: bool, create_bridge: bool):
    """Download and run efficient models from Ollama."""

    print("🚀 Ollama Efficient Model Runner")
    print("=" * 50)

    # Check Ollama installation
    if not check_ollama_installed():
        return 1

    # Check Ollama server
    if not check_ollama_server():
        return 1

    if list_models:
        list_available_models()
        return 0

    # Get system info and recommend a model
    system_info = get_system_info()

    if model:
        selected_model = model
        print(f"🎯 Using specified model: {selected_model}")
    elif auto:
        selected_model = recommend_model(system_info)
        print(f"🎯 Recommended model: {selected_model}")
    else:
        print("Available models:")
        for key, info in EFFICIENT_MODELS.items():
            print(f"   {key}: {info['name']} ({info['parameters']})")
        selected_model = click.prompt(
            "Select model", type=click.Choice(list(EFFICIENT_MODELS.keys()))
        )

    # Pull the model
    if not pull_ollama_model(selected_model):
        print("❌ Failed to pull model")
        return 1

    # Test the model
    if test:
        test_ollama_model(selected_model)

    # Create the integration script
    if create_bridge:
        script_path = create_mcli_integration_script(selected_model)
        print(f"\n🔗 Integration script created: {script_path}")
        print(f"   Run: python {script_path}")

    model_info = EFFICIENT_MODELS[selected_model]
    print(f"\n🎉 Setup complete! Model {model_info['name']} is ready to use.")
    print(f"📊 Model: {model_info['ollama_name']}")
    print("🌐 Ollama API: http://localhost:11434")
    print("📝 Use 'ollama list' to see all models")

    return 0


if __name__ == "__main__":
    sys.exit(main())
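
Usage sketch (not part of the diff): the snippet below drives the module's helpers programmatically rather than through the click command. It assumes mcli-framework is installed so the module path from RECORD resolves, that the diffed file is ollama_efficient_runner.py, and that Ollama is available locally; the function names themselves all come from the code above.

    # Programmatic usage sketch. The import path and a local Ollama install
    # are assumptions; the functions are defined in the module shown above.
    from mcli.workflow.model_service.ollama_efficient_runner import (
        check_ollama_installed,
        check_ollama_server,
        get_system_info,
        pull_ollama_model,
        recommend_model,
        test_ollama_model,
    )

    if check_ollama_installed() and check_ollama_server():
        choice = recommend_model(get_system_info())  # e.g. "phi3-mini" on a 16 GB machine
        if pull_ollama_model(choice):
            test_ollama_model(choice)  # runs the four sample prompts via the Ollama API

The same flow is what a bare command-line run performs, since --auto and --test default to true; --list-models and --create-bridge expose the remaining helpers.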