mcli-framework 7.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mcli-framework might be problematic. Click here for more details.

Files changed (186) hide show
  1. mcli/app/chat_cmd.py +42 -0
  2. mcli/app/commands_cmd.py +226 -0
  3. mcli/app/completion_cmd.py +216 -0
  4. mcli/app/completion_helpers.py +288 -0
  5. mcli/app/cron_test_cmd.py +697 -0
  6. mcli/app/logs_cmd.py +419 -0
  7. mcli/app/main.py +492 -0
  8. mcli/app/model/model.py +1060 -0
  9. mcli/app/model_cmd.py +227 -0
  10. mcli/app/redis_cmd.py +269 -0
  11. mcli/app/video/video.py +1114 -0
  12. mcli/app/visual_cmd.py +303 -0
  13. mcli/chat/chat.py +2409 -0
  14. mcli/chat/command_rag.py +514 -0
  15. mcli/chat/enhanced_chat.py +652 -0
  16. mcli/chat/system_controller.py +1010 -0
  17. mcli/chat/system_integration.py +1016 -0
  18. mcli/cli.py +25 -0
  19. mcli/config.toml +20 -0
  20. mcli/lib/api/api.py +586 -0
  21. mcli/lib/api/daemon_client.py +203 -0
  22. mcli/lib/api/daemon_client_local.py +44 -0
  23. mcli/lib/api/daemon_decorator.py +217 -0
  24. mcli/lib/api/mcli_decorators.py +1032 -0
  25. mcli/lib/auth/auth.py +85 -0
  26. mcli/lib/auth/aws_manager.py +85 -0
  27. mcli/lib/auth/azure_manager.py +91 -0
  28. mcli/lib/auth/credential_manager.py +192 -0
  29. mcli/lib/auth/gcp_manager.py +93 -0
  30. mcli/lib/auth/key_manager.py +117 -0
  31. mcli/lib/auth/mcli_manager.py +93 -0
  32. mcli/lib/auth/token_manager.py +75 -0
  33. mcli/lib/auth/token_util.py +1011 -0
  34. mcli/lib/config/config.py +47 -0
  35. mcli/lib/discovery/__init__.py +1 -0
  36. mcli/lib/discovery/command_discovery.py +274 -0
  37. mcli/lib/erd/erd.py +1345 -0
  38. mcli/lib/erd/generate_graph.py +453 -0
  39. mcli/lib/files/files.py +76 -0
  40. mcli/lib/fs/fs.py +109 -0
  41. mcli/lib/lib.py +29 -0
  42. mcli/lib/logger/logger.py +611 -0
  43. mcli/lib/performance/optimizer.py +409 -0
  44. mcli/lib/performance/rust_bridge.py +502 -0
  45. mcli/lib/performance/uvloop_config.py +154 -0
  46. mcli/lib/pickles/pickles.py +50 -0
  47. mcli/lib/search/cached_vectorizer.py +479 -0
  48. mcli/lib/services/data_pipeline.py +460 -0
  49. mcli/lib/services/lsh_client.py +441 -0
  50. mcli/lib/services/redis_service.py +387 -0
  51. mcli/lib/shell/shell.py +137 -0
  52. mcli/lib/toml/toml.py +33 -0
  53. mcli/lib/ui/styling.py +47 -0
  54. mcli/lib/ui/visual_effects.py +634 -0
  55. mcli/lib/watcher/watcher.py +185 -0
  56. mcli/ml/api/app.py +215 -0
  57. mcli/ml/api/middleware.py +224 -0
  58. mcli/ml/api/routers/admin_router.py +12 -0
  59. mcli/ml/api/routers/auth_router.py +244 -0
  60. mcli/ml/api/routers/backtest_router.py +12 -0
  61. mcli/ml/api/routers/data_router.py +12 -0
  62. mcli/ml/api/routers/model_router.py +302 -0
  63. mcli/ml/api/routers/monitoring_router.py +12 -0
  64. mcli/ml/api/routers/portfolio_router.py +12 -0
  65. mcli/ml/api/routers/prediction_router.py +267 -0
  66. mcli/ml/api/routers/trade_router.py +12 -0
  67. mcli/ml/api/routers/websocket_router.py +76 -0
  68. mcli/ml/api/schemas.py +64 -0
  69. mcli/ml/auth/auth_manager.py +425 -0
  70. mcli/ml/auth/models.py +154 -0
  71. mcli/ml/auth/permissions.py +302 -0
  72. mcli/ml/backtesting/backtest_engine.py +502 -0
  73. mcli/ml/backtesting/performance_metrics.py +393 -0
  74. mcli/ml/cache.py +400 -0
  75. mcli/ml/cli/main.py +398 -0
  76. mcli/ml/config/settings.py +394 -0
  77. mcli/ml/configs/dvc_config.py +230 -0
  78. mcli/ml/configs/mlflow_config.py +131 -0
  79. mcli/ml/configs/mlops_manager.py +293 -0
  80. mcli/ml/dashboard/app.py +532 -0
  81. mcli/ml/dashboard/app_integrated.py +738 -0
  82. mcli/ml/dashboard/app_supabase.py +560 -0
  83. mcli/ml/dashboard/app_training.py +615 -0
  84. mcli/ml/dashboard/cli.py +51 -0
  85. mcli/ml/data_ingestion/api_connectors.py +501 -0
  86. mcli/ml/data_ingestion/data_pipeline.py +567 -0
  87. mcli/ml/data_ingestion/stream_processor.py +512 -0
  88. mcli/ml/database/migrations/env.py +94 -0
  89. mcli/ml/database/models.py +667 -0
  90. mcli/ml/database/session.py +200 -0
  91. mcli/ml/experimentation/ab_testing.py +845 -0
  92. mcli/ml/features/ensemble_features.py +607 -0
  93. mcli/ml/features/political_features.py +676 -0
  94. mcli/ml/features/recommendation_engine.py +809 -0
  95. mcli/ml/features/stock_features.py +573 -0
  96. mcli/ml/features/test_feature_engineering.py +346 -0
  97. mcli/ml/logging.py +85 -0
  98. mcli/ml/mlops/data_versioning.py +518 -0
  99. mcli/ml/mlops/experiment_tracker.py +377 -0
  100. mcli/ml/mlops/model_serving.py +481 -0
  101. mcli/ml/mlops/pipeline_orchestrator.py +614 -0
  102. mcli/ml/models/base_models.py +324 -0
  103. mcli/ml/models/ensemble_models.py +675 -0
  104. mcli/ml/models/recommendation_models.py +474 -0
  105. mcli/ml/models/test_models.py +487 -0
  106. mcli/ml/monitoring/drift_detection.py +676 -0
  107. mcli/ml/monitoring/metrics.py +45 -0
  108. mcli/ml/optimization/portfolio_optimizer.py +834 -0
  109. mcli/ml/preprocessing/data_cleaners.py +451 -0
  110. mcli/ml/preprocessing/feature_extractors.py +491 -0
  111. mcli/ml/preprocessing/ml_pipeline.py +382 -0
  112. mcli/ml/preprocessing/politician_trading_preprocessor.py +569 -0
  113. mcli/ml/preprocessing/test_preprocessing.py +294 -0
  114. mcli/ml/scripts/populate_sample_data.py +200 -0
  115. mcli/ml/tasks.py +400 -0
  116. mcli/ml/tests/test_integration.py +429 -0
  117. mcli/ml/tests/test_training_dashboard.py +387 -0
  118. mcli/public/oi/oi.py +15 -0
  119. mcli/public/public.py +4 -0
  120. mcli/self/self_cmd.py +1246 -0
  121. mcli/workflow/daemon/api_daemon.py +800 -0
  122. mcli/workflow/daemon/async_command_database.py +681 -0
  123. mcli/workflow/daemon/async_process_manager.py +591 -0
  124. mcli/workflow/daemon/client.py +530 -0
  125. mcli/workflow/daemon/commands.py +1196 -0
  126. mcli/workflow/daemon/daemon.py +905 -0
  127. mcli/workflow/daemon/daemon_api.py +59 -0
  128. mcli/workflow/daemon/enhanced_daemon.py +571 -0
  129. mcli/workflow/daemon/process_cli.py +244 -0
  130. mcli/workflow/daemon/process_manager.py +439 -0
  131. mcli/workflow/daemon/test_daemon.py +275 -0
  132. mcli/workflow/dashboard/dashboard_cmd.py +113 -0
  133. mcli/workflow/docker/docker.py +0 -0
  134. mcli/workflow/file/file.py +100 -0
  135. mcli/workflow/gcloud/config.toml +21 -0
  136. mcli/workflow/gcloud/gcloud.py +58 -0
  137. mcli/workflow/git_commit/ai_service.py +328 -0
  138. mcli/workflow/git_commit/commands.py +430 -0
  139. mcli/workflow/lsh_integration.py +355 -0
  140. mcli/workflow/model_service/client.py +594 -0
  141. mcli/workflow/model_service/download_and_run_efficient_models.py +288 -0
  142. mcli/workflow/model_service/lightweight_embedder.py +397 -0
  143. mcli/workflow/model_service/lightweight_model_server.py +714 -0
  144. mcli/workflow/model_service/lightweight_test.py +241 -0
  145. mcli/workflow/model_service/model_service.py +1955 -0
  146. mcli/workflow/model_service/ollama_efficient_runner.py +425 -0
  147. mcli/workflow/model_service/pdf_processor.py +386 -0
  148. mcli/workflow/model_service/test_efficient_runner.py +234 -0
  149. mcli/workflow/model_service/test_example.py +315 -0
  150. mcli/workflow/model_service/test_integration.py +131 -0
  151. mcli/workflow/model_service/test_new_features.py +149 -0
  152. mcli/workflow/openai/openai.py +99 -0
  153. mcli/workflow/politician_trading/commands.py +1790 -0
  154. mcli/workflow/politician_trading/config.py +134 -0
  155. mcli/workflow/politician_trading/connectivity.py +490 -0
  156. mcli/workflow/politician_trading/data_sources.py +395 -0
  157. mcli/workflow/politician_trading/database.py +410 -0
  158. mcli/workflow/politician_trading/demo.py +248 -0
  159. mcli/workflow/politician_trading/models.py +165 -0
  160. mcli/workflow/politician_trading/monitoring.py +413 -0
  161. mcli/workflow/politician_trading/scrapers.py +966 -0
  162. mcli/workflow/politician_trading/scrapers_california.py +412 -0
  163. mcli/workflow/politician_trading/scrapers_eu.py +377 -0
  164. mcli/workflow/politician_trading/scrapers_uk.py +350 -0
  165. mcli/workflow/politician_trading/scrapers_us_states.py +438 -0
  166. mcli/workflow/politician_trading/supabase_functions.py +354 -0
  167. mcli/workflow/politician_trading/workflow.py +852 -0
  168. mcli/workflow/registry/registry.py +180 -0
  169. mcli/workflow/repo/repo.py +223 -0
  170. mcli/workflow/scheduler/commands.py +493 -0
  171. mcli/workflow/scheduler/cron_parser.py +238 -0
  172. mcli/workflow/scheduler/job.py +182 -0
  173. mcli/workflow/scheduler/monitor.py +139 -0
  174. mcli/workflow/scheduler/persistence.py +324 -0
  175. mcli/workflow/scheduler/scheduler.py +679 -0
  176. mcli/workflow/sync/sync_cmd.py +437 -0
  177. mcli/workflow/sync/test_cmd.py +314 -0
  178. mcli/workflow/videos/videos.py +242 -0
  179. mcli/workflow/wakatime/wakatime.py +11 -0
  180. mcli/workflow/workflow.py +37 -0
  181. mcli_framework-7.0.0.dist-info/METADATA +479 -0
  182. mcli_framework-7.0.0.dist-info/RECORD +186 -0
  183. mcli_framework-7.0.0.dist-info/WHEEL +5 -0
  184. mcli_framework-7.0.0.dist-info/entry_points.txt +7 -0
  185. mcli_framework-7.0.0.dist-info/licenses/LICENSE +21 -0
  186. mcli_framework-7.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,315 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Test example for the MCLI Model Service
4
+
5
+ This script demonstrates how to:
6
+ 1. Start the model service
7
+ 2. Load a simple language model
8
+ 3. Make inference requests
9
+ 4. Test different model types
10
+ """
11
+
12
+ import json
13
+ import os
14
+ import subprocess
15
+ import sys
16
+ import time
17
+ from pathlib import Path
18
+
19
+ import requests
20
+
21
+ # Add the current directory to Python path
22
+ sys.path.append(os.path.dirname(os.path.abspath(__file__)))
23
+
24
+ from client import ModelServiceClient
25
+
26
+
27
def check_service_running(url: str = "http://localhost:8000") -> bool:
    """Return True if the model service answers its health endpoint.

    Args:
        url: Base URL of the model service.

    Returns:
        True when GET {url}/health responds with HTTP 200; False on any
        connection problem, timeout, or non-200 status.
    """
    try:
        response = requests.get(f"{url}/health", timeout=5)
        return response.status_code == 200
    except requests.exceptions.RequestException:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
        # only network/HTTP failures mean "service not running".
        return False
34
+
35
+
36
def start_service():
    """Start the model service in the background if it is not already running.

    Returns:
        True when the service is up (already running or started successfully),
        False when it could not be started.
    """
    if check_service_running():
        print("✅ Model service is already running")
        return True

    print("🚀 Starting model service...")
    try:
        # Discard child output: keeping PIPEs that are never read can deadlock
        # the child once the OS pipe buffer fills.
        subprocess.Popen(
            [sys.executable, "model_service.py", "start"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )

        # Poll instead of a single fixed sleep so slower starts still succeed.
        for _ in range(10):
            time.sleep(1)
            if check_service_running():
                print("✅ Model service started successfully")
                return True

        print("❌ Failed to start model service")
        return False

    except Exception as e:
        print(f"❌ Error starting service: {e}")
        return False
64
+
65
+
66
def test_text_generation():
    """Exercise text generation end-to-end: load GPT-2, run prompts, unload."""
    banner = "=" * 60
    print("\n" + banner)
    print("🧪 Testing Text Generation")
    print(banner)

    svc = ModelServiceClient()

    try:
        # Pull a small text-generation model (fetched from Hugging Face on
        # first use) and remember its service-assigned ID.
        print("📥 Loading GPT-2 model...")
        gpt2_id = svc.load_model(
            name="GPT-2 Test",
            model_type="text-generation",
            model_path="gpt2",
            temperature=0.7,
            max_length=50,
        )
        print(f"✅ Model loaded with ID: {gpt2_id}")

        prompts = (
            "Hello, how are you?",
            "The future of artificial intelligence is",
            "Once upon a time, there was a magical",
            "The best way to learn programming is",
        )

        for idx, prompt in enumerate(prompts, 1):
            print(f"\n📝 Test {idx}: {prompt}")
            reply = svc.generate_text(gpt2_id, prompt)
            print(f"🤖 Generated: {reply['generated_text']}")
            print(f"⏱️ Time: {reply['execution_time_ms']} ms")

        # Release the model so it does not keep memory allocated.
        svc.unload_model(gpt2_id)
        print(f"\n🧹 Model {gpt2_id} unloaded")

    except Exception as e:
        print(f"❌ Error in text generation test: {e}")
106
+
107
+
108
def test_text_classification():
    """Exercise sentiment classification: load BERT, classify samples, unload."""
    rule = "=" * 60
    print("\n" + rule)
    print("🧪 Testing Text Classification")
    print(rule)

    service_client = ModelServiceClient()

    try:
        print("📥 Loading BERT sentiment model...")
        sentiment_id = service_client.load_model(
            name="BERT Sentiment",
            model_type="text-classification",
            model_path="nlptown/bert-base-multilingual-uncased-sentiment",
        )
        print(f"✅ Model loaded with ID: {sentiment_id}")

        samples = (
            "I love this product! It's amazing!",
            "This is the worst experience ever.",
            "The service was okay, nothing special.",
            "Absolutely fantastic and wonderful!",
        )

        for num, sample in enumerate(samples, 1):
            print(f"\n📝 Test {num}: {sample}")
            outcome = service_client.classify_text(sentiment_id, sample)
            print("🏷️ Classifications:")
            # Probabilities per label come back as a mapping.
            for label, score in outcome["classifications"].items():
                print(f" {label}: {score:.4f}")
            print(f"⏱️ Time: {outcome['execution_time_ms']} ms")

        # Free the model once the checks are done.
        service_client.unload_model(sentiment_id)
        print(f"\n🧹 Model {sentiment_id} unloaded")

    except Exception as e:
        print(f"❌ Error in text classification test: {e}")
148
+
149
+
150
def test_translation():
    """Exercise English→French translation via the Marian model."""
    divider = "=" * 60
    print("\n" + divider)
    print("🧪 Testing Translation")
    print(divider)

    api = ModelServiceClient()

    try:
        print("📥 Loading Marian translation model...")
        marian_id = api.load_model(
            name="Marian EN-FR", model_type="translation", model_path="Helsinki-NLP/opus-mt-en-fr"
        )
        print(f"✅ Model loaded with ID: {marian_id}")

        sentences = (
            "Hello, how are you?",
            "The weather is beautiful today.",
            "I love learning new languages.",
            "Technology is advancing rapidly.",
        )

        for count, sentence in enumerate(sentences, 1):
            print(f"\n📝 Test {count}: {sentence}")
            translation = api.translate_text(marian_id, sentence, source_lang="en", target_lang="fr")
            print(f"🌐 Translation: {translation['translated_text']}")
            print(f"⏱️ Time: {translation['execution_time_ms']} ms")

        # Unload to reclaim the model's memory.
        api.unload_model(marian_id)
        print(f"\n🧹 Model {marian_id} unloaded")

    except Exception as e:
        print(f"❌ Error in translation test: {e}")
186
+
187
+
188
def test_batch_operations():
    """Run several prompts through one model and report per-request timings."""
    bar = "=" * 60
    print("\n" + bar)
    print("🧪 Testing Batch Operations")
    print(bar)

    conn = ModelServiceClient()

    try:
        print("📥 Loading model for batch testing...")
        batch_model = conn.load_model(
            name="GPT-2 Batch Test",
            model_type="text-generation",
            model_path="gpt2",
            temperature=0.8,
            max_length=30,
        )

        batch_prompts = [
            "The quick brown fox",
            "In a galaxy far away",
            "The best time to plant a tree",
            "Life is what happens when",
            "Success is not final, failure",
        ]

        print(f"🔄 Processing {len(batch_prompts)} prompts...")
        started = time.time()

        records = []
        for pos, text in enumerate(batch_prompts, 1):
            print(f" Processing {pos}/{len(batch_prompts)}: {text[:20]}...")
            reply = conn.generate_text(batch_model, text)
            records.append(
                {
                    "prompt": text,
                    "generated": reply["generated_text"],
                    "time": reply["execution_time_ms"],
                }
            )

        elapsed = time.time() - started

        # Summary first, then the individual results.
        print(f"\n📊 Batch Results:")
        print(f"Total time: {elapsed:.2f} seconds")
        print(f"Average time per request: {elapsed/len(batch_prompts):.2f} seconds")

        for pos, record in enumerate(records, 1):
            print(f"\n{pos}. Prompt: {record['prompt']}")
            print(f" Generated: {record['generated']}")
            print(f" Time: {record['time']} ms")

        # Drop the model now that the batch is finished.
        conn.unload_model(batch_model)
        print(f"\n🧹 Model {batch_model} unloaded")

    except Exception as e:
        print(f"❌ Error in batch operations test: {e}")
249
+
250
+
251
def test_service_management():
    """Query service status/health and enumerate registered models."""
    line = "=" * 60
    print("\n" + line)
    print("🧪 Testing Service Management")
    print(line)

    mgr = ModelServiceClient()

    try:
        print("📊 Getting service status...")
        status = mgr.get_status()
        health = mgr.get_health()

        print(f"Service: {status['service']}")
        print(f"Version: {status['version']}")
        print(f"Status: {status['status']}")
        print(f"Models loaded: {status['models_loaded']}")
        print(f"Memory usage: {health.get('memory_usage_mb', 0):.1f} MB")

        print("\n📋 Listing models...")
        registered = mgr.list_models()
        if not registered:
            print("No models found")
        else:
            print(f"Found {len(registered)} models:")
            for entry in registered:
                marker = "✅" if entry.get("is_loaded") else "⏳"
                print(f" {marker} {entry['name']} ({entry['model_type']})")

        print("\n✅ Service management test completed")

    except Exception as e:
        print(f"❌ Error in service management test: {e}")
286
+
287
+
288
def main():
    """Run the full model-service test suite, starting the service if needed."""
    print("🚀 MCLI Model Service Test Suite")
    print("=" * 60)

    # Try to bring the service up automatically; start_service() was
    # previously defined but never called, so users were always told to
    # start it by hand.
    if not check_service_running():
        print("❌ Model service is not running")
        if not start_service():
            print("Please start the service first:")
            print(" python model_service.py start")
            return

    print("✅ Model service is running")

    # Run tests
    test_service_management()
    test_text_generation()
    test_text_classification()
    test_translation()
    test_batch_operations()

    print("\n" + "=" * 60)
    print("🎉 All tests completed!")
    print("=" * 60)
312
+
313
+
314
+ if __name__ == "__main__":
315
+ main()
@@ -0,0 +1,131 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Test script for lightweight model server integration with MCLI model service
4
+ """
5
+
6
+ import json
7
+ import sys
8
+ import time
9
+ from pathlib import Path
10
+
11
+ import requests
12
+
13
+ # Add the parent directory to the path so we can import the model service
14
+ sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))
15
+
16
+ from mcli.workflow.model_service.model_service import LIGHTWEIGHT_MODELS, ModelService
17
+
18
+
19
def test_lightweight_integration():
    """Smoke-test the lightweight model server wired into ModelService.

    Exercises four areas in order: model listing, system analysis, a real
    (small) model download, and the HTTP API if a server happens to be up.
    Failures are printed rather than raised, so every section always runs.
    """
    print("🧪 Testing Lightweight Model Server Integration")
    print("=" * 60)

    # Create service instance
    service = ModelService()

    # Test 1: List available lightweight models
    print("\n1. Testing lightweight models listing...")
    try:
        models = service.lightweight_server.downloader.get_downloaded_models()
        print(f"✅ Downloaded models: {models}")

        print("Available lightweight models:")
        for key, info in LIGHTWEIGHT_MODELS.items():
            # NOTE(review): the label says "Downloaded" but the membership
            # check is against loaded_models (models in memory), not the
            # downloader's list above — confirm which was intended.
            status = (
                "✅ Downloaded"
                if key in service.lightweight_server.loaded_models
                else "⏳ Not downloaded"
            )
            print(f" {status} - {info['name']} ({info['parameters']})")
    except Exception as e:
        print(f"❌ Error listing models: {e}")

    # Test 2: System analysis
    print("\n2. Testing system analysis...")
    try:
        system_info = service.lightweight_server.get_system_info()
        print(f"✅ System info: {system_info}")

        recommended = service.lightweight_server.recommend_model()
        print(f"✅ Recommended model: {recommended}")
    except Exception as e:
        print(f"❌ Error analyzing system: {e}")

    # Test 3: Download a small model
    print("\n3. Testing model download...")
    try:
        # Use the smallest model for testing
        test_model = "prajjwal1/bert-tiny"
        print(f"📥 Downloading {test_model}...")

        success = service.lightweight_server.download_and_load_model(test_model)
        if success:
            print(f"✅ Successfully downloaded {test_model}")
        else:
            print(f"❌ Failed to download {test_model}")
    except Exception as e:
        print(f"❌ Error downloading model: {e}")

    # Test 4: API endpoints (if server is running)
    print("\n4. Testing API endpoints...")
    try:
        # Test lightweight models endpoint
        response = requests.get("http://localhost:8000/lightweight/models", timeout=5)
        if response.status_code == 200:
            data = response.json()
            print(f"✅ Lightweight models API: {len(data.get('models', {}))} models available")
        else:
            print(f"⚠️ Lightweight models API returned status {response.status_code}")
    except requests.exceptions.ConnectionError:
        # A missing server is expected when run standalone; only other
        # errors indicate a real problem.
        print("⚠️ Model service not running, skipping API tests")
    except Exception as e:
        print(f"❌ Error testing API: {e}")

    print("\n✅ Integration test completed!")
86
+
87
+
88
def test_cli_commands():
    """Print the catalogue of lightweight-server CLI invocations."""
    print("\n🧪 Testing CLI Commands")
    print("=" * 40)

    print("Available CLI commands:")
    # Data-driven list: each new subcommand is one line here.
    for invocation in (
        "mcli model-service lightweight --list",
        "mcli model-service lightweight --auto",
        "mcli model-service lightweight --download prajjwal1/bert-tiny",
        "mcli model-service lightweight --start-server --port 8080",
        "mcli model-service lightweight-run --auto --port 8080",
        "mcli model-service lightweight-run --list-models",
    ):
        print(f" {invocation}")
100
+
101
+
102
def main():
    """Run the integration checks, then print usage examples."""
    print("🚀 MCLI Lightweight Model Server Integration Test")
    print("=" * 70)

    test_lightweight_integration()
    test_cli_commands()

    print("\n📝 Usage Examples:")
    examples = [
        ("1. List available lightweight models:",
         [" mcli model-service lightweight --list"]),
        ("2. Download recommended model:",
         [" mcli model-service lightweight --auto"]),
        ("3. Start lightweight server:",
         [" mcli model-service lightweight --start-server --port 8080"]),
        ("4. Run standalone lightweight server:",
         [" mcli model-service lightweight-run --auto --port 8080"]),
        ("5. API endpoints (when service is running):",
         [" GET /lightweight/models",
          " POST /lightweight/models/{model_key}/download",
          " POST /lightweight/start",
          " GET /lightweight/status"]),
    ]
    for index, (title, lines) in enumerate(examples):
        if index:
            print()  # blank separator between examples, none after the last
        print(title)
        for line in lines:
            print(line)
128
+
129
+
130
+ if __name__ == "__main__":
131
+ main()
@@ -0,0 +1,149 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Test script for the new model service features:
4
+ 1. List models functionality
5
+ 2. Add model from URL functionality
6
+ """
7
+
8
+ import os
9
+ import sys
10
+ from pathlib import Path
11
+
12
+ # Add the parent directory to the path so we can import the model service
13
+ sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent))
14
+
15
+ from mcli.workflow.model_service.model_service import ModelManager, ModelService
16
+
17
+
18
def test_list_models():
    """Verify models can be enumerated and summarized from the database.

    Returns:
        True on success, False when any step raised.
    """
    print("🧪 Testing list models functionality...")

    try:
        svc = ModelService()
        registered = svc.model_manager.db.get_all_models()

        print(f"📝 Found {len(registered)} models in database")

        if not registered:
            print("No models found in database")
        else:
            print("Models:")
            for entry in registered:
                state = "🟢 Loaded" if entry.is_loaded else "⚪ Not Loaded"
                print(f" {state} - {entry.name} ({entry.model_type})")

        # The aggregate view must also be reachable.
        overview = svc.model_manager.get_models_summary()
        print(f"\n📊 Summary:")
        print(f" Total models: {overview['total_models']}")
        print(f" Loaded models: {overview['loaded_models']}")
        print(f" Total memory: {overview['total_memory_mb']:.1f} MB")

        print("✅ List models test passed!")

    except Exception as e:
        print(f"❌ List models test failed: {e}")
        return False

    return True
50
+
51
+
52
def test_add_model_from_url():
    """Check that the URL-based model registration API surface exists.

    Only verifies the expected methods are present; no download is
    performed (a real download would be slow and network-dependent).

    Returns:
        True when both expected methods exist, False otherwise.
    """
    print("\n🧪 Testing add model from URL functionality...")

    try:
        service = ModelService()

        # Reference URL only — never fetched. (The unused tokenizer URL
        # that used to sit here was dead code and has been removed.)
        test_model_url = (
            "https://huggingface.co/microsoft/DialoGPT-small/resolve/main/pytorch_model.bin"
        )

        print(f"🌐 Testing with URL: {test_model_url}")

        # Verify the API surface instead of performing a real download.
        for method_name in ("add_model_from_url", "download_model_from_url"):
            if hasattr(service.model_manager, method_name):
                print(f"✅ {method_name} method exists")
            else:
                print(f"❌ {method_name} method not found")
                return False

        print("✅ Add model from URL test passed!")

    except Exception as e:
        print(f"❌ Add model from URL test failed: {e}")
        return False

    return True
92
+
93
+
94
def test_api_endpoints():
    """Confirm the service app exposes the new model-management routes.

    Returns:
        True when every expected route is registered, False otherwise.
    """
    print("\n🧪 Testing API endpoints...")

    try:
        svc = ModelService()

        # Set membership is enough here; route order does not matter.
        registered_paths = {r.path for r in svc.app.routes}

        for expected in ("/models", "/models/summary", "/models/from-url"):
            if expected not in registered_paths:
                print(f"❌ Route {expected} not found")
                return False
            print(f"✅ Route {expected} found")

        print("✅ API endpoints test passed!")

    except Exception as e:
        print(f"❌ API endpoints test failed: {e}")
        return False

    return True
120
+
121
+
122
def main():
    """Execute every feature test and report an exit status.

    Returns:
        0 when all tests pass, 1 otherwise.
    """
    print("🚀 Testing new model service features...")
    print("=" * 50)

    suite = (test_list_models, test_add_model_from_url, test_api_endpoints)
    total = len(suite)

    passed = 0
    for run_test in suite:
        passed += 1 if run_test() else 0
        print()

    print("=" * 50)
    print(f"📊 Test Results: {passed}/{total} tests passed")

    if passed != total:
        print("❌ Some tests failed!")
        return 1
    print("🎉 All tests passed!")
    return 0
146
+
147
+
148
+ if __name__ == "__main__":
149
+ sys.exit(main())
@@ -0,0 +1,99 @@
1
+ import os
2
+
3
+ import click
4
+ import requests
5
+
6
+ from mcli.lib.logger.logger import get_logger
7
+
8
+ logger = get_logger(__name__)
9
+
10
+
11
+ OPENAI_NASTY_CATEGORIES = {
12
+ "sexual",
13
+ "hate",
14
+ "harassment",
15
+ "self-harm",
16
+ "sexual/minors",
17
+ "hate/threatening",
18
+ "violence/graphic",
19
+ "self-harm/intent",
20
+ "self-harm/instructions",
21
+ "harassment/threatening",
22
+ "violence",
23
+ }
24
+
25
+ # Get API key from environment variable
26
+ openai_api_key = os.environ.get("OPENAI_API_KEY", "")
27
+ start_sequence = "\nA:"
28
+ restart_sequence = "\n\nQ: "
29
+
30
+
31
class OpenAI:
    """Thin wrapper around the OpenAI moderation endpoint."""

    def __init__(self):
        # Kept for log context; mirrors the pattern used elsewhere in mcli.
        self.class_name = self.__class__.__name__

    def log_error(self, error, exception=None, warning=False):
        """Log *error*, at WARNING level when warning=True, else ERROR.

        Args:
            error: Message to log.
            exception: Optional originating exception, logged alongside.
            warning: Demote the record to a warning instead of an error.
        """
        # Bug fix: both branches previously called logger.error, making the
        # `warning` flag a no-op, and `exception` was silently discarded.
        if warning:
            logger.warning(error)
        elif exception is not None:
            logger.error(f"{error}: {exception}")
        else:
            logger.error(error)

    def is_text_risky(self, text: str) -> bool:
        """Ask the openai moderation endpoint if the text is risky.

        See https://platform.openai.com/docs/guides/moderation/quickstart for details.

        Returns:
            True when the text is flagged for a non-allowed category, or
            when the moderation call fails (fail closed). False when clean.
        """
        allowed_categories = {"violence"}  # Can be triggered by some AI safety terms

        try:
            http_response = requests.post(
                "https://api.openai.com/v1/moderations",
                headers={
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {openai_api_key}",
                },
                json={"input": text},
            )
        except Exception as e:
            self.log_error("Error in Requests module trying to moderate content", e)
            return True

        if http_response.status_code == 401:
            self.log_error("OpenAI Authentication Failed")
            return True
        elif http_response.status_code == 429:
            self.log_error("OpenAI Rate Limit Exceeded", warning=True)
            return True
        elif http_response.status_code != 200:
            self.log_error(
                f"Possible issue with the OpenAI API. Status: {http_response.status_code}, Content: {http_response.text}"
            )
            return True

        response = http_response.json()
        logger.info(response)

        # Bug fix: the raw response dict was returned on success, so any
        # truthiness check on the result always read as "risky". Reduce the
        # response to a boolean: flagged categories minus the allowed set.
        results = response["results"][0]
        flagged = {
            category for category, hit in results.get("categories", {}).items() if hit
        }
        return bool(flagged - allowed_categories)
77
+
78
+
79
@click.group(name="openai")
def openai():
    """OpenAI CLI command group."""
    # Pure container; subcommands attach themselves via @openai.command.
    pass
83
+
84
+
85
@openai.command(name="is_text_risky")
@click.argument("text")
def is_text_risky(text: str):
    """Check if the provided text is risky using OpenAI moderation."""
    # Delegate to the OpenAI wrapper and surface its verdict verbatim.
    verdict = OpenAI().is_text_risky(text)
    click.echo(f"Is the text risky? {verdict}")
92
+
93
+
94
if __name__ == "__main__":
    # Example usage: moderate a harmless sample string.
    moderator = OpenAI()
    sample = "This is a test message."
    risky = moderator.is_text_risky(sample)
    print(f"Is the text risky? {risky}")