isa-model 0.4.0__py3-none-any.whl → 0.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (199)
  1. isa_model/client.py +466 -43
  2. isa_model/core/cache/redis_cache.py +12 -3
  3. isa_model/core/config/config_manager.py +230 -3
  4. isa_model/core/config.py +90 -0
  5. isa_model/core/database/direct_db_client.py +114 -0
  6. isa_model/core/database/migration_manager.py +563 -0
  7. isa_model/core/database/migrations.py +21 -1
  8. isa_model/core/database/supabase_client.py +154 -19
  9. isa_model/core/dependencies.py +316 -0
  10. isa_model/core/discovery/__init__.py +19 -0
  11. isa_model/core/discovery/consul_discovery.py +190 -0
  12. isa_model/core/logging/__init__.py +54 -0
  13. isa_model/core/logging/influx_logger.py +523 -0
  14. isa_model/core/logging/loki_logger.py +160 -0
  15. isa_model/core/models/__init__.py +27 -18
  16. isa_model/core/models/config_models.py +625 -0
  17. isa_model/core/models/deployment_billing_tracker.py +430 -0
  18. isa_model/core/models/model_manager.py +40 -17
  19. isa_model/core/models/model_metadata.py +690 -0
  20. isa_model/core/models/model_repo.py +174 -18
  21. isa_model/core/models/system_models.py +857 -0
  22. isa_model/core/repositories/__init__.py +9 -0
  23. isa_model/core/repositories/config_repository.py +912 -0
  24. isa_model/core/services/intelligent_model_selector.py +399 -21
  25. isa_model/core/storage/hf_storage.py +1 -1
  26. isa_model/core/types.py +1 -0
  27. isa_model/deployment/__init__.py +5 -48
  28. isa_model/deployment/core/__init__.py +2 -31
  29. isa_model/deployment/core/deployment_manager.py +1278 -370
  30. isa_model/deployment/local/__init__.py +31 -0
  31. isa_model/deployment/local/config.py +248 -0
  32. isa_model/deployment/local/gpu_gateway.py +607 -0
  33. isa_model/deployment/local/health_checker.py +428 -0
  34. isa_model/deployment/local/provider.py +586 -0
  35. isa_model/deployment/local/tensorrt_service.py +621 -0
  36. isa_model/deployment/local/transformers_service.py +644 -0
  37. isa_model/deployment/local/vllm_service.py +527 -0
  38. isa_model/deployment/modal/__init__.py +8 -0
  39. isa_model/deployment/modal/config.py +136 -0
  40. isa_model/deployment/{services/auto_hf_modal_deployer.py → modal/deployer.py} +1 -1
  41. isa_model/deployment/modal/services/__init__.py +3 -0
  42. isa_model/deployment/modal/services/audio/__init__.py +1 -0
  43. isa_model/deployment/modal/services/embedding/__init__.py +1 -0
  44. isa_model/deployment/modal/services/llm/__init__.py +1 -0
  45. isa_model/deployment/modal/services/llm/isa_llm_service.py +424 -0
  46. isa_model/deployment/modal/services/video/__init__.py +1 -0
  47. isa_model/deployment/modal/services/vision/__init__.py +1 -0
  48. isa_model/deployment/models/org-org-acme-corp-tenant-a-service-llm-20250825-225822/tenant-a-service_modal_service.py +48 -0
  49. isa_model/deployment/models/org-test-org-123-prefix-test-service-llm-20250825-225822/prefix-test-service_modal_service.py +48 -0
  50. isa_model/deployment/models/test-llm-service-llm-20250825-204442/test-llm-service_modal_service.py +48 -0
  51. isa_model/deployment/models/test-monitoring-gpt2-llm-20250825-212906/test-monitoring-gpt2_modal_service.py +48 -0
  52. isa_model/deployment/models/test-monitoring-gpt2-llm-20250825-213009/test-monitoring-gpt2_modal_service.py +48 -0
  53. isa_model/deployment/storage/__init__.py +5 -0
  54. isa_model/deployment/storage/deployment_repository.py +824 -0
  55. isa_model/deployment/triton/__init__.py +10 -0
  56. isa_model/deployment/triton/config.py +196 -0
  57. isa_model/deployment/triton/configs/__init__.py +1 -0
  58. isa_model/deployment/triton/provider.py +512 -0
  59. isa_model/deployment/triton/scripts/__init__.py +1 -0
  60. isa_model/deployment/triton/templates/__init__.py +1 -0
  61. isa_model/inference/__init__.py +47 -1
  62. isa_model/inference/ai_factory.py +137 -10
  63. isa_model/inference/legacy_services/__init__.py +21 -0
  64. isa_model/inference/legacy_services/model_evaluation.py +637 -0
  65. isa_model/inference/legacy_services/model_service.py +573 -0
  66. isa_model/inference/legacy_services/model_serving.py +717 -0
  67. isa_model/inference/legacy_services/model_training.py +561 -0
  68. isa_model/inference/models/__init__.py +21 -0
  69. isa_model/inference/models/inference_config.py +551 -0
  70. isa_model/inference/models/inference_record.py +675 -0
  71. isa_model/inference/models/performance_models.py +714 -0
  72. isa_model/inference/repositories/__init__.py +9 -0
  73. isa_model/inference/repositories/inference_repository.py +828 -0
  74. isa_model/inference/services/audio/base_stt_service.py +184 -11
  75. isa_model/inference/services/audio/openai_stt_service.py +22 -6
  76. isa_model/inference/services/custom_model_manager.py +277 -0
  77. isa_model/inference/services/embedding/ollama_embed_service.py +15 -3
  78. isa_model/inference/services/embedding/resilient_embed_service.py +285 -0
  79. isa_model/inference/services/llm/__init__.py +10 -2
  80. isa_model/inference/services/llm/base_llm_service.py +335 -24
  81. isa_model/inference/services/llm/cerebras_llm_service.py +628 -0
  82. isa_model/inference/services/llm/helpers/llm_adapter.py +9 -4
  83. isa_model/inference/services/llm/helpers/llm_prompts.py +342 -0
  84. isa_model/inference/services/llm/helpers/llm_utils.py +321 -23
  85. isa_model/inference/services/llm/huggingface_llm_service.py +581 -0
  86. isa_model/inference/services/llm/local_llm_service.py +747 -0
  87. isa_model/inference/services/llm/ollama_llm_service.py +9 -2
  88. isa_model/inference/services/llm/openai_llm_service.py +33 -16
  89. isa_model/inference/services/llm/yyds_llm_service.py +8 -2
  90. isa_model/inference/services/vision/__init__.py +22 -1
  91. isa_model/inference/services/vision/blip_vision_service.py +359 -0
  92. isa_model/inference/services/vision/helpers/image_utils.py +8 -5
  93. isa_model/inference/services/vision/isa_vision_service.py +65 -4
  94. isa_model/inference/services/vision/openai_vision_service.py +19 -10
  95. isa_model/inference/services/vision/vgg16_vision_service.py +257 -0
  96. isa_model/serving/api/cache_manager.py +245 -0
  97. isa_model/serving/api/dependencies/__init__.py +1 -0
  98. isa_model/serving/api/dependencies/auth.py +194 -0
  99. isa_model/serving/api/dependencies/database.py +139 -0
  100. isa_model/serving/api/error_handlers.py +284 -0
  101. isa_model/serving/api/fastapi_server.py +172 -22
  102. isa_model/serving/api/middleware/auth.py +8 -2
  103. isa_model/serving/api/middleware/security.py +23 -33
  104. isa_model/serving/api/middleware/tenant_context.py +414 -0
  105. isa_model/serving/api/routes/analytics.py +4 -1
  106. isa_model/serving/api/routes/config.py +645 -0
  107. isa_model/serving/api/routes/deployment_billing.py +315 -0
  108. isa_model/serving/api/routes/deployments.py +138 -2
  109. isa_model/serving/api/routes/gpu_gateway.py +440 -0
  110. isa_model/serving/api/routes/health.py +32 -12
  111. isa_model/serving/api/routes/inference_monitoring.py +486 -0
  112. isa_model/serving/api/routes/local_deployments.py +448 -0
  113. isa_model/serving/api/routes/tenants.py +575 -0
  114. isa_model/serving/api/routes/unified.py +680 -18
  115. isa_model/serving/api/routes/webhooks.py +479 -0
  116. isa_model/serving/api/startup.py +68 -54
  117. isa_model/utils/gpu_utils.py +311 -0
  118. {isa_model-0.4.0.dist-info → isa_model-0.4.3.dist-info}/METADATA +66 -24
  119. isa_model-0.4.3.dist-info/RECORD +193 -0
  120. isa_model/core/storage/minio_storage.py +0 -0
  121. isa_model/deployment/cloud/__init__.py +0 -9
  122. isa_model/deployment/cloud/modal/__init__.py +0 -10
  123. isa_model/deployment/core/deployment_config.py +0 -356
  124. isa_model/deployment/core/isa_deployment_service.py +0 -401
  125. isa_model/deployment/gpu_int8_ds8/app/server.py +0 -66
  126. isa_model/deployment/gpu_int8_ds8/scripts/test_client.py +0 -43
  127. isa_model/deployment/gpu_int8_ds8/scripts/test_client_os.py +0 -35
  128. isa_model/deployment/runtime/deployed_service.py +0 -338
  129. isa_model/deployment/services/__init__.py +0 -9
  130. isa_model/deployment/services/auto_deploy_vision_service.py +0 -538
  131. isa_model/deployment/services/model_service.py +0 -332
  132. isa_model/deployment/services/service_monitor.py +0 -356
  133. isa_model/deployment/services/service_registry.py +0 -527
  134. isa_model/eval/__init__.py +0 -92
  135. isa_model/eval/benchmarks/__init__.py +0 -27
  136. isa_model/eval/benchmarks/multimodal_datasets.py +0 -460
  137. isa_model/eval/benchmarks.py +0 -701
  138. isa_model/eval/config/__init__.py +0 -10
  139. isa_model/eval/config/evaluation_config.py +0 -108
  140. isa_model/eval/evaluators/__init__.py +0 -24
  141. isa_model/eval/evaluators/audio_evaluator.py +0 -727
  142. isa_model/eval/evaluators/base_evaluator.py +0 -503
  143. isa_model/eval/evaluators/embedding_evaluator.py +0 -742
  144. isa_model/eval/evaluators/llm_evaluator.py +0 -472
  145. isa_model/eval/evaluators/vision_evaluator.py +0 -564
  146. isa_model/eval/example_evaluation.py +0 -395
  147. isa_model/eval/factory.py +0 -798
  148. isa_model/eval/infrastructure/__init__.py +0 -24
  149. isa_model/eval/infrastructure/experiment_tracker.py +0 -466
  150. isa_model/eval/isa_benchmarks.py +0 -700
  151. isa_model/eval/isa_integration.py +0 -582
  152. isa_model/eval/metrics.py +0 -951
  153. isa_model/eval/tests/unit/test_basic.py +0 -396
  154. isa_model/serving/api/routes/evaluations.py +0 -579
  155. isa_model/training/__init__.py +0 -168
  156. isa_model/training/annotation/annotation_schema.py +0 -47
  157. isa_model/training/annotation/processors/annotation_processor.py +0 -126
  158. isa_model/training/annotation/storage/dataset_manager.py +0 -131
  159. isa_model/training/annotation/storage/dataset_schema.py +0 -44
  160. isa_model/training/annotation/tests/test_annotation_flow.py +0 -109
  161. isa_model/training/annotation/tests/test_minio copy.py +0 -113
  162. isa_model/training/annotation/tests/test_minio_upload.py +0 -43
  163. isa_model/training/annotation/views/annotation_controller.py +0 -158
  164. isa_model/training/cloud/__init__.py +0 -22
  165. isa_model/training/cloud/job_orchestrator.py +0 -402
  166. isa_model/training/cloud/runpod_trainer.py +0 -454
  167. isa_model/training/cloud/storage_manager.py +0 -482
  168. isa_model/training/core/__init__.py +0 -26
  169. isa_model/training/core/config.py +0 -181
  170. isa_model/training/core/dataset.py +0 -222
  171. isa_model/training/core/trainer.py +0 -720
  172. isa_model/training/core/utils.py +0 -213
  173. isa_model/training/examples/intelligent_training_example.py +0 -281
  174. isa_model/training/factory.py +0 -424
  175. isa_model/training/intelligent/__init__.py +0 -25
  176. isa_model/training/intelligent/decision_engine.py +0 -643
  177. isa_model/training/intelligent/intelligent_factory.py +0 -888
  178. isa_model/training/intelligent/knowledge_base.py +0 -751
  179. isa_model/training/intelligent/resource_optimizer.py +0 -839
  180. isa_model/training/intelligent/task_classifier.py +0 -576
  181. isa_model/training/storage/__init__.py +0 -24
  182. isa_model/training/storage/core_integration.py +0 -439
  183. isa_model/training/storage/training_repository.py +0 -552
  184. isa_model/training/storage/training_storage.py +0 -628
  185. isa_model-0.4.0.dist-info/RECORD +0 -182
  186. /isa_model/deployment/{cloud/modal → modal/services/audio}/isa_audio_chatTTS_service.py +0 -0
  187. /isa_model/deployment/{cloud/modal → modal/services/audio}/isa_audio_fish_service.py +0 -0
  188. /isa_model/deployment/{cloud/modal → modal/services/audio}/isa_audio_openvoice_service.py +0 -0
  189. /isa_model/deployment/{cloud/modal → modal/services/audio}/isa_audio_service_v2.py +0 -0
  190. /isa_model/deployment/{cloud/modal → modal/services/embedding}/isa_embed_rerank_service.py +0 -0
  191. /isa_model/deployment/{cloud/modal → modal/services/video}/isa_video_hunyuan_service.py +0 -0
  192. /isa_model/deployment/{cloud/modal → modal/services/vision}/isa_vision_ocr_service.py +0 -0
  193. /isa_model/deployment/{cloud/modal → modal/services/vision}/isa_vision_qwen25_service.py +0 -0
  194. /isa_model/deployment/{cloud/modal → modal/services/vision}/isa_vision_table_service.py +0 -0
  195. /isa_model/deployment/{cloud/modal → modal/services/vision}/isa_vision_ui_service.py +0 -0
  196. /isa_model/deployment/{cloud/modal → modal/services/vision}/isa_vision_ui_service_optimized.py +0 -0
  197. /isa_model/deployment/{services → modal/services/vision}/simple_auto_deploy_vision_service.py +0 -0
  198. {isa_model-0.4.0.dist-info → isa_model-0.4.3.dist-info}/WHEEL +0 -0
  199. {isa_model-0.4.0.dist-info → isa_model-0.4.3.dist-info}/top_level.txt +0 -0
isa_model/inference/legacy_services/model_service.py (new file)
@@ -0,0 +1,573 @@
+"""
+Model Service Suite
+Main orchestrator for machine learning model operations following 3-step pipeline pattern
+"""
+
+import pandas as pd
+from typing import Dict, List, Any, Optional
+import logging
+from dataclasses import dataclass, field
+from datetime import datetime
+
+from .model_training import ModelTrainingService, TrainingConfig, TrainingResult
+from .model_evaluation import ModelEvaluationService, EvaluationResult
+from .model_serving import ModelServingService, ServingConfig, ServingResult
+
+logger = logging.getLogger(__name__)
+
+@dataclass
+class ModelConfig:
+    """Configuration for complete model operations"""
+    training_enabled: bool = True
+    evaluation_enabled: bool = True
+    serving_enabled: bool = False
+    training_config: Optional[TrainingConfig] = None
+    evaluation_config: Optional[Dict[str, Any]] = None
+    serving_config: Optional[ServingConfig] = None
+    validation_level: str = "standard"  # basic, standard, strict
+
+@dataclass
+class ModelResult:
+    """Result of complete model pipeline"""
+    success: bool
+    model_info: Optional[Dict[str, Any]] = None
+    training_results: Optional[TrainingResult] = None
+    evaluation_results: Optional[EvaluationResult] = None
+    serving_results: Optional[ServingResult] = None
+    pipeline_summary: Dict[str, Any] = field(default_factory=dict)
+    performance_metrics: Dict[str, Any] = field(default_factory=dict)
+    warnings: List[str] = field(default_factory=list)
+    errors: List[str] = field(default_factory=list)
+    metadata: Dict[str, Any] = field(default_factory=dict)
+
+class ModelService:
+    """
+    Model Service Suite
+
+    Orchestrates machine learning model operations through 3 steps:
+    1. Model Training (build and train ML models)
+    2. Model Evaluation (evaluate and validate models)
+    3. Model Serving (deploy and serve models)
+
+    Follows the same pattern as preprocessor, transformation, and storage services.
+    """
+
+    def __init__(self, config: Optional[ModelConfig] = None):
+        self.config = config or ModelConfig()
+
+        # Initialize step services
+        self.training_service = ModelTrainingService()
+        self.evaluation_service = ModelEvaluationService()
+        self.serving_service = ModelServingService()
+
+        # Performance tracking
+        self.execution_stats = {
+            'total_model_operations': 0,
+            'successful_model_operations': 0,
+            'failed_model_operations': 0,
+            'models_created': 0,
+            'models_deployed': 0,
+            'average_duration': 0.0
+        }
+
+        logger.info("Model Service initialized")
+
+    def create_model(self,
+                     training_data: pd.DataFrame,
+                     target_column: str,
+                     model_spec: Dict[str, Any],
+                     config: Optional[ModelConfig] = None) -> ModelResult:
+        """
+        Execute complete model pipeline
+
+        Args:
+            training_data: Dataset for model training
+            target_column: Target variable column name
+            model_spec: Specification of model requirements
+            config: Optional configuration override
+
+        Returns:
+            ModelResult with complete model information
+        """
+        start_time = datetime.now()
+        config = config or self.config
+
+        try:
+            logger.info(f"Starting model pipeline for target: {target_column}")
+
+            # Initialize result
+            result = ModelResult(
+                success=False,
+                metadata={
+                    'start_time': start_time,
+                    'target_column': target_column,
+                    'data_shape': training_data.shape,
+                    'model_spec': model_spec
+                }
+            )
+
+            pipeline_summary = {}
+            performance_metrics = {}
+            model_info = None
+
+            # Step 1: Model Training
+            if config.training_enabled:
+                logger.info("Executing Step 1: Model Training")
+                training_config = self._prepare_training_config(model_spec, config)
+
+                training_result = self.training_service.train_model(
+                    data=training_data,
+                    target_column=target_column,
+                    training_config=training_config
+                )
+
+                if training_result.success:
+                    model_info = training_result.model_info
+                    pipeline_summary['training'] = {
+                        'model_id': model_info['model_id'],
+                        'algorithm': model_info['algorithm'],
+                        'problem_type': model_info['problem_type']
+                    }
+                    performance_metrics['training'] = training_result.performance_metrics
+                    result.training_results = training_result
+                    if training_result.warnings:
+                        result.warnings.extend(training_result.warnings)
+                else:
+                    result.errors.extend(training_result.errors)
+                    result.errors.append("Step 1 (Model Training) failed")
+                    return self._finalize_result(result, start_time)
+
+            # Step 2: Model Evaluation
+            if config.evaluation_enabled and model_info:
+                logger.info("Executing Step 2: Model Evaluation")
+
+                # Split data for evaluation if not provided
+                evaluation_data = self._prepare_evaluation_data(training_data, target_column, model_spec)
+                evaluation_config = self._prepare_evaluation_config(model_spec, config)
+
+                # Get trained model information
+                trained_model = self.training_service.get_trained_model(model_info['model_id'])
+
+                if trained_model:
+                    evaluation_result = self.evaluation_service.evaluate_model(
+                        model_info=trained_model,
+                        test_data=evaluation_data,
+                        target_column=target_column,
+                        evaluation_config=evaluation_config
+                    )
+
+                    if evaluation_result.success:
+                        pipeline_summary['evaluation'] = {
+                            'evaluation_metrics': evaluation_result.evaluation_metrics,
+                            'cross_validation': evaluation_result.cross_validation_results
+                        }
+                        performance_metrics['evaluation'] = evaluation_result.performance_metrics
+                        result.evaluation_results = evaluation_result
+                        if evaluation_result.warnings:
+                            result.warnings.extend(evaluation_result.warnings)
+                    else:
+                        result.errors.extend(evaluation_result.errors)
+                        result.errors.append("Step 2 (Model Evaluation) failed")
+                        # Don't fail the entire operation for evaluation errors
+                        logger.warning("Evaluation failed but training was successful")
+                else:
+                    result.warnings.append("Trained model not available for evaluation")
+
+            # Step 3: Model Serving (optional)
+            serving_results = None
+            if config.serving_enabled and model_info:
+                logger.info("Executing Step 3: Model Serving")
+                serving_config = self._prepare_serving_config(model_spec, config, model_info['model_id'])
+
+                # Get trained model information
+                trained_model = self.training_service.get_trained_model(model_info['model_id'])
+
+                if trained_model:
+                    serving_result = self.serving_service.deploy_model(
+                        model_info=trained_model,
+                        serving_config=serving_config
+                    )
+
+                    if serving_result.success:
+                        pipeline_summary['serving'] = {
+                            'deployment_status': 'active',
+                            'serving_mode': serving_config.serving_mode,
+                            'serving_info': serving_result.serving_info
+                        }
+                        performance_metrics['serving'] = serving_result.performance_metrics
+                        result.serving_results = serving_result
+                        if serving_result.warnings:
+                            result.warnings.extend(serving_result.warnings)
+                    else:
+                        result.errors.extend(serving_result.errors)
+                        result.warnings.append("Step 3 (Model Serving) failed but model was trained successfully")
+                        logger.warning("Serving failed but training was successful")
+                else:
+                    result.warnings.append("Trained model not available for serving")
+
+            # Success
+            result.success = True
+            result.model_info = model_info
+            result.pipeline_summary = pipeline_summary
+            result.performance_metrics = performance_metrics
+
+            return self._finalize_result(result, start_time)
+
+        except Exception as e:
+            logger.error(f"Model pipeline failed: {e}")
+            result.errors.append(f"Pipeline error: {str(e)}")
+            return self._finalize_result(result, start_time)
+
+    def compare_algorithms(self,
+                           training_data: pd.DataFrame,
+                           target_column: str,
+                           algorithms: List[str],
+                           comparison_config: Optional[Dict[str, Any]] = None) -> Dict[str, ModelResult]:
+        """Create and compare multiple models with different algorithms"""
+        comparison_config = comparison_config or {}
+        results = {}
+
+        logger.info(f"Comparing {len(algorithms)} algorithms")
+
+        for algorithm in algorithms:
+            algorithm_spec = {
+                'training': {
+                    'algorithm': algorithm,
+                    'hyperparameters': comparison_config.get('hyperparameters', {}).get(algorithm, {})
+                },
+                'evaluation': comparison_config.get('evaluation', {}),
+                'serving': comparison_config.get('serving', {})
+            }
+
+            try:
+                result = self.create_model(
+                    training_data=training_data,
+                    target_column=target_column,
+                    model_spec=algorithm_spec,
+                    config=ModelConfig(
+                        training_enabled=True,
+                        evaluation_enabled=True,
+                        serving_enabled=False  # Don't deploy during comparison
+                    )
+                )
+                results[algorithm] = result
+
+            except Exception as e:
+                logger.error(f"Algorithm comparison failed for {algorithm}: {e}")
+                results[algorithm] = ModelResult(
+                    success=False,
+                    errors=[str(e)]
+                )
+
+        return results
+
+    def get_algorithm_recommendations(self,
+                                      training_data: pd.DataFrame,
+                                      target_column: str,
+                                      preferences: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+        """Get ML algorithm recommendations for the dataset"""
+        try:
+            return self.training_service.get_algorithm_recommendations(
+                training_data, target_column
+            )
+        except Exception as e:
+            logger.error(f"Algorithm recommendations failed: {e}")
+            return {'success': False, 'error': str(e)}
+
+    def predict_with_model(self,
+                           model_id: str,
+                           input_data: pd.DataFrame,
+                           prediction_config: Optional[Dict[str, Any]] = None) -> ServingResult:
+        """Make predictions using a trained and deployed model"""
+        try:
+            return self.serving_service.predict(
+                model_id=model_id,
+                input_data=input_data,
+                prediction_config=prediction_config
+            )
+        except Exception as e:
+            logger.error(f"Prediction failed: {e}")
+            return ServingResult(
+                success=False,
+                errors=[str(e)]
+            )
+
+    def deploy_existing_model(self,
+                              model_id: str,
+                              serving_config: ServingConfig) -> ServingResult:
+        """Deploy an existing trained model for serving"""
+        try:
+            # Get trained model info
+            trained_model = self.training_service.get_trained_model(model_id)
+            if not trained_model:
+                return ServingResult(
+                    success=False,
+                    errors=[f"Model {model_id} not found"]
+                )
+
+            return self.serving_service.deploy_model(
+                model_info=trained_model,
+                serving_config=serving_config
+            )
+
+        except Exception as e:
+            logger.error(f"Model deployment failed: {e}")
+            return ServingResult(
+                success=False,
+                errors=[str(e)]
+            )
+
+    def get_model_performance(self, model_id: str) -> Dict[str, Any]:
+        """Get comprehensive performance analysis for a model"""
+        try:
+            # Get evaluation results
+            evaluation_result = self.evaluation_service.get_evaluation_result(model_id)
+
+            # Get serving status if deployed
+            serving_status = self.serving_service.get_serving_status(model_id)
+
+            # Get training info
+            training_info = self.training_service.get_trained_model(model_id)
+
+            return {
+                'model_id': model_id,
+                'training_info': {
+                    'algorithm': training_info.get('training_config').algorithm if training_info else None,
+                    'problem_type': training_info.get('problem_type') if training_info else None,
+                    'created_at': training_info.get('created_at') if training_info else None
+                },
+                'evaluation_metrics': evaluation_result.evaluation_metrics if evaluation_result else {},
+                'serving_status': serving_status if not serving_status.get('error') else 'not_deployed',
+                'performance_summary': self._generate_performance_summary(evaluation_result, serving_status)
+            }
+
+        except Exception as e:
+            logger.error(f"Performance analysis failed: {e}")
+            return {'error': str(e)}
+
+    def list_models(self, include_performance: bool = False) -> List[Dict[str, Any]]:
+        """List all models with optional performance information"""
+        try:
+            models = self.training_service.list_trained_models()
+
+            if include_performance:
+                for model in models:
+                    model_id = model['model_id']
+                    performance = self.get_model_performance(model_id)
+                    model['performance'] = performance
+
+            return models
+
+        except Exception as e:
+            logger.error(f"Model listing failed: {e}")
+            return []
+
+    def _prepare_training_config(self,
+                                 model_spec: Dict[str, Any],
+                                 config: ModelConfig) -> TrainingConfig:
+        """Prepare training configuration from model specification"""
+        training_spec = model_spec.get('training', {})
+
+        return TrainingConfig(
+            algorithm=training_spec.get('algorithm', 'random_forest_classifier'),
+            problem_type=training_spec.get('problem_type'),
+            hyperparameters=training_spec.get('hyperparameters', {}),
+            cross_validation=training_spec.get('cross_validation', True),
+            cv_folds=training_spec.get('cv_folds', 5),
+            test_size=training_spec.get('test_size', 0.2),
+            preprocessing_options=training_spec.get('preprocessing_options', {}),
+            feature_selection=training_spec.get('feature_selection', False)
+        )
+
+    def _prepare_evaluation_config(self,
+                                   model_spec: Dict[str, Any],
+                                   config: ModelConfig) -> Dict[str, Any]:
+        """Prepare evaluation configuration"""
+        evaluation_spec = model_spec.get('evaluation', {})
+
+        return {
+            'perform_cv': evaluation_spec.get('perform_cv', True),
+            'cv_folds': evaluation_spec.get('cv_folds', 5),
+            'validation_curves': evaluation_spec.get('validation_curves', False),
+            'comparison_metrics': evaluation_spec.get('metrics', ['accuracy', 'f1_score', 'r2_score'])
+        }
+
+    def _prepare_serving_config(self,
+                                model_spec: Dict[str, Any],
+                                config: ModelConfig,
+                                model_id: str) -> ServingConfig:
+        """Prepare serving configuration"""
+        serving_spec = model_spec.get('serving', {})
+
+        return ServingConfig(
+            model_id=model_id,
+            serving_mode=serving_spec.get('serving_mode', 'batch'),
+            cache_predictions=serving_spec.get('cache_predictions', True),
+            cache_ttl_seconds=serving_spec.get('cache_ttl', 3600),
+            batch_size=serving_spec.get('batch_size', 1000),
+            enable_monitoring=serving_spec.get('enable_monitoring', True),
+            preprocessing_required=serving_spec.get('preprocessing_required', True)
+        )
+
+    def _prepare_evaluation_data(self,
+                                 training_data: pd.DataFrame,
+                                 target_column: str,
+                                 model_spec: Dict[str, Any]) -> pd.DataFrame:
+        """Prepare evaluation data (use training data for now, in practice would be separate test set)"""
+        # In a production system, this would use a separate test dataset
+        # For now, return the training data (the evaluation service will handle train/test split)
+        return training_data
+
+    def _generate_performance_summary(self,
+                                      evaluation_result: Optional[EvaluationResult],
+                                      serving_status: Dict[str, Any]) -> Dict[str, Any]:
+        """Generate a performance summary"""
+        summary = {
+            'overall_score': 'unknown',
+            'key_metrics': {},
+            'deployment_status': 'not_deployed',
+            'recommendations': []
+        }
+
+        if evaluation_result and evaluation_result.success:
+            metrics = evaluation_result.evaluation_metrics
+
+            # Determine overall performance score
+            if 'accuracy' in metrics:
+                score = metrics['accuracy']
+            elif 'r2_score' in metrics:
+                score = max(0, metrics['r2_score'])  # Ensure non-negative
+            else:
+                score = 0.5  # Default
+
+            if score >= 0.8:
+                summary['overall_score'] = 'excellent'
+            elif score >= 0.7:
+                summary['overall_score'] = 'good'
+            elif score >= 0.6:
+                summary['overall_score'] = 'fair'
+            else:
+                summary['overall_score'] = 'poor'
+
+            summary['key_metrics'] = metrics
+            summary['recommendations'] = evaluation_result.recommendations
+
+        if not serving_status.get('error'):
+            summary['deployment_status'] = 'deployed'
+
+        return summary
+
+    def _finalize_result(self,
+                         result: ModelResult,
+                         start_time: datetime) -> ModelResult:
+        """Finalize model result with timing and stats"""
+        end_time = datetime.now()
+        duration = (end_time - start_time).total_seconds()
+
+        # Update performance metrics
+        result.performance_metrics['total_duration'] = duration
+        result.performance_metrics['end_time'] = end_time
+        result.metadata['end_time'] = end_time
+        result.metadata['duration_seconds'] = duration
+
+        # Update execution stats
+        self.execution_stats['total_model_operations'] += 1
+        if result.success:
+            self.execution_stats['successful_model_operations'] += 1
+            self.execution_stats['models_created'] += 1
+
+            # Count deployed models
+            if result.serving_results and result.serving_results.success:
+                self.execution_stats['models_deployed'] += 1
+        else:
+            self.execution_stats['failed_model_operations'] += 1
+
+        # Update average duration
+        total = self.execution_stats['total_model_operations']
+        old_avg = self.execution_stats['average_duration']
+        self.execution_stats['average_duration'] = (old_avg * (total - 1) + duration) / total
+
+        logger.info(f"Model pipeline completed: success={result.success}, duration={duration:.2f}s")
+        return result
+
+    def get_service_statistics(self) -> Dict[str, Any]:
+        """Get comprehensive service statistics"""
+        try:
+            return {
+                'service_stats': self.get_execution_stats(),
+                'individual_service_stats': {
+                    'training': self.training_service.get_execution_stats(),
+                    'evaluation': self.evaluation_service.get_execution_stats(),
+                    'serving': self.serving_service.get_execution_stats()
+                },
+                'model_overview': {
+                    'total_models': len(self.training_service.list_trained_models()),
+                    'deployed_models': len(self.serving_service.serving_configs),
+                    'serving_cache_stats': self.serving_service.model_cache.get_stats()
+                }
+            }
+        except Exception as e:
+            logger.error(f"Failed to get service statistics: {e}")
+            return {'error': str(e)}
+
+    def get_execution_stats(self) -> Dict[str, Any]:
+        """Get service execution statistics"""
+        return {
+            **self.execution_stats,
+            'success_rate': (
+                self.execution_stats['successful_model_operations'] /
+                max(1, self.execution_stats['total_model_operations'])
+            ),
+            'deployment_rate': (
+                self.execution_stats['models_deployed'] /
+                max(1, self.execution_stats['models_created'])
+            )
+        }
+
+    def cleanup(self):
+        """Cleanup all service resources"""
+        try:
+            self.serving_service.cleanup()
+            logger.info("Model Service cleanup completed")
+        except Exception as e:
+            logger.warning(f"Model service cleanup warning: {e}")
+
+    def create_model_spec(self,
+                          algorithm: str,
+                          serving_mode: str = "batch",
+                          hyperparameters: Optional[Dict[str, Any]] = None,
+                          evaluation_metrics: Optional[List[str]] = None) -> Dict[str, Any]:
+        """
+        Helper to create model specification
+
+        Args:
+            algorithm: ML algorithm to use
+            serving_mode: How to serve the model
+            hyperparameters: Algorithm hyperparameters
+            evaluation_metrics: Metrics for evaluation
+
+        Returns:
+            Complete model specification
+        """
+        spec = {
+            'training': {
+                'algorithm': algorithm,
+                'hyperparameters': hyperparameters or {},
+                'cross_validation': True,
+                'cv_folds': 5,
+                'test_size': 0.2
+            },
+            'evaluation': {
+                'perform_cv': True,
+                'cv_folds': 5,
+                'metrics': evaluation_metrics or ['accuracy', 'f1_score', 'r2_score']
+            },
+            'serving': {
+                'serving_mode': serving_mode,
+                'cache_predictions': True,
+                'batch_size': 1000,
+                'enable_monitoring': True
+            }
+        }
+
+        return spec
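
For orientation, a minimal usage sketch of the ModelService pipeline added above. The import path follows the new isa_model/inference/legacy_services layout from the file list; the toy DataFrame, column names, and printed fields are illustrative assumptions, and running it requires the sibling model_training, model_evaluation, and model_serving modules shipped in this release.

import pandas as pd

from isa_model.inference.legacy_services.model_service import ModelConfig, ModelService

# Illustrative toy dataset with a binary "label" target column (assumption)
df = pd.DataFrame({
    "feature_a": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
    "feature_b": [0.5, 0.1, 0.9, 0.3, 0.7, 0.2],
    "label": [0, 1, 0, 1, 0, 1],
})

# Train and evaluate only; skip Step 3 (serving) by leaving serving_enabled=False
service = ModelService(ModelConfig(serving_enabled=False))

# Build the 3-step specification and run the pipeline
spec = service.create_model_spec(algorithm="random_forest_classifier")
result = service.create_model(training_data=df, target_column="label", model_spec=spec)

print(result.success)
print(result.pipeline_summary)
print(service.get_execution_stats())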