isa-model 0.4.3__py3-none-any.whl → 0.4.4__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- isa_model/core/config.py +3 -3
- isa_model/core/models/model_manager.py +1 -69
- {isa_model-0.4.3.dist-info → isa_model-0.4.4.dist-info}/METADATA +6 -1
- {isa_model-0.4.3.dist-info → isa_model-0.4.4.dist-info}/RECORD +6 -19
- isa_model/core/security/secrets.py +0 -358
- isa_model/core/storage/hf_storage.py +0 -419
- isa_model/deployment/local/__init__.py +0 -31
- isa_model/deployment/local/config.py +0 -248
- isa_model/deployment/local/gpu_gateway.py +0 -607
- isa_model/deployment/local/health_checker.py +0 -428
- isa_model/deployment/local/provider.py +0 -586
- isa_model/deployment/local/tensorrt_service.py +0 -621
- isa_model/deployment/local/transformers_service.py +0 -644
- isa_model/deployment/local/vllm_service.py +0 -527
- isa_model/inference/services/custom_model_manager.py +0 -277
- isa_model/inference/services/llm/local_llm_service.py +0 -747
- isa_model/inference/services/vision/blip_vision_service.py +0 -359
- {isa_model-0.4.3.dist-info → isa_model-0.4.4.dist-info}/WHEEL +0 -0
- {isa_model-0.4.3.dist-info → isa_model-0.4.4.dist-info}/top_level.txt +0 -0
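Note that this release removes the entire local GPU deployment stack (`isa_model/deployment/local/*`) along with `secrets.py`, `hf_storage.py`, `custom_model_manager.py`, `local_llm_service.py`, and `blip_vision_service.py`. Downstream code that imports any of these modules will raise `ImportError` after upgrading. A minimal defensive pattern, assuming your code used the removed `LocalGPUProvider` (the fallback flag and error message below are illustrative, not part of isa-model):

```python
# Hypothetical guard for callers of the removed local deployment module.
# The import path matches the deleted files listed above; the fallback
# behavior is an assumption for illustration only.
try:
    from isa_model.deployment.local import LocalGPUProvider  # removed in 0.4.4
    HAS_LOCAL_GPU = True
except ImportError:
    LocalGPUProvider = None
    HAS_LOCAL_GPU = False

if not HAS_LOCAL_GPU:
    raise RuntimeError(
        "isa-model >= 0.4.4 no longer ships local GPU deployment; "
        "pin isa-model==0.4.3 if you depend on it."
    )
```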
isa_model/deployment/local/provider.py
@@ -1,586 +0,0 @@

"""
Local GPU deployment provider

Unified provider for local GPU model deployment with support for multiple backends:
- vLLM for high-performance LLM inference
- TensorRT-LLM for maximum optimization
- HuggingFace Transformers for universal compatibility
"""

import os
import json
import logging
import asyncio
from typing import Dict, List, Optional, Any, Union
from pathlib import Path
from datetime import datetime

from .config import LocalGPUConfig, LocalServiceType, LocalBackend
from .vllm_service import VLLMService
from .tensorrt_service import TensorRTLLMService
from .transformers_service import TransformersService
from .health_checker import get_health_checker, ServiceStatus
from ...utils.gpu_utils import get_gpu_manager

logger = logging.getLogger(__name__)


class LocalGPUProvider:
    """
    Unified local GPU deployment provider.

    This provider manages local GPU model deployments with support for:
    - Multiple inference backends (vLLM, TensorRT-LLM, Transformers)
    - Automatic GPU resource management
    - Service health monitoring
    - Performance optimization

    Example:
        ```python
        from isa_model.deployment.local import LocalGPUProvider, create_vllm_config

        # Initialize provider
        provider = LocalGPUProvider()

        # Create service configuration
        config = create_vllm_config(
            service_name="llama2-7b",
            model_id="meta-llama/Llama-2-7b-chat-hf"
        )

        # Deploy service
        result = await provider.deploy(config)
        print(f"Service deployed: {result['service_url']}")

        # Use the service
        response = await provider.generate_text(
            service_name="llama2-7b",
            prompt="Hello, how are you?"
        )
        ```
    """

    def __init__(self, workspace_dir: str = "./local_deployments"):
        """
        Initialize local GPU provider.

        Args:
            workspace_dir: Directory for deployment artifacts and logs
        """
        self.workspace_dir = Path(workspace_dir)
        self.workspace_dir.mkdir(parents=True, exist_ok=True)

        # Component managers
        self.gpu_manager = get_gpu_manager()
        self.health_checker = get_health_checker()

        # Service tracking
        self.services: Dict[str, Any] = {}  # service_name -> service instance
        self.configs: Dict[str, LocalGPUConfig] = {}  # service_name -> config
        self.deployments: Dict[str, Dict[str, Any]] = {}  # deployment tracking

        # Service registry file
        self.registry_file = self.workspace_dir / "service_registry.json"
        self._load_registry()

        logger.info("Local GPU provider initialized")
        logger.info(f"Workspace directory: {self.workspace_dir}")
        logger.info(f"Available GPUs: {len(self.gpu_manager.gpus)}")

    async def deploy(self, config: LocalGPUConfig) -> Dict[str, Any]:
        """
        Deploy a model service with the specified configuration.

        Args:
            config: Local GPU deployment configuration

        Returns:
            Deployment result with service information
        """
        service_name = config.service_name

        logger.info("=" * 60)
        logger.info(f"STARTING LOCAL DEPLOYMENT: {service_name}")
        logger.info(f"MODEL: {config.model_id}")
        logger.info(f"BACKEND: {config.backend.value}")
        logger.info("=" * 60)

        try:
            # Check if service already exists
            if service_name in self.services:
                return {
                    "success": False,
                    "error": f"Service {service_name} already deployed",
                    "existing_service": self.get_service_info(service_name)
                }

            # Validate configuration
            validation_result = await self._validate_config(config)
            if not validation_result["valid"]:
                return {
                    "success": False,
                    "error": f"Configuration validation failed: {validation_result['error']}",
                    "validation_details": validation_result
                }

            # Create service instance
            service = await self._create_service(config)
            if not service:
                return {
                    "success": False,
                    "error": f"Failed to create service for backend: {config.backend.value}"
                }

            # Deploy based on backend type
            deployment_start_time = datetime.now()

            if config.backend == LocalBackend.VLLM:
                deploy_result = await self._deploy_vllm_service(service, config)
            elif config.backend == LocalBackend.TENSORRT_LLM:
                deploy_result = await self._deploy_tensorrt_service(service, config)
            elif config.backend == LocalBackend.TRANSFORMERS:
                deploy_result = await self._deploy_transformers_service(service, config)
            else:
                return {
                    "success": False,
                    "error": f"Unsupported backend: {config.backend.value}"
                }

            if deploy_result["success"]:
                # Register service
                self.services[service_name] = service
                self.configs[service_name] = config

                # Track deployment
                deployment_info = {
                    "service_name": service_name,
                    "config": config.to_dict(),
                    "backend": config.backend.value,
                    "deployed_at": deployment_start_time.isoformat(),
                    "status": "deployed",
                    **deploy_result
                }
                self.deployments[service_name] = deployment_info

                # Register with health checker
                self.health_checker.register_service(service_name, service)
                await self.health_checker.start_monitoring(service_name)

                # Save registry
                self._save_registry()

                logger.info("=" * 60)
                logger.info("LOCAL DEPLOYMENT COMPLETED SUCCESSFULLY!")
                logger.info("=" * 60)
                logger.info(f"Service: {service_name}")
                logger.info(f"Backend: {config.backend.value}")

                return {
                    "success": True,
                    "service_name": service_name,
                    "backend": config.backend.value,
                    "deployment_info": deployment_info,
                    **deploy_result
                }
            else:
                return deploy_result

        except Exception as e:
            logger.error("=" * 60)
            logger.error("LOCAL DEPLOYMENT FAILED!")
            logger.error("=" * 60)
            logger.error(f"Error: {e}")

            return {
                "success": False,
                "error": str(e),
                "service_name": service_name
            }

    async def undeploy(self, service_name: str) -> Dict[str, Any]:
        """
        Stop and remove a deployed service.

        Args:
            service_name: Name of service to undeploy

        Returns:
            Undeploy result
        """
        if service_name not in self.services:
            return {
                "success": False,
                "error": f"Service {service_name} not found"
            }

        try:
            logger.info(f"Undeploying service: {service_name}")

            service = self.services[service_name]

            # Stop monitoring
            await self.health_checker.stop_monitoring(service_name)
            self.health_checker.unregister_service(service_name)

            # Stop service
            if hasattr(service, 'stop'):
                stop_result = await service.stop()
            elif hasattr(service, 'unload_model'):
                stop_result = await service.unload_model()
            else:
                stop_result = {"success": True}

            # Clean up
            if hasattr(service, 'cleanup'):
                await service.cleanup()

            # Remove from tracking
            del self.services[service_name]
            del self.configs[service_name]
            if service_name in self.deployments:
                del self.deployments[service_name]

            # Save registry
            self._save_registry()

            logger.info(f"Service undeployed: {service_name}")

            return {
                "success": True,
                "service_name": service_name,
                "stop_result": stop_result
            }

        except Exception as e:
            logger.error(f"Failed to undeploy service {service_name}: {e}")
            return {
                "success": False,
                "error": str(e)
            }

    async def list_services(self) -> List[Dict[str, Any]]:
        """List all deployed services"""
        services = []

        for service_name, service in self.services.items():
            try:
                config = self.configs[service_name]
                health = await self.health_checker.check_service_health(service_name)
                metrics = self.health_checker.get_service_metrics(service_name)

                service_info = {
                    "service_name": service_name,
                    "model_id": config.model_id,
                    "backend": config.backend.value,
                    "service_type": config.service_type.value,
                    "status": health.get("status", "unknown"),
                    "healthy": health.get("healthy", False),
                    "response_time_ms": health.get("response_time_ms"),
                    "error_count": metrics.error_count if metrics else 0,
                    "uptime_seconds": metrics.uptime_seconds if metrics else None,
                    "deployed_at": self.deployments.get(service_name, {}).get("deployed_at")
                }

                # Add service-specific info
                if hasattr(service, 'get_service_info'):
                    service_info.update(service.get_service_info())

                services.append(service_info)

            except Exception as e:
                logger.error(f"Error getting info for service {service_name}: {e}")
                services.append({
                    "service_name": service_name,
                    "status": "error",
                    "error": str(e)
                })

        return services

    async def get_service_info(self, service_name: str) -> Optional[Dict[str, Any]]:
        """Get detailed information about a specific service"""
        if service_name not in self.services:
            return None

        try:
            service = self.services[service_name]
            config = self.configs[service_name]
            health = await self.health_checker.check_service_health(service_name)
            metrics = self.health_checker.get_service_metrics(service_name)

            info = {
                "service_name": service_name,
                "config": config.to_dict(),
                "health": health,
                "metrics": {
                    "status": metrics.status.value if metrics else "unknown",
                    "last_check": metrics.last_check.isoformat() if metrics else None,
                    "error_count": metrics.error_count if metrics else 0,
                    "consecutive_failures": metrics.consecutive_failures if metrics else 0,
                    "uptime_seconds": metrics.uptime_seconds if metrics else None,
                    "last_error": metrics.last_error if metrics else None
                } if metrics else {},
                "deployment_info": self.deployments.get(service_name, {})
            }

            # Add service-specific info
            if hasattr(service, 'get_service_info'):
                info["service_details"] = service.get_service_info()

            return info

        except Exception as e:
            logger.error(f"Error getting service info for {service_name}: {e}")
            return {
                "service_name": service_name,
                "error": str(e)
            }

    async def generate_text(self, service_name: str, prompt: str, **kwargs) -> Dict[str, Any]:
        """Generate text using a deployed service"""
        if service_name not in self.services:
            return {
                "success": False,
                "error": f"Service {service_name} not found"
            }

        try:
            service = self.services[service_name]

            # Check service health
            health = await self.health_checker.check_service_health(service_name)
            if not health.get("healthy", False):
                return {
                    "success": False,
                    "error": f"Service {service_name} is not healthy: {health.get('error', 'Unknown error')}"
                }

            # Generate text
            if hasattr(service, 'generate'):
                return await service.generate(prompt, **kwargs)
            elif hasattr(service, 'generate_text'):
                return await service.generate_text(prompt, **kwargs)
            else:
                return {
                    "success": False,
                    "error": f"Service {service_name} does not support text generation"
                }

        except Exception as e:
            logger.error(f"Text generation failed for service {service_name}: {e}")
            return {
                "success": False,
                "error": str(e)
            }

    async def chat_completion(self, service_name: str, messages: List[Dict[str, str]], **kwargs) -> Dict[str, Any]:
        """Generate chat completion using a deployed service"""
        if service_name not in self.services:
            return {
                "success": False,
                "error": f"Service {service_name} not found"
            }

        try:
            service = self.services[service_name]

            # Check service health
            health = await self.health_checker.check_service_health(service_name)
            if not health.get("healthy", False):
                return {
                    "success": False,
                    "error": f"Service {service_name} is not healthy"
                }

            # Generate chat completion
            if hasattr(service, 'chat_completions'):
                return await service.chat_completions(messages, **kwargs)
            elif hasattr(service, 'chat_completion'):
                return await service.chat_completion(messages, **kwargs)
            else:
                return {
                    "success": False,
                    "error": f"Service {service_name} does not support chat completion"
                }

        except Exception as e:
            logger.error(f"Chat completion failed for service {service_name}: {e}")
            return {
                "success": False,
                "error": str(e)
            }

    async def get_system_status(self) -> Dict[str, Any]:
        """Get overall system status"""
        system_health = self.health_checker.get_system_health()

        return {
            **system_health,
            "provider": "local_gpu",
            "workspace_dir": str(self.workspace_dir),
            "total_deployments": len(self.services),
            "available_backends": [backend.value for backend in LocalBackend],
            "gpu_status": {
                "cuda_available": self.gpu_manager.cuda_available,
                "nvidia_smi_available": self.gpu_manager.nvidia_smi_available,
                "gpu_count": len(self.gpu_manager.gpus)
            }
        }

    async def _validate_config(self, config: LocalGPUConfig) -> Dict[str, Any]:
        """Validate deployment configuration"""
        try:
            # Check GPU requirements
            compatibility = self.gpu_manager.check_gpu_compatibility(
                config.model_id,
                config.model_precision
            )

            if not compatibility[0]:
                return {
                    "valid": False,
                    "error": f"GPU compatibility check failed: {', '.join(compatibility[1])}"
                }

            # Check backend availability
            backend_available = await self._check_backend_availability(config.backend)
            if not backend_available["available"]:
                return {
                    "valid": False,
                    "error": f"Backend {config.backend.value} not available: {backend_available['error']}"
                }

            # Check port availability
            if config.backend == LocalBackend.VLLM:
                port_available = await self._check_port_available(config.port)
                if not port_available:
                    return {
                        "valid": False,
                        "error": f"Port {config.port} is not available"
                    }

            return {
                "valid": True,
                "gpu_compatibility": compatibility,
                "backend_check": backend_available
            }

        except Exception as e:
            return {
                "valid": False,
                "error": str(e)
            }

    async def _check_backend_availability(self, backend: LocalBackend) -> Dict[str, Any]:
        """Check if a backend is available"""
        try:
            if backend == LocalBackend.VLLM:
                try:
                    import vllm
                    return {"available": True}
                except ImportError:
                    return {"available": False, "error": "vLLM not installed"}

            elif backend == LocalBackend.TENSORRT_LLM:
                try:
                    import tensorrt_llm
                    return {"available": True}
                except ImportError:
                    return {"available": False, "error": "TensorRT-LLM not installed"}

            elif backend == LocalBackend.TRANSFORMERS:
                try:
                    import transformers
                    return {"available": True}
                except ImportError:
                    return {"available": False, "error": "Transformers not installed"}

            else:
                return {"available": False, "error": f"Unknown backend: {backend.value}"}

        except Exception as e:
            return {"available": False, "error": str(e)}

    async def _check_port_available(self, port: int) -> bool:
        """Check if a port is available"""
        try:
            import socket
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                result = s.connect_ex(('127.0.0.1', port))
                return result != 0  # Port is available if connection fails
        except:
            return False

    async def _create_service(self, config: LocalGPUConfig) -> Optional[Any]:
        """Create service instance based on backend"""
        try:
            if config.backend == LocalBackend.VLLM:
                return VLLMService(config)
            elif config.backend == LocalBackend.TENSORRT_LLM:
                return TensorRTLLMService(config)
            elif config.backend == LocalBackend.TRANSFORMERS:
                return TransformersService(config)
            else:
                logger.error(f"Unsupported backend: {config.backend.value}")
                return None

        except Exception as e:
            logger.error(f"Failed to create service: {e}")
            return None

    async def _deploy_vllm_service(self, service: VLLMService, config: LocalGPUConfig) -> Dict[str, Any]:
        """Deploy vLLM service"""
        result = await service.start()
        if result["success"]:
            return {
                **result,
                "service_url": f"http://{config.host}:{config.port}",
                "api_base": f"http://{config.host}:{config.port}/v1"
            }
        return result

    async def _deploy_tensorrt_service(self, service: TensorRTLLMService, config: LocalGPUConfig) -> Dict[str, Any]:
        """Deploy TensorRT-LLM service"""
        # Build engine first
        build_result = await service.build_engine()
        if not build_result["success"]:
            return build_result

        # Load model
        load_result = await service.load_model()
        return load_result

    async def _deploy_transformers_service(self, service: TransformersService, config: LocalGPUConfig) -> Dict[str, Any]:
        """Deploy Transformers service"""
        return await service.load_model()

    def _load_registry(self):
        """Load service registry from file"""
        if self.registry_file.exists():
            try:
                with open(self.registry_file, 'r') as f:
                    registry_data = json.load(f)

                # Note: We don't automatically reload services on startup
                # This would require more complex state management
                logger.info(f"Service registry loaded: {len(registry_data)} entries")

            except Exception as e:
                logger.warning(f"Failed to load service registry: {e}")

    def _save_registry(self):
        """Save service registry to file"""
        try:
            registry_data = {}
            for service_name, deployment in self.deployments.items():
                registry_data[service_name] = {
                    "config": deployment["config"],
                    "deployed_at": deployment["deployed_at"],
                    "backend": deployment["backend"]
                }

            with open(self.registry_file, 'w') as f:
                json.dump(registry_data, f, indent=2)

        except Exception as e:
            logger.error(f"Failed to save service registry: {e}")