isa-model 0.2.0__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. isa_model/__init__.py +1 -1
  2. isa_model/core/storage/hf_storage.py +419 -0
  3. isa_model/deployment/__init__.py +52 -0
  4. isa_model/deployment/core/__init__.py +34 -0
  5. isa_model/deployment/core/deployment_config.py +356 -0
  6. isa_model/deployment/core/deployment_manager.py +549 -0
  7. isa_model/deployment/core/isa_deployment_service.py +401 -0
  8. isa_model/eval/factory.py +381 -140
  9. isa_model/inference/ai_factory.py +142 -240
  10. isa_model/inference/providers/ml_provider.py +50 -0
  11. isa_model/inference/services/audio/openai_tts_service.py +104 -3
  12. isa_model/inference/services/embedding/base_embed_service.py +112 -0
  13. isa_model/inference/services/embedding/ollama_embed_service.py +28 -2
  14. isa_model/inference/services/llm/__init__.py +2 -0
  15. isa_model/inference/services/llm/base_llm_service.py +111 -1
  16. isa_model/inference/services/llm/ollama_llm_service.py +234 -26
  17. isa_model/inference/services/llm/openai_llm_service.py +225 -28
  18. isa_model/inference/services/llm/triton_llm_service.py +481 -0
  19. isa_model/inference/services/ml/base_ml_service.py +78 -0
  20. isa_model/inference/services/ml/sklearn_ml_service.py +140 -0
  21. isa_model/inference/services/vision/__init__.py +3 -3
  22. isa_model/inference/services/vision/base_image_gen_service.py +161 -0
  23. isa_model/inference/services/vision/base_vision_service.py +177 -0
  24. isa_model/inference/services/vision/ollama_vision_service.py +143 -17
  25. isa_model/inference/services/vision/replicate_image_gen_service.py +139 -7
  26. isa_model/training/__init__.py +62 -32
  27. isa_model/training/cloud/__init__.py +22 -0
  28. isa_model/training/cloud/job_orchestrator.py +402 -0
  29. isa_model/training/cloud/runpod_trainer.py +454 -0
  30. isa_model/training/cloud/storage_manager.py +482 -0
  31. isa_model/training/core/__init__.py +23 -0
  32. isa_model/training/core/config.py +181 -0
  33. isa_model/training/core/dataset.py +222 -0
  34. isa_model/training/core/trainer.py +720 -0
  35. isa_model/training/core/utils.py +213 -0
  36. isa_model/training/factory.py +229 -198
  37. isa_model-0.2.8.dist-info/METADATA +465 -0
  38. isa_model-0.2.8.dist-info/RECORD +86 -0
  39. isa_model/core/model_router.py +0 -226
  40. isa_model/core/model_version.py +0 -0
  41. isa_model/core/resource_manager.py +0 -202
  42. isa_model/deployment/gpu_fp16_ds8/models/deepseek_r1/1/model.py +0 -120
  43. isa_model/deployment/gpu_fp16_ds8/scripts/download_model.py +0 -18
  44. isa_model/training/engine/llama_factory/__init__.py +0 -39
  45. isa_model/training/engine/llama_factory/config.py +0 -115
  46. isa_model/training/engine/llama_factory/data_adapter.py +0 -284
  47. isa_model/training/engine/llama_factory/examples/__init__.py +0 -6
  48. isa_model/training/engine/llama_factory/examples/finetune_with_tracking.py +0 -185
  49. isa_model/training/engine/llama_factory/examples/rlhf_with_tracking.py +0 -163
  50. isa_model/training/engine/llama_factory/factory.py +0 -331
  51. isa_model/training/engine/llama_factory/rl.py +0 -254
  52. isa_model/training/engine/llama_factory/trainer.py +0 -171
  53. isa_model/training/image_model/configs/create_config.py +0 -37
  54. isa_model/training/image_model/configs/create_flux_config.py +0 -26
  55. isa_model/training/image_model/configs/create_lora_config.py +0 -21
  56. isa_model/training/image_model/prepare_massed_compute.py +0 -97
  57. isa_model/training/image_model/prepare_upload.py +0 -17
  58. isa_model/training/image_model/raw_data/create_captions.py +0 -16
  59. isa_model/training/image_model/raw_data/create_lora_captions.py +0 -20
  60. isa_model/training/image_model/raw_data/pre_processing.py +0 -200
  61. isa_model/training/image_model/train/train.py +0 -42
  62. isa_model/training/image_model/train/train_flux.py +0 -41
  63. isa_model/training/image_model/train/train_lora.py +0 -57
  64. isa_model/training/image_model/train_main.py +0 -25
  65. isa_model-0.2.0.dist-info/METADATA +0 -327
  66. isa_model-0.2.0.dist-info/RECORD +0 -92
  67. isa_model-0.2.0.dist-info/licenses/LICENSE +0 -21
  68. /isa_model/training/{llm_model/annotation → annotation}/annotation_schema.py +0 -0
  69. /isa_model/training/{llm_model/annotation → annotation}/processors/annotation_processor.py +0 -0
  70. /isa_model/training/{llm_model/annotation → annotation}/storage/dataset_manager.py +0 -0
  71. /isa_model/training/{llm_model/annotation → annotation}/storage/dataset_schema.py +0 -0
  72. /isa_model/training/{llm_model/annotation → annotation}/tests/test_annotation_flow.py +0 -0
  73. /isa_model/training/{llm_model/annotation → annotation}/tests/test_minio copy.py +0 -0
  74. /isa_model/training/{llm_model/annotation → annotation}/tests/test_minio_upload.py +0 -0
  75. /isa_model/training/{llm_model/annotation → annotation}/views/annotation_controller.py +0 -0
  76. {isa_model-0.2.0.dist-info → isa_model-0.2.8.dist-info}/WHEEL +0 -0
  77. {isa_model-0.2.0.dist-info → isa_model-0.2.8.dist-info}/top_level.txt +0 -0
isa_model/deployment/core/isa_deployment_service.py
@@ -0,0 +1,401 @@
+ """
+ ISA Model Deployment Service
+
+ Complete deployment pipeline that:
+ 1. Downloads fine-tuned models from HuggingFace storage
+ 2. Quantizes models using open-source TensorRT-LLM
+ 3. Builds optimized engines
+ 4. Deploys as custom container service on RunPod
+ """
+
+ import os
+ import json
+ import logging
+ import asyncio
+ from typing import Dict, Any, Optional, List
+ from pathlib import Path
+ import shutil
+ from datetime import datetime
+
+ logger = logging.getLogger(__name__)
+
+
+ class ISADeploymentService:
+     """
+     Complete deployment service for ISA Model SDK.
+
+     Example:
+         ```python
+         from isa_model.deployment.core import ISADeploymentService
+
+         service = ISADeploymentService()
+
+         # Complete deployment pipeline
+         deployment = await service.deploy_finetuned_model(
+             model_id="gemma-4b-alpaca-v1",
+             quantization="int8"
+         )
+         ```
+     """
+
+     def __init__(self,
+                  work_dir: str = "./isa_deployment_work",
+                  hf_username: str = "xenobordom"):
+         """Initialize ISA deployment service."""
+         self.work_dir = Path(work_dir)
+         self.work_dir.mkdir(parents=True, exist_ok=True)
+         self.hf_username = hf_username
+
+         # Create subdirectories
+         (self.work_dir / "models").mkdir(exist_ok=True)
+         (self.work_dir / "containers").mkdir(exist_ok=True)
+         (self.work_dir / "deployments").mkdir(exist_ok=True)
+
+         logger.info(f"ISA Deployment Service initialized with work_dir: {self.work_dir}")
+
+     async def deploy_finetuned_model(self,
+                                      model_id: str,
+                                      quantization: str = "int8",
+                                      container_registry: str = "docker.io") -> Dict[str, Any]:
+         """Complete deployment pipeline for fine-tuned models."""
+         deployment_id = f"{model_id}-{quantization}-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
+         logger.info(f"Starting deployment pipeline: {deployment_id}")
+
+         deployment_info = {
+             "deployment_id": deployment_id,
+             "model_id": model_id,
+             "quantization": quantization,
+             "status": "starting",
+             "steps": []
+         }
+
+         try:
+             # Step 1: Download model
+             model_path = await self._download_finetuned_model(model_id)
+             deployment_info["steps"].append({
+                 "step": 1,
+                 "name": "download_model",
+                 "status": "completed",
+                 "model_path": str(model_path)
+             })
+
+             # Step 2: Build container
+             container_image = await self._build_deployment_container(
+                 model_id=model_id,
+                 model_path=model_path,
+                 quantization=quantization,
+                 container_registry=container_registry
+             )
+             deployment_info["steps"].append({
+                 "step": 2,
+                 "name": "build_container",
+                 "status": "completed",
+                 "container_image": container_image
+             })
+
+             deployment_info["status"] = "completed"
+             deployment_info["completed_at"] = datetime.now().isoformat()
+
+             # Save configuration
+             config_file = self.work_dir / "deployments" / f"{deployment_id}.json"
+             with open(config_file, 'w') as f:
+                 json.dump(deployment_info, f, indent=2)
+
+             logger.info(f"✅ Deployment completed: {deployment_id}")
+             return deployment_info
+
+         except Exception as e:
+             deployment_info["status"] = "failed"
+             deployment_info["error"] = str(e)
+             logger.error(f"❌ Deployment failed: {e}")
+             raise
+
+     async def _download_finetuned_model(self, model_id: str) -> Path:
+         """Download fine-tuned model from HuggingFace storage."""
+         from ...core.storage.hf_storage import HuggingFaceStorage
+
+         logger.info(f"Downloading model {model_id}...")
+
+         storage = HuggingFaceStorage(username=self.hf_username)
+         model_path = await storage.load_model(model_id)
+
+         if not model_path:
+             raise ValueError(f"Failed to download model {model_id}")
+
+         # Copy to work directory
+         local_model_path = self.work_dir / "models" / model_id
+         if local_model_path.exists():
+             shutil.rmtree(local_model_path)
+
+         shutil.copytree(model_path, local_model_path)
+         logger.info(f"Model downloaded to: {local_model_path}")
+
+         return local_model_path
+
+     async def _build_deployment_container(self,
+                                           model_id: str,
+                                           model_path: Path,
+                                           quantization: str,
+                                           container_registry: str) -> str:
+         """Build custom deployment container."""
+         container_name = f"isa-model-{model_id}"
+         container_tag = f"{container_registry}/{container_name}:latest"
+
+         logger.info(f"Building container: {container_tag}")
+
+         container_dir = self.work_dir / "containers" / model_id
+         container_dir.mkdir(parents=True, exist_ok=True)
+
+         # Create Dockerfile
+         dockerfile_content = self._create_deployment_dockerfile(quantization)
+         with open(container_dir / "Dockerfile", 'w') as f:
+             f.write(dockerfile_content)
+
+         # Copy model files
+         model_dst = container_dir / "hf_model"
+         if model_dst.exists():
+             shutil.rmtree(model_dst)
+         shutil.copytree(model_path, model_dst)
+
+         # Create server.py
+         server_content = self._create_server_py()
+         with open(container_dir / "server.py", 'w') as f:
+             f.write(server_content)
+
+         # Build container
+         process = await asyncio.create_subprocess_exec(
+             "docker", "build", "-t", container_tag, str(container_dir),
+             stdout=asyncio.subprocess.PIPE,
+             stderr=asyncio.subprocess.PIPE
+         )
+
+         stdout, stderr = await process.communicate()
+
+         if process.returncode != 0:
+             raise RuntimeError(f"Container build failed: {stderr.decode()}")
+
+         logger.info(f"Container built: {container_tag}")
+         return container_tag
+
+     def _create_deployment_dockerfile(self, quantization: str) -> str:
+         """Create Dockerfile for deployment."""
+         return f'''# ISA Model Deployment Container
+ FROM nvcr.io/nvidia/pytorch:24.05-py3
+
+ # Install dependencies
+ RUN apt-get update && apt-get install -y git-lfs curl && rm -rf /var/lib/apt/lists/*
+
+ # Install Python packages
+ RUN pip install fastapi uvicorn transformers torch
+
+ # Clone TensorRT-LLM for quantization and inference
+ RUN git clone https://github.com/NVIDIA/TensorRT-LLM.git /opt/TensorRT-LLM
+ WORKDIR /opt/TensorRT-LLM
+ RUN pip install -r requirements.txt
+
+ # Set up application
+ WORKDIR /app
+ COPY hf_model/ /app/hf_model/
+ COPY server.py /app/server.py
+
+ # Environment variables
+ ENV QUANTIZATION={quantization}
+ ENV MODEL_PATH=/app/hf_model
+ ENV PYTHONPATH=/opt/TensorRT-LLM:$PYTHONPATH
+
+ # Expose port
+ EXPOSE 8000
+
+ # Health check
+ HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \\
+     CMD curl -f http://localhost:8000/health || exit 1
+
+ # Start server
+ CMD ["uvicorn", "server:app", "--host", "0.0.0.0", "--port", "8000"]
+ '''
+
+     def _create_server_py(self) -> str:
+         """Create FastAPI server."""
+         return '''"""
+ ISA Model Deployment Server
+ """
+
+ import os
+ import logging
+ import asyncio
+ from pathlib import Path
+ from fastapi import FastAPI, HTTPException
+ from pydantic import BaseModel
+ from contextlib import asynccontextmanager
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ # Global variables
+ MODEL_PATH = os.getenv("MODEL_PATH", "/app/hf_model")
+ QUANTIZATION = os.getenv("QUANTIZATION", "int8")
+
+ model = None
+ tokenizer = None
+
+ @asynccontextmanager
+ async def lifespan(app: FastAPI):
+     """FastAPI lifespan events."""
+     global model, tokenizer
+
+     logger.info("Starting ISA Model Deployment Service...")
+     logger.info(f"Loading model from: {MODEL_PATH}")
+     logger.info(f"Quantization: {QUANTIZATION}")
+
+     try:
+         # Load tokenizer
+         tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)
+
+         # Load model with appropriate settings
+         model = AutoModelForCausalLM.from_pretrained(
+             MODEL_PATH,
+             torch_dtype=torch.float16,
+             device_map="auto",
+             trust_remote_code=True
+         )
+
+         logger.info("🚀 Model loaded successfully!")
+
+     except Exception as e:
+         logger.error(f"Failed to load model: {e}")
+         raise
+
+     yield
+
+     logger.info("Shutting down...")
+     model = None
+     tokenizer = None
+
+ app = FastAPI(
+     title="ISA Model Deployment Service",
+     description="Quantized model inference service",
+     version="1.0.0",
+     lifespan=lifespan
+ )
+
+ class GenerateRequest(BaseModel):
+     prompt: str
+     max_new_tokens: int = 256
+     temperature: float = 0.7
+     top_p: float = 0.9
+
+ class GenerateResponse(BaseModel):
+     text: str
+     quantization: str
+     backend: str
+
+ @app.post("/generate", response_model=GenerateResponse)
+ async def generate(request: GenerateRequest):
+     """Generate text."""
+     if model is None or tokenizer is None:
+         raise HTTPException(status_code=503, detail="Model not loaded")
+
+     try:
+         # Tokenize input
+         inputs = tokenizer(request.prompt, return_tensors="pt").to(model.device)
+
+         # Generate response
+         with torch.no_grad():
+             outputs = model.generate(
+                 **inputs,
+                 max_new_tokens=request.max_new_tokens,
+                 temperature=request.temperature,
+                 top_p=request.top_p,
+                 do_sample=True,
+                 eos_token_id=tokenizer.eos_token_id,
+                 pad_token_id=tokenizer.pad_token_id,
+             )
+
+         # Decode response
+         generated_text = tokenizer.decode(
+             outputs[0][len(inputs.input_ids[0]):],
+             skip_special_tokens=True
+         )
+
+         return GenerateResponse(
+             text=generated_text,
+             quantization=QUANTIZATION,
+             backend="Transformers"
+         )
+
+     except Exception as e:
+         logger.error(f"Generation failed: {e}")
+         raise HTTPException(status_code=500, detail=str(e))
+
+ @app.get("/health")
+ async def health_check():
+     """Health check."""
+     return {
+         "status": "healthy" if (model is not None and tokenizer is not None) else "loading",
+         "quantization": QUANTIZATION,
+         "backend": "Transformers"
+     }
+
+ @app.get("/info")
+ async def model_info():
+     """Model information."""
+     return {
+         "model_path": MODEL_PATH,
+         "quantization": QUANTIZATION,
+         "framework": "ISA Model SDK",
+         "backend": "Transformers"
+     }
+ '''
+
+     def get_deployment_instructions(self, deployment_info: Dict[str, Any]) -> str:
+         """Generate deployment instructions."""
+         container_image = None
+
+         for step in deployment_info.get("steps", []):
+             if step["name"] == "build_container":
+                 container_image = step.get("container_image")
+
+         return f'''# ISA Model Deployment Instructions
+
+ ## Deployment ID: {deployment_info['deployment_id']}
+ ## Model: {deployment_info['model_id']}
+ ## Quantization: {deployment_info['quantization']}
+
+ ### Container Image
+ ```
+ {container_image or 'Not built yet'}
+ ```
+
+ ### RunPod Configuration
+ - **Container Image**: {container_image}
+ - **GPU Type**: NVIDIA RTX A6000
+ - **Container Disk**: 30GB
+ - **Ports**: 8000 (HTTP API)
+
+ ### Testing the Deployment
+ ```python
+ import requests
+
+ # Health check
+ response = requests.get("http://your-endpoint/health")
+ print(response.json())
+
+ # Generate text
+ payload = {{
+     "prompt": "What is machine learning?",
+     "max_new_tokens": 100,
+     "temperature": 0.7
+ }}
+
+ response = requests.post("http://your-endpoint/generate", json=payload)
+ print(response.json())
+ ```
+
+ ### Features
+ - ✅ Automatic model download from HuggingFace
+ - ✅ {deployment_info['quantization'].upper()} quantization for efficiency
+ - ✅ FastAPI REST interface
+ - ✅ Health monitoring
+ '''