isa-model 0.2.0__py3-none-any.whl → 0.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. isa_model/__init__.py +1 -1
  2. isa_model/core/storage/hf_storage.py +419 -0
  3. isa_model/deployment/__init__.py +52 -0
  4. isa_model/deployment/core/__init__.py +34 -0
  5. isa_model/deployment/core/deployment_config.py +356 -0
  6. isa_model/deployment/core/deployment_manager.py +549 -0
  7. isa_model/deployment/core/isa_deployment_service.py +401 -0
  8. isa_model/eval/factory.py +381 -140
  9. isa_model/inference/ai_factory.py +142 -240
  10. isa_model/inference/providers/ml_provider.py +50 -0
  11. isa_model/inference/services/audio/openai_tts_service.py +104 -3
  12. isa_model/inference/services/embedding/base_embed_service.py +112 -0
  13. isa_model/inference/services/embedding/ollama_embed_service.py +28 -2
  14. isa_model/inference/services/llm/__init__.py +2 -0
  15. isa_model/inference/services/llm/base_llm_service.py +111 -1
  16. isa_model/inference/services/llm/ollama_llm_service.py +234 -26
  17. isa_model/inference/services/llm/openai_llm_service.py +243 -28
  18. isa_model/inference/services/llm/triton_llm_service.py +481 -0
  19. isa_model/inference/services/ml/base_ml_service.py +78 -0
  20. isa_model/inference/services/ml/sklearn_ml_service.py +140 -0
  21. isa_model/inference/services/vision/__init__.py +3 -3
  22. isa_model/inference/services/vision/base_image_gen_service.py +161 -0
  23. isa_model/inference/services/vision/base_vision_service.py +177 -0
  24. isa_model/inference/services/vision/ollama_vision_service.py +143 -17
  25. isa_model/inference/services/vision/replicate_image_gen_service.py +139 -7
  26. isa_model/training/__init__.py +62 -32
  27. isa_model/training/cloud/__init__.py +22 -0
  28. isa_model/training/cloud/job_orchestrator.py +402 -0
  29. isa_model/training/cloud/runpod_trainer.py +454 -0
  30. isa_model/training/cloud/storage_manager.py +482 -0
  31. isa_model/training/core/__init__.py +23 -0
  32. isa_model/training/core/config.py +181 -0
  33. isa_model/training/core/dataset.py +222 -0
  34. isa_model/training/core/trainer.py +720 -0
  35. isa_model/training/core/utils.py +213 -0
  36. isa_model/training/factory.py +229 -198
  37. isa_model-0.2.9.dist-info/METADATA +465 -0
  38. isa_model-0.2.9.dist-info/RECORD +86 -0
  39. isa_model/core/model_router.py +0 -226
  40. isa_model/core/model_version.py +0 -0
  41. isa_model/core/resource_manager.py +0 -202
  42. isa_model/deployment/gpu_fp16_ds8/models/deepseek_r1/1/model.py +0 -120
  43. isa_model/deployment/gpu_fp16_ds8/scripts/download_model.py +0 -18
  44. isa_model/training/engine/llama_factory/__init__.py +0 -39
  45. isa_model/training/engine/llama_factory/config.py +0 -115
  46. isa_model/training/engine/llama_factory/data_adapter.py +0 -284
  47. isa_model/training/engine/llama_factory/examples/__init__.py +0 -6
  48. isa_model/training/engine/llama_factory/examples/finetune_with_tracking.py +0 -185
  49. isa_model/training/engine/llama_factory/examples/rlhf_with_tracking.py +0 -163
  50. isa_model/training/engine/llama_factory/factory.py +0 -331
  51. isa_model/training/engine/llama_factory/rl.py +0 -254
  52. isa_model/training/engine/llama_factory/trainer.py +0 -171
  53. isa_model/training/image_model/configs/create_config.py +0 -37
  54. isa_model/training/image_model/configs/create_flux_config.py +0 -26
  55. isa_model/training/image_model/configs/create_lora_config.py +0 -21
  56. isa_model/training/image_model/prepare_massed_compute.py +0 -97
  57. isa_model/training/image_model/prepare_upload.py +0 -17
  58. isa_model/training/image_model/raw_data/create_captions.py +0 -16
  59. isa_model/training/image_model/raw_data/create_lora_captions.py +0 -20
  60. isa_model/training/image_model/raw_data/pre_processing.py +0 -200
  61. isa_model/training/image_model/train/train.py +0 -42
  62. isa_model/training/image_model/train/train_flux.py +0 -41
  63. isa_model/training/image_model/train/train_lora.py +0 -57
  64. isa_model/training/image_model/train_main.py +0 -25
  65. isa_model-0.2.0.dist-info/METADATA +0 -327
  66. isa_model-0.2.0.dist-info/RECORD +0 -92
  67. isa_model-0.2.0.dist-info/licenses/LICENSE +0 -21
  68. /isa_model/training/{llm_model/annotation → annotation}/annotation_schema.py +0 -0
  69. /isa_model/training/{llm_model/annotation → annotation}/processors/annotation_processor.py +0 -0
  70. /isa_model/training/{llm_model/annotation → annotation}/storage/dataset_manager.py +0 -0
  71. /isa_model/training/{llm_model/annotation → annotation}/storage/dataset_schema.py +0 -0
  72. /isa_model/training/{llm_model/annotation → annotation}/tests/test_annotation_flow.py +0 -0
  73. /isa_model/training/{llm_model/annotation → annotation}/tests/test_minio copy.py +0 -0
  74. /isa_model/training/{llm_model/annotation → annotation}/tests/test_minio_upload.py +0 -0
  75. /isa_model/training/{llm_model/annotation → annotation}/views/annotation_controller.py +0 -0
  76. {isa_model-0.2.0.dist-info → isa_model-0.2.9.dist-info}/WHEEL +0 -0
  77. {isa_model-0.2.0.dist-info → isa_model-0.2.9.dist-info}/top_level.txt +0 -0
isa_model-0.2.9.dist-info/METADATA
@@ -0,0 +1,465 @@
+ Metadata-Version: 2.4
+ Name: isa_model
+ Version: 0.2.9
+ Summary: Unified AI model serving framework
+ Author: isA_Model Contributors
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ Requires-Dist: fastapi>=0.95.0
+ Requires-Dist: numpy>=1.20.0
+ Requires-Dist: httpx>=0.23.0
+ Requires-Dist: pydantic>=2.0.0
+ Requires-Dist: uvicorn>=0.22.0
+ Requires-Dist: requests>=2.28.0
+ Requires-Dist: aiohttp>=3.8.0
+ Requires-Dist: transformers>=4.30.0
+ Requires-Dist: langchain-core>=0.1.0
+ Requires-Dist: huggingface-hub>=0.16.0
+ Requires-Dist: kubernetes>=25.3.0
+ Requires-Dist: mlflow>=2.4.0
+ Requires-Dist: torch>=2.0.0
+ Requires-Dist: openai>=1.10.0
+ Requires-Dist: replicate>=0.23.0
+ Requires-Dist: python-dotenv>=1.0.0
+ Requires-Dist: ollama>=0.3.0
+ Requires-Dist: runpod>=1.0.0
+ Requires-Dist: boto3>=1.26.0
+ Requires-Dist: google-cloud-storage>=2.7.0
+ Requires-Dist: datasets>=2.10.0
+ Requires-Dist: accelerate>=0.20.0
+ Requires-Dist: bitsandbytes>=0.39.0
+ Requires-Dist: peft>=0.4.0
+ Requires-Dist: trl>=0.4.0
+
+ # isa_model_sdk - Unified AI Model Serving Framework
+
+ A comprehensive Python framework for working with multiple AI providers and models through a unified interface. It supports OpenAI, Replicate, Ollama, and more, with advanced training and evaluation capabilities.
+
+ ## Installation
+
+ ```bash
+ pip install isa_model_sdk
+ ```
+
+ ## Quick Start
+
+ The isa_model_sdk package supports three main usage patterns:
+
+ ### 1. Pass API Keys Directly (Recommended)
+
+ This is the most flexible approach - no environment variables needed:
+
+ ```python
+ from isa_model.inference.ai_factory import AIFactory
+
+ # Create factory instance
+ factory = AIFactory.get_instance()
+
+ # Use OpenAI with API key
+ llm = factory.get_llm(
+     model_name="gpt-4o-mini",
+     provider="openai",
+     api_key="your-openai-api-key-here"
+ )
+
+ # Use Replicate for image generation
+ image_gen = factory.get_vision_model(
+     model_name="stability-ai/sdxl",
+     provider="replicate",
+     api_key="your-replicate-token-here"
+ )
+ ```
+
+ ### 2. Use Environment Variables
+
+ Set your API keys as environment variables:
+
+ ```bash
+ export OPENAI_API_KEY="your-openai-api-key"
+ export REPLICATE_API_TOKEN="your-replicate-token"
+ ```
+
+ Then use without passing keys:
+
+ ```python
+ from isa_model.inference.ai_factory import AIFactory
+
+ factory = AIFactory.get_instance()
+
+ # Will automatically use OPENAI_API_KEY from environment
+ llm = factory.get_llm(model_name="gpt-4o-mini", provider="openai")
+
+ # Will automatically use REPLICATE_API_TOKEN from environment
+ image_gen = factory.get_vision_model(model_name="stability-ai/sdxl", provider="replicate")
+ ```
+
+ ### 3. Use Local Models (No API Key Needed)
+
+ For local models like Ollama, no API keys are required:
+
+ ```python
+ from isa_model.inference.ai_factory import AIFactory
+
+ factory = AIFactory.get_instance()
+
+ # Use local Ollama model (no API key needed)
+ llm = factory.get_llm(model_name="llama3.1", provider="ollama")
+ ```
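+
+ This assumes a local Ollama server is installed and running (it listens on localhost:11434 by default) and that the model has already been pulled, for example:
+
+ ```bash
+ # One-time model download; requires the Ollama server to be running
+ ollama pull llama3.1
+ ```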
+
+ ## 🎯 Training & Evaluation Framework
+
+ **NEW in v0.0.1**: Comprehensive training and evaluation capabilities for LLMs, Stable Diffusion, and ML models.
+
+ ### Quick Training Example
+
+ ```python
+ from isa_model.training import TrainingFactory, train_gemma
+ from isa_model.eval import EvaluationFactory
+
+ # Quick Gemma training
+ model_path = train_gemma(
+     dataset_path="tatsu-lab/alpaca",
+     model_size="4b",
+     num_epochs=3,
+     use_lora=True
+ )
+
+ # Comprehensive evaluation
+ evaluator = EvaluationFactory(use_wandb=True)
+ results = evaluator.evaluate_llm(
+     model_path=model_path,
+     dataset_path="test_data.json",
+     metrics=["perplexity", "bleu", "rouge"]
+ )
+
+ # Run benchmarks
+ mmlu_results = evaluator.run_benchmark(
+     model_path=model_path,
+     benchmark="mmlu"
+ )
+ ```
+
+ ### Advanced Training Configuration
+
+ ```python
+ from isa_model.training import TrainingFactory
+
+ factory = TrainingFactory()
+
+ # Advanced LLM training
+ model_path = factory.train_model(
+     model_name="google/gemma-2-4b-it",
+     dataset_path="custom_dataset.json",
+     use_lora=True,
+     batch_size=4,
+     num_epochs=3,
+     learning_rate=2e-5,
+     lora_rank=8,
+     lora_alpha=16
+ )
+
+ # Upload to HuggingFace
+ hf_url = factory.upload_to_huggingface(
+     model_path=model_path,
+     hf_model_name="your-username/gemma-4b-custom",
+     hf_token="your-hf-token"
+ )
+ ```
+
+ ### Cloud Training on RunPod
+
+ ```python
+ # Train on RunPod cloud infrastructure
+ result = factory.train_on_runpod(
+     model_name="google/gemma-2-4b-it",
+     dataset_path="tatsu-lab/alpaca",
+     runpod_api_key="your-runpod-key",
+     template_id="your-template-id",
+     gpu_type="NVIDIA RTX A6000"
+ )
+ ```
+
+ ## Function Calling with bind_tools
+
+ **Enhanced in v0.0.1**: LangChain-compatible function calling interface for all LLM services.
+
+ ### Basic Function Calling
+
+ ```python
+ import asyncio
+ from isa_model.inference.ai_factory import AIFactory
+
+ # Define your tool functions
+ def get_weather(location: str) -> str:
+     """Get weather information for a location"""
+     weather_data = {
+         "paris": "Sunny, 22°C",
+         "london": "Cloudy, 18°C",
+         "tokyo": "Clear, 25°C"
+     }
+     return weather_data.get(location.lower(), f"Weather data not available for {location}")
+
+ def calculate_math(expression: str) -> str:
+     """Calculate a mathematical expression"""
+     try:
+         result = eval(expression)  # Note: eval is unsafe; use a proper expression parser in production
+         return f"The result of {expression} is {result}"
+     except Exception:
+         return f"Error calculating {expression}"
+
+ async def main():
+     factory = AIFactory.get_instance()
+
+     # Create LLM with any provider
+     llm = factory.get_llm("gpt-4o-mini", "openai", api_key="your-key")
+     # or: llm = factory.get_llm("llama3.1", "ollama")  # Local model
+
+     # Bind tools to the service (LangChain-style interface)
+     llm_with_tools = llm.bind_tools([get_weather, calculate_math])
+
+     # Use the service with tools
+     response = await llm_with_tools.achat([
+         {"role": "user", "content": "What's the weather in Paris? Also calculate 15 * 8"}
+     ])
+
+     print(response)  # Model will use tools automatically
+     await llm.close()
+
+ asyncio.run(main())
+ ```
+
+ ## Supported Services
+
+ ### Language Models (LLM)
+
+ ```python
+ # OpenAI models
+ llm = factory.get_llm("gpt-4o-mini", "openai", api_key="your-key")
+ llm = factory.get_llm("gpt-4o", "openai", api_key="your-key")
+
+ # Ollama models (local)
+ llm = factory.get_llm("llama3.1", "ollama")
+ llm = factory.get_llm("codellama", "ollama")
+
+ # Replicate models
+ llm = factory.get_llm("meta/llama-3-70b-instruct", "replicate", api_key="your-token")
+
+ # All LLM services support bind_tools() for function calling
+ llm_with_tools = llm.bind_tools([your_functions])
+ ```
+
+ ### Vision Models
+
+ ```python
+ # OpenAI vision
+ vision = factory.get_vision_model("gpt-4o", "openai", api_key="your-key")
+
+ # Replicate image generation
+ image_gen = factory.get_vision_model("stability-ai/sdxl", "replicate", api_key="your-token")
+
+ # Ollama vision (local)
+ vision = factory.get_vision_model("llava", "ollama")
+ ```
+
+ ### Embedding Models
+
+ ```python
+ # OpenAI embeddings
+ embedder = factory.get_embedding("text-embedding-3-small", "openai", {"api_key": "your-key"})
+
+ # Ollama embeddings (local)
+ embedder = factory.get_embedding("bge-m3", "ollama")
+ ```
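+
+ The call used to produce an embedding depends on the service class; the sketch below assumes an async `create_text_embedding` method, which is a hypothetical placeholder name; check `base_embed_service.py` for the actual interface.
+
+ ```python
+ import asyncio
+ from isa_model.inference.ai_factory import AIFactory
+
+ async def embed_example():
+     factory = AIFactory.get_instance()
+     embedder = factory.get_embedding("bge-m3", "ollama")  # local model, no API key
+
+     # Hypothetical method name; see base_embed_service.py for the real signature
+     vector = await embedder.create_text_embedding("Unified AI model serving")
+     print(len(vector))
+
+ asyncio.run(embed_example())
+ ```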
+
+ ## Training Framework Features
+
+ ### Multi-Modal Training Support
+ - **LLM Training**: Gemma, Llama, GPT-style models with LoRA/QLoRA
+ - **Stable Diffusion**: Image generation model training
+ - **ML Models**: XGBoost, Random Forest, traditional ML
+ - **Computer Vision**: CNN, Vision Transformers
+
+ ### Training Modes
+ - **Local Training**: On your machine with CPU/GPU
+ - **Cloud Training**: RunPod, AWS, GCP integration
+ - **Distributed Training**: Multi-GPU support
+
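+ Local and cloud runs go through the same `TrainingFactory`; a minimal sketch based on the calls shown above (dataset paths, keys, and IDs are placeholders):
+
+ ```python
+ from isa_model.training import TrainingFactory
+
+ factory = TrainingFactory()
+
+ # Local training on this machine (CPU/GPU)
+ local_model_path = factory.train_model(
+     model_name="google/gemma-2-4b-it",
+     dataset_path="custom_dataset.json",
+     use_lora=True,
+     num_epochs=3
+ )
+
+ # Cloud training on RunPod (same model and dataset, different execution backend)
+ cloud_result = factory.train_on_runpod(
+     model_name="google/gemma-2-4b-it",
+     dataset_path="tatsu-lab/alpaca",
+     runpod_api_key="your-runpod-key",
+     template_id="your-template-id",
+     gpu_type="NVIDIA RTX A6000"
+ )
+ ```
+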
+ ### Data Pipeline
+ - **Annotation Service**: Human-in-the-loop data annotation
+ - **Dataset Management**: HuggingFace, local, cloud storage
+ - **Quality Control**: Data validation and filtering
+
+ ## Evaluation Framework Features
+
+ ### Comprehensive Evaluation
+ - **LLM Metrics**: Perplexity, BLEU, ROUGE, BERTScore
+ - **Benchmark Tests**: MMLU, HellaSwag, ARC, GSM8K
+ - **Image Metrics**: FID, IS, LPIPS for generative models
+ - **Custom Metrics**: Domain-specific evaluations
+
+ ### Experiment Tracking
+ - **Weights & Biases**: Experiment tracking and visualization
+ - **MLflow**: Model registry and experiment management
+ - **Model Comparison**: Side-by-side performance analysis
+
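+ Tracking is configured on the `EvaluationFactory` itself; a short sketch that combines the tracking, benchmark, and comparison calls used elsewhere in this README (model paths are placeholders):
+
+ ```python
+ from isa_model.eval import EvaluationFactory
+
+ # Log every evaluation run to Weights & Biases
+ evaluator = EvaluationFactory(use_wandb=True, wandb_project="my-project")
+
+ # Benchmark a fine-tuned checkpoint, then compare it against its base model
+ benchmark_results = evaluator.run_benchmark(
+     model_path="path/to/finetuned-model",
+     benchmark="mmlu"
+ )
+ comparison = evaluator.compare_models([
+     "google/gemma-2-4b-it",     # Base model
+     "path/to/finetuned-model"   # Fine-tuned model
+ ], benchmark="arc")
+
+ print(benchmark_results, comparison)
+ ```
+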
+ ## Usage Examples
+
+ ### Chat Completion
+
+ ```python
+ import asyncio
+ from isa_model.inference.ai_factory import AIFactory
+
+ async def chat_example():
+     factory = AIFactory.get_instance()
+     llm = factory.get_llm("gpt-4o-mini", "openai", api_key="your-key")
+
+     messages = [
+         {"role": "user", "content": "Hello, how are you?"}
+     ]
+
+     response = await llm.achat(messages)
+     print(response)
+
+ # Run the async function
+ asyncio.run(chat_example())
+ ```
+
+ ### Image Generation
+
+ ```python
+ import asyncio
+ from isa_model.inference.ai_factory import AIFactory
+
+ async def image_gen_example():
+     factory = AIFactory.get_instance()
+     image_gen = factory.get_vision_model(
+         "stability-ai/sdxl",
+         "replicate",
+         api_key="your-replicate-token"
+     )
+
+     result = await image_gen.generate_image(
+         prompt="A beautiful sunset over mountains",
+         width=1024,
+         height=1024
+     )
+
+     # Save the generated image
+     with open("generated_image.png", "wb") as f:
+         f.write(result["image_data"])
+
+ asyncio.run(image_gen_example())
+ ```
+
+ ### Complete Training and Evaluation Workflow
+
+ ```python
+ from isa_model.training import TrainingFactory
+ from isa_model.eval import EvaluationFactory
+
+ # Initialize factories
+ trainer = TrainingFactory()
+ evaluator = EvaluationFactory(use_wandb=True, wandb_project="my-project")
+
+ # Train model
+ model_path = trainer.train_model(
+     model_name="google/gemma-2-4b-it",
+     dataset_path="training_data.json",
+     use_lora=True,
+     num_epochs=3
+ )
+
+ # Evaluate model
+ results = evaluator.evaluate_llm(
+     model_path=model_path,
+     dataset_path="test_data.json",
+     metrics=["bleu", "rouge", "accuracy"]
+ )
+
+ # Run benchmarks
+ benchmark_results = evaluator.run_benchmark(
+     model_path=model_path,
+     benchmark="mmlu"
+ )
+
+ # Compare with base model
+ comparison = evaluator.compare_models([
+     "google/gemma-2-4b-it",  # Base model
+     model_path  # Fine-tuned model
+ ], benchmark="arc")
+
+ print(f"Training completed: {model_path}")
+ print(f"Evaluation results: {results}")
+ ```
+
+ ## What's New in v0.0.1
+
+ ### 🎯 Training Framework
+ - **Multi-modal training**: LLM, Stable Diffusion, ML models
+ - **Cloud integration**: RunPod training support
+ - **LoRA/QLoRA**: Memory-efficient fine-tuning
+ - **HuggingFace integration**: Direct dataset loading and model uploading
+
+ ### 📊 Evaluation Framework
+ - **Comprehensive metrics**: BLEU, ROUGE, perplexity, and more
+ - **Standard benchmarks**: MMLU, HellaSwag, ARC, GSM8K
+ - **Experiment tracking**: Weights & Biases and MLflow integration
+ - **Model comparison**: Side-by-side performance analysis
+
+ ### 🔧 Enhanced Inference
+ - **Improved function calling**: Better tool binding and execution
+ - **Better error handling**: More informative error messages
+ - **Performance optimizations**: Faster model loading and inference
+
+ ## Development
+
+ ### Installing for Development
+
+ ```bash
+ git clone <repository-url>
+ cd isA_Model
+ pip install -e .
+ ```
+
+ ### Running Tests
+
+ ```bash
+ # Set environment variables
+ export OPENAI_API_KEY="your-key"
+ export REPLICATE_API_TOKEN="your-token"
+
+ # Run inference tests
+ python tests/units/inference/test_all_services.py
+
+ # Run training tests
+ python tests/test_training_setup.py
+ ```
+
+ ### Building and Publishing
+
+ ```bash
+ # Build the package
+ python -m build
+
+ # Upload to PyPI (requires PYPI_API_TOKEN in .env.local)
+ source .venv/bin/activate
+ source .env.local
+ python -m twine upload dist/isa_model_sdk-0.0.1* --username __token__ --password "$PYPI_API_TOKEN"
+ ```
+
+ ## License
+
+ MIT License - see LICENSE file for details.
+
+ ## Contributing
+
+ Contributions are welcome! Please read our contributing guidelines and submit pull requests to our GitHub repository.
+
+ ## Support
+
+ For questions and support, please open an issue on our GitHub repository.
isa_model-0.2.9.dist-info/RECORD
@@ -0,0 +1,86 @@
+ isa_model/__init__.py,sha256=skxx7AA-1BzIT_UaDHcNmIo4rEhgL8MqOk8vPpZPrAo,87
+ isa_model/core/model_manager.py,sha256=eQp0MV0x5sghL1qliPUWkFX4sEKqInyGLoICfNkJnZM,5275
+ isa_model/core/model_registry.py,sha256=gT8yFxi1gC-45Bolc9WX19ZvrjuV1xyBgQX6TFhz62k,14032
+ isa_model/core/model_storage.py,sha256=yMLapW87EY1EPXw6S7H8UQAZh3hJ1KxsEohjgjw-HrA,4507
+ isa_model/core/storage/hf_storage.py,sha256=HTj1-YGJM3Q-9_Adw7u4NjEmSdr0njsFEL45KXzfcFw,14701
+ isa_model/core/storage/local_storage.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ isa_model/core/storage/minio_storage.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ isa_model/deployment/__init__.py,sha256=Wu-sBbQtwx7wzWu_MRON3RPmI4y8UfhK8pGe12-iUzs,1323
+ isa_model/deployment/core/__init__.py,sha256=QJkJrs0FYgYcjvnHMSvAtUBiT6uq_ciqLKWLwx0mkDg,803
+ isa_model/deployment/core/deployment_config.py,sha256=__bHohsvbdZK_rS_86S1rSHPPP1bTkOnx_G0cj1HMcA,11305
+ isa_model/deployment/core/deployment_manager.py,sha256=kICHX1V8wIlmldkrfdqakz2OAjitUfGY6ZG_QjGzZbM,20068
+ isa_model/deployment/core/isa_deployment_service.py,sha256=sXU7REZ4xhUUGrpqxlJh-twx18rd97Da4sPEk62QaME,12600
+ isa_model/deployment/gpu_int8_ds8/app/server.py,sha256=lwWxdnR2DNEd0vIGQyfabKtDSUzSHVQsy3Z_AJejpVg,2102
+ isa_model/deployment/gpu_int8_ds8/scripts/test_client.py,sha256=aCULgRYzEQj_ELUK1bmPgN99yvFgNR5C0O3gc8S32pg,1421
+ isa_model/deployment/gpu_int8_ds8/scripts/test_client_os.py,sha256=XXrneTCHUeh1LNRcu-YtZQ5B4pNawlrxC-cTWmJU2A8,936
+ isa_model/eval/__init__.py,sha256=3sM7qLSIL_RMKcsmkCYcjOjv9ozuk16r7pnl4F-XeNA,1197
+ isa_model/eval/benchmarks.py,sha256=_L4Vwj2hwf2yhqoleIASO9z5e3LRCClCVEVCQbGt0I8,16885
+ isa_model/eval/factory.py,sha256=uQXD1cZGPaMss2YGwtr8xONK9i_K7kHZG7-uwvNgEpk,29416
+ isa_model/eval/metrics.py,sha256=mYeGwSa9PkgY0p-vadAscvak-pLrVfCSrsmAodVpgNQ,22584
+ isa_model/inference/__init__.py,sha256=usfuQJ4zYY2RRtHkE-V6LuJ5aN7WJogtPUj9Qmy4Wvw,318
+ isa_model/inference/ai_factory.py,sha256=ccU-OlnOz3X7ohjg9fa6tzS0CDR0LDKUR8oA_qctwF4,10858
+ isa_model/inference/base.py,sha256=qwOddnSGI0GUdD6qIdGBPQpkW7UjU3Y-zaZvu70B4WA,1278
+ isa_model/inference/adapter/unified_api.py,sha256=67_Ok8W20m6Otf6r9WyOEVpnxondP4UAxOASk9ozDk4,8668
+ isa_model/inference/providers/__init__.py,sha256=a83q-LMFv8u47wf0XtxvqOw_mlVgA_90wtuwy02qdDE,581
+ isa_model/inference/providers/base_provider.py,sha256=btkSXE7o1IfOpv22hMM6_DNlm05tbLMszsP1J4T26KE,924
+ isa_model/inference/providers/ml_provider.py,sha256=4oGGF7lVWQ91Qh3h7olyPFoACLxCROaMxUZlDiZrRL4,1661
+ isa_model/inference/providers/model_cache_manager.py,sha256=dLRpx7OJweQ5LcSAkU7D0DQRfLtIhG6nGvg4W_gau80,15315
+ isa_model/inference/providers/ollama_provider.py,sha256=BLkWp4gmCw6Fwf1yNRY90VftMqwca9YOGOHf6DqVEKs,2692
+ isa_model/inference/providers/openai_provider.py,sha256=8ywUsrvlvC7VY3LNOVJP1IcRwBMi1NvG0PoI0lYo4jM,3881
+ isa_model/inference/providers/replicate_provider.py,sha256=qXnK3Yzy5-gaduVJVY8asrIIi-97m4WGUkG963_4ifk,3948
+ isa_model/inference/providers/triton_provider.py,sha256=GKlth7cTOx6ERbsXXJ0gDNby3kVGQNULBDt098BXBSU,15258
+ isa_model/inference/services/__init__.py,sha256=p-UlEGMnadGUD6zzwfAjf367S2QQ-z1sD6TP-K4EjEM,353
+ isa_model/inference/services/base_service.py,sha256=PB6eZp-PynUdo9a0QofvHgrrJLUFYM_FSafTg7fvWrY,3083
+ isa_model/inference/services/audio/base_stt_service.py,sha256=tIfdRLEppcFEyTEmI8zi8OwMd7wVP423MQDN4iYDEcE,2800
+ isa_model/inference/services/audio/base_tts_service.py,sha256=BzZ3JrrLpm4COthNyNrIO2QgP7RZkXDNPEELEKHzIbA,4164
+ isa_model/inference/services/audio/openai_tts_service.py,sha256=0YvSfG4q3IEuJveXVdsGq6jbkJ9AbcLf1k4RDnKB5ks,6222
+ isa_model/inference/services/embedding/base_embed_service.py,sha256=Nr6snNtOM0_ZqFfJdV7ThTb2nYVHYddGoOJXkGuyBIg,3259
+ isa_model/inference/services/embedding/ollama_embed_service.py,sha256=s6LPSh-D06kFYXQjoKJp8jnatW5cx_unGbVFaq7tm5c,4745
+ isa_model/inference/services/embedding/openai_embed_service.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ isa_model/inference/services/llm/__init__.py,sha256=C6t9w33j3Ap4oGcJal9-htifKe0rxwws_kC3F-_B_Ps,341
+ isa_model/inference/services/llm/base_llm_service.py,sha256=hf4egO9_s3rOQYwyhDS6O_8ECIAltkj4Ir89PTosraE,8381
+ isa_model/inference/services/llm/ollama_llm_service.py,sha256=EfLdoovyrChYBlGreQukpSZt5l6DkfXwjjmPPovmm70,12934
+ isa_model/inference/services/llm/openai_llm_service.py,sha256=k2sETG7qaMfYMR_THP_dh_wQS3zvXu4zJp27kc09-K4,15168
+ isa_model/inference/services/llm/triton_llm_service.py,sha256=ZFo7JoZ799Nvyi8Cz1jfWOa6TUn0hDRJtBrotadMAd4,17673
+ isa_model/inference/services/ml/base_ml_service.py,sha256=mLBA6ENowa3KVzNqHyhWxf_Pr-cJJj84lDE4TniPzYI,2894
+ isa_model/inference/services/ml/sklearn_ml_service.py,sha256=Lf9JrwvI25lca7JBbjB_e66eAUtXFbwxZ3Hs13dVGkA,5512
+ isa_model/inference/services/others/table_transformer_service.py,sha256=r74h6QUSwSj6jTt-gRProz9SgwBwKWDe50NR0uqW0ZI,2367
+ isa_model/inference/services/vision/__init__.py,sha256=N9Zr7o2uQKoyUEvpmyOIgXPx9ivrix3gQ1OLoiQ7BLo,283
+ isa_model/inference/services/vision/base_image_gen_service.py,sha256=XC0PWlH3LXMGhic57BjEucwXm1rU5_g3mbMoYQiEU5c,5410
+ isa_model/inference/services/vision/base_vision_service.py,sha256=Yk2C9rD3zfORWCXSYTWPj5HB08A_eD1YiNIShF0_MjY,5418
+ isa_model/inference/services/vision/ollama_vision_service.py,sha256=KE0D-Q75bTcxNcigo_wfPAtSHrzQzWNvN6Pcs2c_N-w,6495
+ isa_model/inference/services/vision/openai_vision_service.py,sha256=5M182cV-wKCnV_U0CGWu4uFrggo--3YLD_0_FpNW9Ak,2920
+ isa_model/inference/services/vision/replicate_image_gen_service.py,sha256=3jjZ1c7YVCT_Or212SI0zTGRJtmUOs1yWLs8jV8QgOA,12162
+ isa_model/inference/services/vision/helpers/image_utils.py,sha256=hTZi4MLktETupPIbE-TXMSi1kix6h8UfLiyEIDt2rzA,1751
+ isa_model/inference/services/vision/helpers/text_splitter.py,sha256=6AbvcQ7H6MS54B9d9T1XBGg4GhvmKfZqp00lKp9pF-U,1635
+ isa_model/inference/utils/conversion/bge_rerank_convert.py,sha256=1dvtxe5-PPCe2Au6SO8F2XaD-xdIoeA4zDTcid2L9FU,2691
+ isa_model/inference/utils/conversion/onnx_converter.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ isa_model/inference/utils/conversion/torch_converter.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ isa_model/scripts/inference_tracker.py,sha256=T6qQJMHJcAIQ8eYlgqpM9RWxfiV4z5xIolaoglKBBsg,8831
+ isa_model/scripts/mlflow_manager.py,sha256=7xMN0_wELr1jcALuTW9WeWirRkPZPlE2LlFfZKflXBY,12142
+ isa_model/scripts/model_registry.py,sha256=7rycPkVk8WHUO3LJaHfdyy5Yq8qmd_4WkGk4wKan-2w,14279
+ isa_model/scripts/start_mlflow.py,sha256=3AGKBzByjzbZ56I8w0IOfYnp3V6EU2Lv9NtX9maSqL8,2571
+ isa_model/scripts/training_tracker.py,sha256=cnXPi8ip2OK76-aWAOgC-dKx90PqZLEnP6UbHso7Fwc,8080
+ isa_model/training/__init__.py,sha256=lfgaSGmOdfizXYV0NKZ6UDXdx_KIlit62eVhGK_6zXA,1533
+ isa_model/training/factory.py,sha256=c-77203-2jL3ppcAhhtms6eb8RwI1xz7k324V_NqCFM,13733
+ isa_model/training/annotation/annotation_schema.py,sha256=BDEgUlRxMoXGTn12VZ_UUU8rWUHQW_JL39d1AvWU-04,1271
+ isa_model/training/annotation/processors/annotation_processor.py,sha256=hz5VhaPLLPuwq2IoBMbxrZfOS_xBVCrqWk1GEKW2zd0,4839
+ isa_model/training/annotation/storage/dataset_manager.py,sha256=nKxhmkw-K5vO7Wd5I0Rp5j9fqwV06h_9i_1lVQiU7uU,4592
+ isa_model/training/annotation/storage/dataset_schema.py,sha256=JPhrT-pbT0jGd_rmDlhyTesXKv9OYxy85U-RAJFe05o,1086
+ isa_model/training/annotation/tests/test_annotation_flow.py,sha256=DXYHP8rLKaLII6bo5Rtltqk4sQxr8k8G-wQegfuXHiE,3605
+ isa_model/training/annotation/tests/test_minio copy.py,sha256=EI-PlH5xttAZF14Z_xn6LjgIJBkvP2qjLcvbX2hc0RM,3946
+ isa_model/training/annotation/tests/test_minio_upload.py,sha256=fL1eMubwR6L9lYc3zEwlWU9yjJuTsIYi93i0l9QUjm0,1109
+ isa_model/training/annotation/views/annotation_controller.py,sha256=3VzJ52yI-YIpcaAAXy2qac7sr4hTnFdtn-ZEKTt4IkM,5792
+ isa_model/training/cloud/__init__.py,sha256=ZVsNsnZUgueqtd-e1__xD19njf7KrLTwz28htaU174g,678
+ isa_model/training/cloud/job_orchestrator.py,sha256=WDv_7HwibjN7iogCKPnO0UTvvl7ADCQc6rRDHOBj_OQ,15501
+ isa_model/training/cloud/runpod_trainer.py,sha256=x9NMFNMnz6HEolADf9wYY5OnkklOspwcE6u_pf_7SVQ,15208
+ isa_model/training/cloud/storage_manager.py,sha256=qitWGuPHKNmYCSkMdOO0ccz2xPxFa9EfhcDZwQ-fZXA,18556
+ isa_model/training/core/__init__.py,sha256=HyIsPibT0MAy9v55BK95K30aXKkBU6tgVbeTL17HFTY,526
+ isa_model/training/core/config.py,sha256=oqgKpBvtzrN6jwLIQYQ2707lH6nmjrktRiSxp9iocVc,5084
+ isa_model/training/core/dataset.py,sha256=XCFsnf0NUMU1dJpdvo_CAMyvXB-9_RCUEiy8TU50e20,7802
+ isa_model/training/core/trainer.py,sha256=h5TjqjdFr0Fsv5y4-0siy1KmOlqLfliVaUXybvuoeXU,26932
+ isa_model/training/core/utils.py,sha256=Nik0M2ssfNbWqP6fKO0Kfyhzr_H6Q19ioxB-qCYbn5E,8387
+ isa_model-0.2.9.dist-info/METADATA,sha256=3Ba9NXgqR5OFKe-XCx3plV4TbevfFuyoj6Rspxc-ibA,12226
+ isa_model-0.2.9.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ isa_model-0.2.9.dist-info/top_level.txt,sha256=eHSy_Xb3kNkh2kK11mi1mZh0Wz91AQ5b8k2KFYO-rE8,10
+ isa_model-0.2.9.dist-info/RECORD,,
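
Each RECORD row is `path,hash,size`, where the hash is the urlsafe-base64 SHA-256 digest of the file with padding stripped (so zero-byte files such as `local_storage.py` above record `47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU`). A quick sketch for reproducing one of these values against an installed copy of the package:

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    """RECORD-style hash: urlsafe base64 of the SHA-256 digest, padding stripped."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

# For an empty file this prints sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
# matching the zero-byte entries in the listing above.
print(record_hash("isa_model/core/storage/local_storage.py"))
```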