isa_model-0.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- isa_model/__init__.py +5 -0
- isa_model/core/model_manager.py +143 -0
- isa_model/core/model_registry.py +115 -0
- isa_model/core/model_router.py +226 -0
- isa_model/core/model_storage.py +133 -0
- isa_model/core/model_version.py +0 -0
- isa_model/core/resource_manager.py +202 -0
- isa_model/core/storage/hf_storage.py +0 -0
- isa_model/core/storage/local_storage.py +0 -0
- isa_model/core/storage/minio_storage.py +0 -0
- isa_model/deployment/gpu_fp16_ds8/models/deepseek_r1/1/model.py +120 -0
- isa_model/deployment/gpu_fp16_ds8/scripts/download_model.py +18 -0
- isa_model/deployment/gpu_int8_ds8/app/server.py +66 -0
- isa_model/deployment/gpu_int8_ds8/scripts/test_client.py +43 -0
- isa_model/deployment/gpu_int8_ds8/scripts/test_client_os.py +35 -0
- isa_model/inference/__init__.py +11 -0
- isa_model/inference/adapter/unified_api.py +248 -0
- isa_model/inference/ai_factory.py +359 -0
- isa_model/inference/base.py +46 -0
- isa_model/inference/providers/__init__.py +19 -0
- isa_model/inference/providers/base_provider.py +30 -0
- isa_model/inference/providers/model_cache_manager.py +341 -0
- isa_model/inference/providers/ollama_provider.py +73 -0
- isa_model/inference/providers/openai_provider.py +101 -0
- isa_model/inference/providers/replicate_provider.py +107 -0
- isa_model/inference/providers/triton_provider.py +439 -0
- isa_model/inference/services/__init__.py +14 -0
- isa_model/inference/services/audio/base_stt_service.py +91 -0
- isa_model/inference/services/audio/base_tts_service.py +136 -0
- isa_model/inference/services/audio/openai_tts_service.py +71 -0
- isa_model/inference/services/base_service.py +106 -0
- isa_model/inference/services/embedding/ollama_embed_service.py +97 -0
- isa_model/inference/services/embedding/openai_embed_service.py +0 -0
- isa_model/inference/services/llm/__init__.py +12 -0
- isa_model/inference/services/llm/base_llm_service.py +134 -0
- isa_model/inference/services/llm/ollama_llm_service.py +99 -0
- isa_model/inference/services/llm/openai_llm_service.py +138 -0
- isa_model/inference/services/others/table_transformer_service.py +61 -0
- isa_model/inference/services/vision/__init__.py +12 -0
- isa_model/inference/services/vision/helpers/image_utils.py +58 -0
- isa_model/inference/services/vision/helpers/text_splitter.py +46 -0
- isa_model/inference/services/vision/ollama_vision_service.py +60 -0
- isa_model/inference/services/vision/openai_vision_service.py +80 -0
- isa_model/inference/services/vision/replicate_image_gen_service.py +185 -0
- isa_model/inference/utils/conversion/bge_rerank_convert.py +73 -0
- isa_model/inference/utils/conversion/onnx_converter.py +0 -0
- isa_model/inference/utils/conversion/torch_converter.py +0 -0
- isa_model/scripts/inference_tracker.py +283 -0
- isa_model/scripts/mlflow_manager.py +379 -0
- isa_model/scripts/model_registry.py +465 -0
- isa_model/scripts/start_mlflow.py +95 -0
- isa_model/scripts/training_tracker.py +257 -0
- isa_model/training/engine/llama_factory/__init__.py +39 -0
- isa_model/training/engine/llama_factory/config.py +115 -0
- isa_model/training/engine/llama_factory/data_adapter.py +284 -0
- isa_model/training/engine/llama_factory/examples/__init__.py +6 -0
- isa_model/training/engine/llama_factory/examples/finetune_with_tracking.py +185 -0
- isa_model/training/engine/llama_factory/examples/rlhf_with_tracking.py +163 -0
- isa_model/training/engine/llama_factory/factory.py +331 -0
- isa_model/training/engine/llama_factory/rl.py +254 -0
- isa_model/training/engine/llama_factory/trainer.py +171 -0
- isa_model/training/image_model/configs/create_config.py +37 -0
- isa_model/training/image_model/configs/create_flux_config.py +26 -0
- isa_model/training/image_model/configs/create_lora_config.py +21 -0
- isa_model/training/image_model/prepare_massed_compute.py +97 -0
- isa_model/training/image_model/prepare_upload.py +17 -0
- isa_model/training/image_model/raw_data/create_captions.py +16 -0
- isa_model/training/image_model/raw_data/create_lora_captions.py +20 -0
- isa_model/training/image_model/raw_data/pre_processing.py +200 -0
- isa_model/training/image_model/train/train.py +42 -0
- isa_model/training/image_model/train/train_flux.py +41 -0
- isa_model/training/image_model/train/train_lora.py +57 -0
- isa_model/training/image_model/train_main.py +25 -0
- isa_model/training/llm_model/annotation/annotation_schema.py +47 -0
- isa_model/training/llm_model/annotation/processors/annotation_processor.py +126 -0
- isa_model/training/llm_model/annotation/storage/dataset_manager.py +131 -0
- isa_model/training/llm_model/annotation/storage/dataset_schema.py +44 -0
- isa_model/training/llm_model/annotation/tests/test_annotation_flow.py +109 -0
- isa_model/training/llm_model/annotation/tests/test_minio copy.py +113 -0
- isa_model/training/llm_model/annotation/tests/test_minio_upload.py +43 -0
- isa_model/training/llm_model/annotation/views/annotation_controller.py +158 -0
- isa_model-0.0.1.dist-info/METADATA +327 -0
- isa_model-0.0.1.dist-info/RECORD +86 -0
- isa_model-0.0.1.dist-info/WHEEL +5 -0
- isa_model-0.0.1.dist-info/licenses/LICENSE +21 -0
- isa_model-0.0.1.dist-info/top_level.txt +1 -0
isa_model-0.0.1.dist-info/METADATA
@@ -0,0 +1,327 @@
Metadata-Version: 2.4
Name: isa-model
Version: 0.0.1
Summary: Unified AI model serving framework
Author-email: isA_Model Contributors <your.email@example.com>
License-Expression: MIT
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Requires-Python: >=3.8
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: fastapi>=0.95.0
Requires-Dist: numpy>=1.20.0
Requires-Dist: httpx>=0.23.0
Requires-Dist: pydantic>=2.0.0
Requires-Dist: uvicorn>=0.22.0
Requires-Dist: requests>=2.28.0
Requires-Dist: aiohttp>=3.8.0
Requires-Dist: transformers>=4.30.0
Requires-Dist: langchain-core>=0.1.0
Requires-Dist: huggingface-hub>=0.16.0
Requires-Dist: kubernetes>=25.3.0
Requires-Dist: mlflow>=2.4.0
Requires-Dist: torch>=2.0.0
Requires-Dist: openai>=1.10.0
Requires-Dist: replicate>=0.23.0
Requires-Dist: python-dotenv>=1.0.0
Dynamic: license-file

# isA Model - Unified AI Model Serving Framework

A comprehensive Python framework for working with multiple AI providers and models through a unified interface. Support for OpenAI, Replicate, Ollama, and more.

## Installation

```bash
pip install isa-model
```

## Quick Start

The isa-model package supports three main usage patterns:

### 1. Pass API Keys Directly (Recommended)

This is the most flexible approach - no environment variables needed:

```python
from isa_model.inference.ai_factory import AIFactory

# Create factory instance
factory = AIFactory.get_instance()

# Use OpenAI with API key
llm = factory.get_llm(
    model_name="gpt-4o-mini",
    provider="openai",
    api_key="your-openai-api-key-here"
)

# Use Replicate for image generation
image_gen = factory.get_vision_model(
    model_name="stability-ai/sdxl",
    provider="replicate",
    api_key="your-replicate-token-here"
)
```

### 2. Use Environment Variables

Set your API keys as environment variables:

```bash
export OPENAI_API_KEY="your-openai-api-key"
export REPLICATE_API_TOKEN="your-replicate-token"
```

Then use without passing keys:

```python
from isa_model.inference.ai_factory import AIFactory

factory = AIFactory.get_instance()

# Will automatically use OPENAI_API_KEY from environment
llm = factory.get_llm(model_name="gpt-4o-mini", provider="openai")

# Will automatically use REPLICATE_API_TOKEN from environment
image_gen = factory.get_vision_model(model_name="stability-ai/sdxl", provider="replicate")
```

### 3. Use Local Models (No API Key Needed)

For local models like Ollama, no API keys are required:

```python
from isa_model.inference.ai_factory import AIFactory

factory = AIFactory.get_instance()

# Use local Ollama model (no API key needed)
llm = factory.get_llm(model_name="llama3.1", provider="ollama")
```

## Supported Services

### Language Models (LLM)

```python
# OpenAI models
llm = factory.get_llm("gpt-4o-mini", "openai", api_key="your-key")
llm = factory.get_llm("gpt-4o", "openai", api_key="your-key")

# Ollama models (local)
llm = factory.get_llm("llama3.1", "ollama")
llm = factory.get_llm("codellama", "ollama")

# Replicate models
llm = factory.get_llm("meta/llama-3-70b-instruct", "replicate", api_key="your-token")
```

### Vision Models

```python
# OpenAI vision
vision = factory.get_vision_model("gpt-4o", "openai", api_key="your-key")

# Replicate image generation
image_gen = factory.get_vision_model("stability-ai/sdxl", "replicate", api_key="your-token")

# Ollama vision (local)
vision = factory.get_vision_model("llava", "ollama")
```

### Embedding Models

```python
# OpenAI embeddings
embedder = factory.get_embedding("text-embedding-3-small", "openai", {"api_key": "your-key"})

# Ollama embeddings (local)
embedder = factory.get_embedding("bge-m3", "ollama")
```

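Once you have an embedder, the async `BaseEmbedService` methods listed under "Base Service Classes" below can be used to embed and compare texts. A minimal sketch follows; the exact return shapes and the `compute_similarity()` call signature are assumptions inferred from the method names, not part of the documented API:

```python
import asyncio
from isa_model.inference.ai_factory import AIFactory

async def embed_example():
    factory = AIFactory.get_instance()
    embedder = factory.get_embedding("bge-m3", "ollama")  # local, no API key

    # Embed two sentences and compare them. The method names follow the
    # BaseEmbedService interface documented below; passing the two raw
    # embeddings directly to compute_similarity() is an assumption.
    emb_a = await embedder.create_text_embedding("The cat sat on the mat.")
    emb_b = await embedder.create_text_embedding("A cat is sitting on a mat.")

    score = await embedder.compute_similarity(emb_a, emb_b)
    print(f"Similarity: {score}")

asyncio.run(embed_example())
```
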
## Base Service Classes

The framework provides comprehensive base classes for implementing new AI services; a short subclassing sketch follows the method lists below:

### BaseLLMService
- `ainvoke()` - Universal invocation method
- `achat()` - Chat completion with messages
- `acompletion()` - Simple text completion
- `agenerate()` - Generate multiple completions
- `astream_chat()` - Streaming chat responses
- `get_token_usage()` - Token usage statistics

### BaseVisionService
- `analyze_image()` - Analyze and describe images
- `describe_image()` - Generate detailed descriptions
- `extract_text()` - OCR text extraction
- `detect_objects()` - Object detection
- `classify_image()` - Image classification
- `compare_images()` - Image similarity comparison

### BaseImageGenService
- `generate_image()` - Generate single image from text
- `generate_images()` - Generate multiple images
- `image_to_image()` - Transform existing images
- `get_supported_sizes()` - Get supported dimensions

### BaseEmbedService
- `create_text_embedding()` - Single text embedding
- `create_text_embeddings()` - Batch text embeddings
- `compute_similarity()` - Similarity calculation
- `find_similar_texts()` - Semantic search

### BaseSTTService (Speech-to-Text)
- `transcribe_audio()` - Audio transcription
- `transcribe_audio_batch()` - Batch transcription
- `detect_language()` - Language detection

### BaseTTSService (Text-to-Speech)
- `synthesize_speech()` - Text to speech conversion
- `synthesize_speech_to_file()` - Save speech to file
- `get_available_voices()` - List available voices

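To add a provider or model type of your own, subclass the relevant base class and implement its async methods. Here is a rough sketch for a toy LLM service; the import path mirrors the package layout shown above, but the base class may require additional abstract methods beyond the two shown, so treat this as an illustration rather than a complete implementation:

```python
# Hypothetical example: a trivial LLM service built on the documented base class.
from isa_model.inference.services.llm.base_llm_service import BaseLLMService

class EchoLLMService(BaseLLMService):
    """Toy service that echoes the last user message back."""

    async def ainvoke(self, prompt: str) -> str:
        # Route the simple-prompt entry point through achat()
        return await self.achat([{"role": "user", "content": prompt}])

    async def achat(self, messages: list) -> str:
        # Find the most recent user message and echo it
        last_user = next(
            m["content"] for m in reversed(messages) if m["role"] == "user"
        )
        return f"Echo: {last_user}"
```
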
## Usage Examples

### Chat Completion

```python
import asyncio
from isa_model.inference.ai_factory import AIFactory

async def chat_example():
    factory = AIFactory.get_instance()
    llm = factory.get_llm("gpt-4o-mini", "openai", api_key="your-key")

    messages = [
        {"role": "user", "content": "Hello, how are you?"}
    ]

    response = await llm.achat(messages)
    print(response)

# Run the async function
asyncio.run(chat_example())
```

### Image Analysis

```python
import asyncio
from isa_model.inference.ai_factory import AIFactory

async def vision_example():
    factory = AIFactory.get_instance()
    vision = factory.get_vision_model("gpt-4o", "openai", api_key="your-key")

    result = await vision.analyze_image(
        image="path/to/your/image.jpg",
        prompt="What do you see in this image?"
    )

    print(result["text"])

asyncio.run(vision_example())
```

### Image Generation

```python
import asyncio
from isa_model.inference.ai_factory import AIFactory

async def image_gen_example():
    factory = AIFactory.get_instance()
    image_gen = factory.get_vision_model(
        "stability-ai/sdxl",
        "replicate",
        api_key="your-replicate-token"
    )

    result = await image_gen.generate_image(
        prompt="A beautiful sunset over mountains",
        width=1024,
        height=1024
    )

    # Save the generated image
    with open("generated_image.png", "wb") as f:
        f.write(result["image_data"])

asyncio.run(image_gen_example())
```

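### Speech Synthesis

The audio services follow the same async pattern. This is a hypothetical sketch built on the `BaseTTSService` methods listed above; the factory accessor name `get_tts_service()`, the model name, and the keyword arguments are assumptions and may differ from the actual API (see `isa_model/inference/services/audio/openai_tts_service.py` in the package):

```python
import asyncio
from isa_model.inference.ai_factory import AIFactory

async def tts_example():
    factory = AIFactory.get_instance()
    # Accessor name and model name are assumed for illustration only
    tts = factory.get_tts_service("tts-1", "openai", api_key="your-key")

    # synthesize_speech_to_file() is documented under BaseTTSService above;
    # the parameter names used here are assumptions
    await tts.synthesize_speech_to_file(
        text="Hello from isa-model!",
        file_path="hello.mp3"
    )

asyncio.run(tts_example())
```
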
## Configuration Options

You can pass additional configuration options:

```python
# Custom configuration
config = {
    "temperature": 0.7,
    "max_tokens": 1000,
    "top_p": 0.9
}

llm = factory.get_llm(
    model_name="gpt-4o-mini",
    provider="openai",
    config=config,
    api_key="your-key"
)
```

## Error Handling

The framework provides informative error messages and graceful fallbacks:

```python
# Inside an async function:
try:
    llm = factory.get_llm("gpt-4o-mini", "openai", api_key="invalid-key")
    response = await llm.achat([{"role": "user", "content": "Hello"}])
except Exception as e:
    print(f"Error: {e}")
```

## Development

### Installing for Development

```bash
git clone <repository-url>
cd isA_Model
pip install -e .
```

### Running Tests

```bash
pytest tests/
```

### Building and Publishing

```bash
# Build the package
python -m build

# Upload to PyPI (requires PYPI_API_TOKEN in .env.local)
bash scripts/normal_update.sh
```

## License

MIT License - see LICENSE file for details.

## Contributing

Contributions are welcome! Please read our contributing guidelines and submit pull requests to our GitHub repository.

## Support

For questions and support, please open an issue on our GitHub repository.

isa_model-0.0.1.dist-info/RECORD
@@ -0,0 +1,86 @@
isa_model/__init__.py,sha256=d63WuNpouABPnomHiQKmPp829-ba-CtKnyefZwgFNsc,87
isa_model/core/model_manager.py,sha256=eQp0MV0x5sghL1qliPUWkFX4sEKqInyGLoICfNkJnZM,5275
isa_model/core/model_registry.py,sha256=3K32y9N0M1fXoUH_EBPoFq9Tj1enFgOSx9H57upmsHs,4005
isa_model/core/model_router.py,sha256=WT45wP5Ta-c3QErPGUY86G9-IpWQXjLC5FG8cPI-qK0,8637
isa_model/core/model_storage.py,sha256=yMLapW87EY1EPXw6S7H8UQAZh3hJ1KxsEohjgjw-HrA,4507
isa_model/core/model_version.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
isa_model/core/resource_manager.py,sha256=jlrlhHqtCbq4sAFgfGEEhTWRcuftXtjfV6SjkZs1boM,8545
isa_model/core/storage/hf_storage.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
isa_model/core/storage/local_storage.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
isa_model/core/storage/minio_storage.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
isa_model/deployment/gpu_fp16_ds8/models/deepseek_r1/1/model.py,sha256=jCAN_r-22gNR1HURAjmIZVeuU1TNidcyXRzxQdi2jOs,4015
isa_model/deployment/gpu_fp16_ds8/scripts/download_model.py,sha256=E6iSAgBu3OGfK-HLIXbGWfYSAIGQSGzj6wMAy1JAugI,566
isa_model/deployment/gpu_int8_ds8/app/server.py,sha256=lwWxdnR2DNEd0vIGQyfabKtDSUzSHVQsy3Z_AJejpVg,2102
isa_model/deployment/gpu_int8_ds8/scripts/test_client.py,sha256=aCULgRYzEQj_ELUK1bmPgN99yvFgNR5C0O3gc8S32pg,1421
isa_model/deployment/gpu_int8_ds8/scripts/test_client_os.py,sha256=XXrneTCHUeh1LNRcu-YtZQ5B4pNawlrxC-cTWmJU2A8,936
isa_model/inference/__init__.py,sha256=usfuQJ4zYY2RRtHkE-V6LuJ5aN7WJogtPUj9Qmy4Wvw,318
isa_model/inference/ai_factory.py,sha256=mF-Pj8FUCsOvSG0IIg1OVMhDwXnpxIbZQJWVYWfjv2s,14660
isa_model/inference/base.py,sha256=qwOddnSGI0GUdD6qIdGBPQpkW7UjU3Y-zaZvu70B4WA,1278
isa_model/inference/adapter/unified_api.py,sha256=67_Ok8W20m6Otf6r9WyOEVpnxondP4UAxOASk9ozDk4,8668
isa_model/inference/providers/__init__.py,sha256=a83q-LMFv8u47wf0XtxvqOw_mlVgA_90wtuwy02qdDE,581
isa_model/inference/providers/base_provider.py,sha256=btkSXE7o1IfOpv22hMM6_DNlm05tbLMszsP1J4T26KE,924
isa_model/inference/providers/model_cache_manager.py,sha256=dLRpx7OJweQ5LcSAkU7D0DQRfLtIhG6nGvg4W_gau80,15315
isa_model/inference/providers/ollama_provider.py,sha256=BLkWp4gmCw6Fwf1yNRY90VftMqwca9YOGOHf6DqVEKs,2692
isa_model/inference/providers/openai_provider.py,sha256=8ywUsrvlvC7VY3LNOVJP1IcRwBMi1NvG0PoI0lYo4jM,3881
isa_model/inference/providers/replicate_provider.py,sha256=qXnK3Yzy5-gaduVJVY8asrIIi-97m4WGUkG963_4ifk,3948
isa_model/inference/providers/triton_provider.py,sha256=GKlth7cTOx6ERbsXXJ0gDNby3kVGQNULBDt098BXBSU,15258
isa_model/inference/services/__init__.py,sha256=p-UlEGMnadGUD6zzwfAjf367S2QQ-z1sD6TP-K4EjEM,353
isa_model/inference/services/base_service.py,sha256=PB6eZp-PynUdo9a0QofvHgrrJLUFYM_FSafTg7fvWrY,3083
isa_model/inference/services/audio/base_stt_service.py,sha256=tIfdRLEppcFEyTEmI8zi8OwMd7wVP423MQDN4iYDEcE,2800
isa_model/inference/services/audio/base_tts_service.py,sha256=BzZ3JrrLpm4COthNyNrIO2QgP7RZkXDNPEELEKHzIbA,4164
isa_model/inference/services/audio/openai_tts_service.py,sha256=0R3-AFSAU0sOCx9iXmRSLxgAh6Tm1n887mPK4_MGTgY,2560
isa_model/inference/services/embedding/ollama_embed_service.py,sha256=qspgGDcLPakG0yirdKDT0r7asgUkMO-soM2J1OaqE6g,3700
isa_model/inference/services/embedding/openai_embed_service.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
isa_model/inference/services/llm/__init__.py,sha256=Yf6zMjcHs-eGLzl9mRGRf75S_5oBuNxxpc2r3mDw-CE,269
isa_model/inference/services/llm/base_llm_service.py,sha256=HtlTArjeCywvkzlAp4R4iWkoaOEQg1RdltOZdbeRYFQ,3982
isa_model/inference/services/llm/ollama_llm_service.py,sha256=WYCuJG16x9RtQm0vFJNxQWSVKFtNnCk8kc3_HZB4UKs,3875
isa_model/inference/services/llm/openai_llm_service.py,sha256=EuduDfZ-10PEoFrjRr7KmZvxew2B-GQSYlvmqKQcVXA,5394
isa_model/inference/services/others/table_transformer_service.py,sha256=r74h6QUSwSj6jTt-gRProz9SgwBwKWDe50NR0uqW0ZI,2367
isa_model/inference/services/vision/__init__.py,sha256=t06-E1Fo89MTLJweHx3ai892HHbvnwh5G0ovj0Y-2wk,277
isa_model/inference/services/vision/ollama_vision_service.py,sha256=aGWMF11YZ-4-6kSkX5af0m1u704OYutO0wwmfWszhBE,2004
isa_model/inference/services/vision/openai_vision_service.py,sha256=5M182cV-wKCnV_U0CGWu4uFrggo--3YLD_0_FpNW9Ak,2920
isa_model/inference/services/vision/replicate_image_gen_service.py,sha256=03jhK31C_4p0Xj5V9GiCRlgF-UpNw7aHjCYiCCiqWNw,7826
isa_model/inference/services/vision/helpers/image_utils.py,sha256=hTZi4MLktETupPIbE-TXMSi1kix6h8UfLiyEIDt2rzA,1751
isa_model/inference/services/vision/helpers/text_splitter.py,sha256=6AbvcQ7H6MS54B9d9T1XBGg4GhvmKfZqp00lKp9pF-U,1635
isa_model/inference/utils/conversion/bge_rerank_convert.py,sha256=1dvtxe5-PPCe2Au6SO8F2XaD-xdIoeA4zDTcid2L9FU,2691
isa_model/inference/utils/conversion/onnx_converter.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
isa_model/inference/utils/conversion/torch_converter.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
isa_model/scripts/inference_tracker.py,sha256=T6qQJMHJcAIQ8eYlgqpM9RWxfiV4z5xIolaoglKBBsg,8831
isa_model/scripts/mlflow_manager.py,sha256=7xMN0_wELr1jcALuTW9WeWirRkPZPlE2LlFfZKflXBY,12142
isa_model/scripts/model_registry.py,sha256=7rycPkVk8WHUO3LJaHfdyy5Yq8qmd_4WkGk4wKan-2w,14279
isa_model/scripts/start_mlflow.py,sha256=3AGKBzByjzbZ56I8w0IOfYnp3V6EU2Lv9NtX9maSqL8,2571
isa_model/scripts/training_tracker.py,sha256=cnXPi8ip2OK76-aWAOgC-dKx90PqZLEnP6UbHso7Fwc,8080
isa_model/training/engine/llama_factory/__init__.py,sha256=WCqmUHTidASN4owGDOPSnKeLdG1gbK1MXQrRAzjP0z4,969
isa_model/training/engine/llama_factory/config.py,sha256=3OvjuXs9IyfcY52pB1SpXSOe0VwmKZvsmy8VK9Ig6Ss,3178
isa_model/training/engine/llama_factory/data_adapter.py,sha256=krqLp6Jy-IFQ6_M8O3FCtU-qqzUFJ65aNHpVq9C4Zyk,8865
isa_model/training/engine/llama_factory/factory.py,sha256=U92JKd37QXOBKz4bkSs0Ryoi2fgDIkHXbH15ByS1Q3g,10539
isa_model/training/engine/llama_factory/rl.py,sha256=lFXmrZ4bbmfVlIrJ4080DufRt0Pdp2xc7ay5SW0XeSE,8218
isa_model/training/engine/llama_factory/trainer.py,sha256=jUx66YflsHdCcXJGRNJkvpBx6nR1Xyx9f7IIMe44eyc,5095
isa_model/training/engine/llama_factory/examples/__init__.py,sha256=hXCXa9vQK3Xp26xcaMECN-gMKPtVgK6-IaL3wsXULtM,220
isa_model/training/engine/llama_factory/examples/finetune_with_tracking.py,sha256=NMYqQUJA8hS3q2Jh2Mr2_5jvb-wYeiPVgiEk7UKkljg,4995
isa_model/training/engine/llama_factory/examples/rlhf_with_tracking.py,sha256=dlSauNk5L30nMrq-0yJZkY9Tk1cwgdNRM5Flx8ZRZOI,4350
isa_model/training/image_model/prepare_massed_compute.py,sha256=u0_Xc-0UcjEXL8bOaFDpdJHhUlO4kKLiZ8dkSZil5EE,2981
isa_model/training/image_model/prepare_upload.py,sha256=4qXJH16ME2rbQNRChG7lQDYTcOflsoAKSsrfXaAb_oQ,508
isa_model/training/image_model/train_main.py,sha256=0WkEZTU68kSn9351MyYSI4EarqFhj5bFBfykqAUVw30,804
isa_model/training/image_model/configs/create_config.py,sha256=6kn4m202xNYcwuB8jsyZWrth0-PBv5k07SUtnHz0JCo,951
isa_model/training/image_model/configs/create_flux_config.py,sha256=zVMvCk6O0fJpojp2HxADNyLUYKlEWVQ7fmscMH7X_oU,1006
isa_model/training/image_model/configs/create_lora_config.py,sha256=aQ-ze3vf30H5Dsvm-dENt_Gg3ELYq035Rl2ktFPCi8I,729
isa_model/training/image_model/raw_data/create_captions.py,sha256=7CoHaDoZZZXzZIMF9yLi7iYNEaPkP4tsembUJi2f8Fo,488
isa_model/training/image_model/raw_data/create_lora_captions.py,sha256=UR417mpWV1Tf57px36yAX7B5E-MnwZfnP9FUbB3Z89k,687
isa_model/training/image_model/raw_data/pre_processing.py,sha256=KvUDNrw7sozrqnF15lH4mAnKsmHsO883glu0EQBq_6I,8137
isa_model/training/image_model/train/train.py,sha256=gNMmh_8RtQ3ihz4hJdRcEuwbC5D1SVsgNJjfUHcXmSs,1440
isa_model/training/image_model/train/train_flux.py,sha256=J5LMUeUKcrFa-8Fer_c7QQiwty0YhFO5w1v0KXv_Oes,1612
isa_model/training/image_model/train/train_lora.py,sha256=8bSiEMMAwvFDZ-VIhx11jbnO5HpFvaDkM7BC7AuA6DU,2232
isa_model/training/llm_model/annotation/annotation_schema.py,sha256=BDEgUlRxMoXGTn12VZ_UUU8rWUHQW_JL39d1AvWU-04,1271
isa_model/training/llm_model/annotation/processors/annotation_processor.py,sha256=hz5VhaPLLPuwq2IoBMbxrZfOS_xBVCrqWk1GEKW2zd0,4839
isa_model/training/llm_model/annotation/storage/dataset_manager.py,sha256=nKxhmkw-K5vO7Wd5I0Rp5j9fqwV06h_9i_1lVQiU7uU,4592
isa_model/training/llm_model/annotation/storage/dataset_schema.py,sha256=JPhrT-pbT0jGd_rmDlhyTesXKv9OYxy85U-RAJFe05o,1086
isa_model/training/llm_model/annotation/tests/test_annotation_flow.py,sha256=DXYHP8rLKaLII6bo5Rtltqk4sQxr8k8G-wQegfuXHiE,3605
isa_model/training/llm_model/annotation/tests/test_minio copy.py,sha256=EI-PlH5xttAZF14Z_xn6LjgIJBkvP2qjLcvbX2hc0RM,3946
isa_model/training/llm_model/annotation/tests/test_minio_upload.py,sha256=fL1eMubwR6L9lYc3zEwlWU9yjJuTsIYi93i0l9QUjm0,1109
isa_model/training/llm_model/annotation/views/annotation_controller.py,sha256=3VzJ52yI-YIpcaAAXy2qac7sr4hTnFdtn-ZEKTt4IkM,5792
isa_model-0.0.1.dist-info/licenses/LICENSE,sha256=nNPdMBBVrQz3f7AgKFZuyQgdar9d90Vdw51es-P72Dw,1084
isa_model-0.0.1.dist-info/METADATA,sha256=rNFm9b9gkD38nhWJRj1RoPaSHbdQs8c2HwiqOCpd65w,8105
isa_model-0.0.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
isa_model-0.0.1.dist-info/top_level.txt,sha256=eHSy_Xb3kNkh2kK11mi1mZh0Wz91AQ5b8k2KFYO-rE8,10
isa_model-0.0.1.dist-info/RECORD,,

isa_model-0.0.1.dist-info/licenses/LICENSE
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023-2024 isA_Model Contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

isa_model-0.0.1.dist-info/top_level.txt
@@ -0,0 +1 @@
isa_model