isa_model-0.1.0-py3-none-any.whl
This diff shows the contents of the isa-model 0.1.0 wheel as published to its public registry; every file is new in this release.
- isa_model/__init__.py +5 -0
- isa_model/core/model_manager.py +143 -0
- isa_model/core/model_registry.py +115 -0
- isa_model/core/model_router.py +226 -0
- isa_model/core/model_storage.py +133 -0
- isa_model/core/model_version.py +0 -0
- isa_model/core/resource_manager.py +202 -0
- isa_model/core/storage/hf_storage.py +0 -0
- isa_model/core/storage/local_storage.py +0 -0
- isa_model/core/storage/minio_storage.py +0 -0
- isa_model/deployment/mlflow_gateway/__init__.py +8 -0
- isa_model/deployment/mlflow_gateway/start_gateway.py +65 -0
- isa_model/deployment/unified_multimodal_client.py +341 -0
- isa_model/inference/__init__.py +11 -0
- isa_model/inference/adapter/triton_adapter.py +453 -0
- isa_model/inference/adapter/unified_api.py +248 -0
- isa_model/inference/ai_factory.py +354 -0
- isa_model/inference/backends/Pytorch/bge_embed_backend.py +188 -0
- isa_model/inference/backends/Pytorch/gemma_backend.py +167 -0
- isa_model/inference/backends/Pytorch/llama_backend.py +166 -0
- isa_model/inference/backends/Pytorch/whisper_backend.py +194 -0
- isa_model/inference/backends/__init__.py +53 -0
- isa_model/inference/backends/base_backend_client.py +26 -0
- isa_model/inference/backends/container_services.py +104 -0
- isa_model/inference/backends/local_services.py +72 -0
- isa_model/inference/backends/openai_client.py +130 -0
- isa_model/inference/backends/replicate_client.py +197 -0
- isa_model/inference/backends/third_party_services.py +239 -0
- isa_model/inference/backends/triton_client.py +97 -0
- isa_model/inference/base.py +46 -0
- isa_model/inference/client_sdk/__init__.py +0 -0
- isa_model/inference/client_sdk/client.py +134 -0
- isa_model/inference/client_sdk/client_data_std.py +34 -0
- isa_model/inference/client_sdk/client_sdk_schema.py +16 -0
- isa_model/inference/client_sdk/exceptions.py +0 -0
- isa_model/inference/engine/triton/model_repository/bge/1/model.py +174 -0
- isa_model/inference/engine/triton/model_repository/gemma/1/model.py +250 -0
- isa_model/inference/engine/triton/model_repository/llama/1/model.py +76 -0
- isa_model/inference/engine/triton/model_repository/whisper/1/model.py +195 -0
- isa_model/inference/providers/__init__.py +19 -0
- isa_model/inference/providers/base_provider.py +30 -0
- isa_model/inference/providers/model_cache_manager.py +341 -0
- isa_model/inference/providers/ollama_provider.py +73 -0
- isa_model/inference/providers/openai_provider.py +87 -0
- isa_model/inference/providers/replicate_provider.py +94 -0
- isa_model/inference/providers/triton_provider.py +439 -0
- isa_model/inference/providers/vllm_provider.py +0 -0
- isa_model/inference/providers/yyds_provider.py +83 -0
- isa_model/inference/services/__init__.py +14 -0
- isa_model/inference/services/audio/fish_speech/handler.py +215 -0
- isa_model/inference/services/audio/runpod_tts_fish_service.py +212 -0
- isa_model/inference/services/audio/triton_speech_service.py +138 -0
- isa_model/inference/services/audio/whisper_service.py +186 -0
- isa_model/inference/services/audio/yyds_audio_service.py +71 -0
- isa_model/inference/services/base_service.py +106 -0
- isa_model/inference/services/base_tts_service.py +66 -0
- isa_model/inference/services/embedding/bge_service.py +183 -0
- isa_model/inference/services/embedding/ollama_embed_service.py +85 -0
- isa_model/inference/services/embedding/ollama_rerank_service.py +118 -0
- isa_model/inference/services/embedding/onnx_rerank_service.py +73 -0
- isa_model/inference/services/llm/__init__.py +16 -0
- isa_model/inference/services/llm/gemma_service.py +143 -0
- isa_model/inference/services/llm/llama_service.py +143 -0
- isa_model/inference/services/llm/ollama_llm_service.py +108 -0
- isa_model/inference/services/llm/openai_llm_service.py +129 -0
- isa_model/inference/services/llm/replicate_llm_service.py +179 -0
- isa_model/inference/services/llm/triton_llm_service.py +230 -0
- isa_model/inference/services/others/table_transformer_service.py +61 -0
- isa_model/inference/services/vision/__init__.py +12 -0
- isa_model/inference/services/vision/helpers/image_utils.py +58 -0
- isa_model/inference/services/vision/helpers/text_splitter.py +46 -0
- isa_model/inference/services/vision/ollama_vision_service.py +60 -0
- isa_model/inference/services/vision/replicate_vision_service.py +241 -0
- isa_model/inference/services/vision/triton_vision_service.py +199 -0
- isa_model/inference/services/vision/yyds_vision_service.py +80 -0
- isa_model/inference/utils/conversion/bge_rerank_convert.py +73 -0
- isa_model/inference/utils/conversion/onnx_converter.py +0 -0
- isa_model/inference/utils/conversion/torch_converter.py +0 -0
- isa_model/scripts/inference_tracker.py +283 -0
- isa_model/scripts/mlflow_manager.py +379 -0
- isa_model/scripts/model_registry.py +465 -0
- isa_model/scripts/start_mlflow.py +95 -0
- isa_model/scripts/training_tracker.py +257 -0
- isa_model/training/engine/llama_factory/__init__.py +39 -0
- isa_model/training/engine/llama_factory/config.py +115 -0
- isa_model/training/engine/llama_factory/data_adapter.py +284 -0
- isa_model/training/engine/llama_factory/examples/__init__.py +6 -0
- isa_model/training/engine/llama_factory/examples/finetune_with_tracking.py +185 -0
- isa_model/training/engine/llama_factory/examples/rlhf_with_tracking.py +163 -0
- isa_model/training/engine/llama_factory/factory.py +331 -0
- isa_model/training/engine/llama_factory/rl.py +254 -0
- isa_model/training/engine/llama_factory/trainer.py +171 -0
- isa_model/training/image_model/configs/create_config.py +37 -0
- isa_model/training/image_model/configs/create_flux_config.py +26 -0
- isa_model/training/image_model/configs/create_lora_config.py +21 -0
- isa_model/training/image_model/prepare_massed_compute.py +97 -0
- isa_model/training/image_model/prepare_upload.py +17 -0
- isa_model/training/image_model/raw_data/create_captions.py +16 -0
- isa_model/training/image_model/raw_data/create_lora_captions.py +20 -0
- isa_model/training/image_model/raw_data/pre_processing.py +200 -0
- isa_model/training/image_model/train/train.py +42 -0
- isa_model/training/image_model/train/train_flux.py +41 -0
- isa_model/training/image_model/train/train_lora.py +57 -0
- isa_model/training/image_model/train_main.py +25 -0
- isa_model/training/llm_model/annotation/annotation_schema.py +47 -0
- isa_model/training/llm_model/annotation/processors/annotation_processor.py +126 -0
- isa_model/training/llm_model/annotation/storage/dataset_manager.py +131 -0
- isa_model/training/llm_model/annotation/storage/dataset_schema.py +44 -0
- isa_model/training/llm_model/annotation/tests/test_annotation_flow.py +109 -0
- isa_model/training/llm_model/annotation/tests/test_minio copy.py +113 -0
- isa_model/training/llm_model/annotation/tests/test_minio_upload.py +43 -0
- isa_model/training/llm_model/annotation/views/annotation_controller.py +158 -0
- isa_model-0.1.0.dist-info/METADATA +116 -0
- isa_model-0.1.0.dist-info/RECORD +117 -0
- isa_model-0.1.0.dist-info/WHEEL +5 -0
- isa_model-0.1.0.dist-info/licenses/LICENSE +21 -0
- isa_model-0.1.0.dist-info/top_level.txt +1 -0

isa_model/training/llm_model/annotation/storage/dataset_schema.py
@@ -0,0 +1,44 @@
# app/services/llm_model/annotation/dataset/dataset_schema.py
from enum import Enum
from pydantic import BaseModel, Field
from typing import Dict, List, Optional
from datetime import datetime
from bson import ObjectId

class DatasetType(str, Enum):
    SFT = "sft"
    RLHF = "rlhf"

class DatasetStatus(str, Enum):
    PENDING = "pending"
    PROCESSING = "processing"
    READY = "ready"
    ERROR = "error"

class DatasetFiles(BaseModel):
    train: str
    eval: Optional[str]
    test: Optional[str]

class DatasetStats(BaseModel):
    total_examples: int
    avg_length: Optional[float]
    num_conversations: Optional[int]
    additional_metrics: Optional[Dict] = {}

class Dataset(BaseModel):
    id: Optional[ObjectId] = Field(None, alias="_id")
    name: str
    type: DatasetType
    version: str
    storage_path: str
    files: DatasetFiles
    stats: DatasetStats
    source_annotations: List[str]
    created_at: datetime
    status: DatasetStatus
    metadata: Optional[Dict] = {}

    class Config:
        arbitrary_types_allowed = True
        populate_by_name = True

isa_model/training/llm_model/annotation/tests/test_annotation_flow.py
@@ -0,0 +1,109 @@
# test_annotation_flow.py
import os
os.environ["ENV"] = "local"

import asyncio
from datetime import datetime
from bson import ObjectId
from app.services.llm_model.annotation.views.annotation_controller import AnnotationController
from app.services.llm_model.annotation.processors.annotation_processor import AnnotationProcessor
from app.services.llm_model.annotation.annotation_schema import (
    AnnotationFeedback,
    RatingScale,
    AnnotationType,
    AnnotationAspects,
    BetterResponse
)
from app.config.config_manager import config_manager

async def setup_test_data():
    """Setup initial test data in MongoDB"""
    db = await config_manager.get_db('mongodb')

    # Create a test annotation
    test_annotation = {
        "_id": ObjectId(),
        "project_name": "test_project",
        "items": [{
            "item_id": "test_item_1",
            "input": {
                "messages": [{
                    "role": "user",
                    "content": "What is the capital of France?"
                }]
            },
            "output": {
                "content": "The capital of France is Paris."
            },
            "status": "pending"
        }],
        "created_at": datetime.utcnow().isoformat()
    }

    await db['annotations'].insert_one(test_annotation)
    return test_annotation

async def test_annotation_flow():
    """Test the complete annotation flow"""
    try:
        # Initialize controllers
        annotation_controller = AnnotationController()
        annotation_processor = AnnotationProcessor()

        # Setup test data
        test_data = await setup_test_data()
        annotation_id = str(test_data["_id"])
        item_id = test_data["items"][0]["item_id"]

        print("1. Created test annotation")

        # Create test feedback
        feedback = AnnotationFeedback(
            rating=RatingScale.EXCELLENT,
            category=AnnotationType.ACCURACY,
            aspects=AnnotationAspects(
                factually_correct=True,
                relevant=True,
                harmful=False,
                biased=False,
                complete=True,
                efficient=True
            ),
            better_response=BetterResponse(
                content="Paris is the capital city of France, known for its iconic Eiffel Tower.",
                reason="Added more context and detail"
            ),
            comment="Good response, but could be more detailed"
        )

        # Submit annotation
        result = await annotation_controller.submit_annotation(
            annotation_id=annotation_id,
            item_id=item_id,
            feedback=feedback,
            annotator_id="test_annotator"
        )

        print("2. Submitted annotation:", result)

        # Process annotation queue
        await annotation_processor.process_queue()
        print("3. Processed annotation queue")

        # Verify dataset creation
        db = await config_manager.get_db('mongodb')
        datasets = await db['training_datasets'].find().to_list(length=10)

        print("\nCreated Datasets:")
        for dataset in datasets:
            print(f"- {dataset['name']} ({dataset['type']})")
            print(f"  Status: {dataset['status']}")
            print(f"  Examples: {dataset['stats']['total_examples']}")

    except Exception as e:
        print(f"Error during test: {e}")

if __name__ == "__main__":
    # Run the test
    print("Starting annotation flow test...")
    asyncio.run(test_annotation_flow())

isa_model/training/llm_model/annotation/tests/test_minio copy.py
@@ -0,0 +1,113 @@
#!/usr/bin/env python3

import os
from minio import Minio
import json
import logging
from io import BytesIO

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def test_minio():
    try:
        # Get MinIO host from environment variable or use default
        minio_host = os.getenv("MINIO_HOST", "localhost:9000")
        logger.info(f"Using MinIO host: {minio_host}")

        # 1. Create MinIO client
        logger.info("Creating MinIO client...")
        client = Minio(
            minio_host,
            access_key="minioadmin",
            secret_key="minioadmin",
            secure=False
        )

        # 2. Test bucket operations
        bucket_name = "knowledge-files"  # Changed to match the actual bucket name
        logger.info(f"Testing bucket operations with {bucket_name}...")

        # Create bucket if it doesn't exist
        if not client.bucket_exists(bucket_name):
            client.make_bucket(bucket_name)
            logger.info(f"Created new bucket: {bucket_name}")
        else:
            logger.info(f"Using existing bucket: {bucket_name}")

        # Set bucket policy - allow all operations
        policy = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Principal": {"AWS": ["*"]},
                    "Action": [
                        "s3:GetBucketLocation",
                        "s3:ListBucket",
                        "s3:ListBucketMultipartUploads"
                    ],
                    "Resource": [f"arn:aws:s3:::{bucket_name}"]
                },
                {
                    "Effect": "Allow",
                    "Principal": {"AWS": ["*"]},
                    "Action": [
                        "s3:AbortMultipartUpload",
                        "s3:DeleteObject",
                        "s3:GetObject",
                        "s3:ListMultipartUploadParts",
                        "s3:PutObject"
                    ],
                    "Resource": [f"arn:aws:s3:::{bucket_name}/*"]
                }
            ]
        }

        try:
            client.set_bucket_policy(bucket_name, json.dumps(policy))
            logger.info("Set bucket policy successfully")
        except Exception as e:
            logger.warning(f"Failed to set bucket policy: {e}")

        # 3. Test file upload
        source_file = "init-scripts/files/haley_system.txt"
        object_name = "haley_system.txt"

        if os.path.exists(source_file):
            # Get file size
            file_size = os.path.getsize(source_file)
            logger.info(f"Found source file: {source_file} (size: {file_size} bytes)")

            # Upload file
            with open(source_file, 'rb') as file_data:
                client.put_object(
                    bucket_name,
                    object_name,
                    file_data,
                    file_size,
                    content_type="text/plain"
                )
            logger.info(f"Uploaded file: {object_name}")

            # 4. Test file download
            data = client.get_object(bucket_name, object_name)
            content = data.read().decode('utf-8')
            logger.info(f"Successfully downloaded file. First 100 chars: {content[:100]}...")

            # 5. Verify file exists
            stat = client.stat_object(bucket_name, object_name)
            logger.info(f"File stats: {stat}")

            logger.info("Test completed successfully!")
        else:
            logger.error(f"Source file not found: {source_file}")
            raise FileNotFoundError(f"Source file not found: {source_file}")

    except Exception as e:
        logger.error(f"Test failed: {str(e)}")
        raise

if __name__ == "__main__":
    test_minio()

isa_model/training/llm_model/annotation/tests/test_minio_upload.py
@@ -0,0 +1,43 @@
#!/usr/bin/env python3
import os
from minio import Minio
import io

# MinIO client setup
client = Minio(
    "localhost:9000",
    access_key="minioadmin",
    secret_key="minioadmin",
    secure=False
)

# Test bucket name
bucket_name = "test-bucket"

# Create bucket if not exists
if not client.bucket_exists(bucket_name):
    client.make_bucket(bucket_name)
    print(f"Created bucket: {bucket_name}")
else:
    print(f"Bucket already exists: {bucket_name}")

# Upload test file
source_file = "init-scripts/files/haley_system.txt"
if os.path.exists(source_file):
    with open(source_file, 'rb') as file_data:
        content = file_data.read()
    # Upload file
    client.put_object(
        bucket_name,
        "haley_system.txt",
        io.BytesIO(content),
        len(content),
        content_type="text/plain"
    )
    print(f"Successfully uploaded {source_file}")

    # Verify upload
    stat = client.stat_object(bucket_name, "haley_system.txt")
    print(f"File stats: {stat}")
else:
    print(f"Source file not found: {source_file}")

isa_model/training/llm_model/annotation/views/annotation_controller.py
@@ -0,0 +1,158 @@
# app/services/llm_model/tracing/annotation/annotation_controller.py
from typing import Dict, Any, List, Optional
from datetime import datetime
from bson import ObjectId
from app.config.config_manager import config_manager
from app.services.training.llm_model.annotation.annotation_schema import AnnotationFeedback, RatingScale
from app.services.training.llm_model.annotation.storage.dataset_manager import DatasetManager


class AnnotationController:
    def __init__(self):
        self.logger = config_manager.get_logger(__name__)

    async def get_pending_annotations(
        self,
        project_name: str,
        category: Optional[str] = None,
        min_rating: Optional[int] = None,
        page: int = 1,
        limit: int = 10
    ) -> Dict[str, Any]:
        """Get filtered list of pending annotations"""
        db = await config_manager.get_db('mongodb')
        collection = db['annotations']

        # Build query with filters
        query = {"status": "pending", "project_name": project_name}
        if category:
            query["annotation_type"] = category
        if min_rating:
            query["items.feedback.rating"] = {"$gte": min_rating}

        annotations = await collection.find(query)\
            .sort("created_at", -1)\
            .skip((page - 1) * limit)\
            .limit(limit)\
            .to_list(length=limit)

        return {
            "annotations": annotations,
            "pagination": {
                "page": page,
                "limit": limit,
                "total": await collection.count_documents(query)
            }
        }

    async def submit_annotation(
        self,
        annotation_id: str,
        item_id: str,
        feedback: AnnotationFeedback,
        annotator_id: str
    ) -> Dict[str, Any]:
        """Submit and process annotation feedback"""
        db = await config_manager.get_db('mongodb')
        collection = db['annotations']

        # Determine if annotation should be selected for training
        is_selected = self._evaluate_for_training(feedback)
        feedback_dict = feedback.dict()
        feedback_dict["is_selected_for_training"] = is_selected

        # Update annotation
        result = await collection.update_one(
            {
                "_id": ObjectId(annotation_id),
                "items.item_id": item_id
            },
            {
                "$set": {
                    "items.$.feedback": feedback_dict,
                    "items.$.status": "completed",
                    "items.$.annotated_at": datetime.utcnow().isoformat(),
                    "items.$.annotator_id": annotator_id,
                    "items.$.training_status": "pending" if is_selected else "none"
                }
            }
        )

        # Process for training if selected
        if is_selected:
            await self._queue_for_training(annotation_id, item_id, feedback)

        return {
            "status": "success",
            "selected_for_training": is_selected,
            "message": "Annotation submitted successfully"
        }

    def _evaluate_for_training(self, feedback: AnnotationFeedback) -> bool:
        """Evaluate if annotation should be used for training"""
        # Select for SFT if rating is excellent and aspects are positive
        if feedback.rating == RatingScale.EXCELLENT:
            aspects = feedback.aspects
            if all([
                aspects.factually_correct,
                aspects.relevant,
                not aspects.harmful,
                not aspects.biased
            ]):
                return True

        # Select for RLHF if better response is provided
        if feedback.better_response:
            return True

        return False

    async def _queue_for_training(
        self,
        annotation_id: str,
        item_id: str,
        feedback: AnnotationFeedback
    ):
        """Queue selected annotations for training data generation"""
        db = await config_manager.get_db('mongodb')
        training_queue = db['training_queue']

        await training_queue.insert_one({
            "annotation_id": annotation_id,
            "item_id": item_id,
            "type": "sft" if feedback.rating == RatingScale.EXCELLENT else "rlhf",
            "feedback": feedback.dict(),
            "status": "pending",
            "created_at": datetime.utcnow().isoformat()
        })

class DatasetPreparationProcessor:
    def __init__(self):
        self.logger = config_manager.get_logger(__name__)
        self.dataset_manager = DatasetManager()
        self.batch_size = 1000  # Configure as needed

    async def process_annotation_queue(self) -> None:
        """Process pending annotations and prepare datasets"""
        db = await config_manager.get_db('mongodb')
        annotation_queue = db['dataset_preparation_queue']

        # Process items for SFT dataset
        sft_items = await self._get_pending_annotations("sft")
        if len(sft_items) >= self.batch_size:
            await self._create_sft_dataset(sft_items)

        # Process items for RLHF dataset
        rlhf_items = await self._get_pending_annotations("rlhf")
        if len(rlhf_items) >= self.batch_size:
            await self._create_rlhf_dataset(rlhf_items)

    async def _get_pending_annotations(self, dataset_type: str) -> List[Dict[str, Any]]:
        """Get pending annotations for dataset preparation"""
        db = await config_manager.get_db('mongodb')
        queue = db['dataset_preparation_queue']

        return await queue.find({
            "status": "pending",
            "dataset_type": dataset_type
        }).to_list(length=self.batch_size)

isa_model-0.1.0.dist-info/METADATA
@@ -0,0 +1,116 @@
Metadata-Version: 2.4
Name: isa-model
Version: 0.1.0
Summary: Unified AI model serving framework
Author-email: isA_Model Contributors <your.email@example.com>
License: MIT
Classifier: Development Status :: 3 - Alpha
Classifier: Intended Audience :: Developers
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: MIT License
Requires-Python: >=3.8
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: fastapi>=0.95.0
Requires-Dist: numpy>=1.20.0
Requires-Dist: httpx>=0.23.0
Requires-Dist: pydantic>=2.0.0
Requires-Dist: uvicorn>=0.22.0
Requires-Dist: requests>=2.28.0
Requires-Dist: aiohttp>=3.8.0
Requires-Dist: transformers>=4.30.0
Requires-Dist: langchain-core>=0.1.0
Requires-Dist: tritonclient[grpc,http]>=2.30.0
Requires-Dist: huggingface-hub>=0.16.0
Requires-Dist: kubernetes>=25.3.0
Requires-Dist: mlflow>=2.4.0
Requires-Dist: torch>=2.0.0
Dynamic: license-file

# isA_Model - AI Service Factory

isA_Model is a lightweight AI service factory for managing and calling different AI models and service providers through a single interface.

## Features

- Support for multiple AI providers (Ollama, OpenAI, Replicate, Triton)
- Unified API interface
- Flexible factory pattern
- Async support
- Singleton pattern with efficient caching

## Installation

```bash
pip install -r requirements.txt
```

## Quick Start

Using the AI factory is straightforward (the `await` calls below assume an async context, e.g. inside a coroutine run with `asyncio.run`):

```python
from isa_model.inference.ai_factory import AIFactory
from isa_model.inference.base import ModelType

# Get the factory instance
factory = AIFactory()

# LLM example - using Ollama
llm = factory.get_llm(model_name="llama3.1", provider="ollama")
response = await llm.generate("Hello, please introduce yourself.")
print(response)

# Image generation example - using Replicate
vision_service = factory.get_vision_model(
    model_name="stability-ai/sdxl:c221b2b8ef527988fb59bf24a8b97c4561f1c671f73bd389f866bfb27c061316",
    provider="replicate",
    config={"api_token": "your_replicate_token"}
)
result = await vision_service.generate_image({
    "prompt": "A beautiful sunset over mountains",
    "num_inference_steps": 25
})
print(result["urls"])
```

## Factory Architecture

isA_Model uses a three-layer architecture:

1. **Client layer** - application code
2. **Service layer** - model service implementations (LLM, vision, embedding, etc.)
3. **Provider layer** - underlying API integrations (Ollama, OpenAI, Replicate, etc.)

### Key Components

- `AIFactory` - central factory class providing access to models and services
- `BaseService` - base class for all services
- `BaseProvider` - base class for all providers
- Concrete service implementations such as `ReplicateVisionService` and `OllamaLLMService`

## Supported Model Types

- **LLM** - large language models
- **VISION** - image generation and analysis
- **EMBEDDING** - text embeddings
- **AUDIO** - speech recognition
- **RERANK** - reranking

## Examples

See the `test_*.py` files for more usage examples.

## Environment Variables

Add API keys and other configuration to a `.env.local` file:

```
OPENAI_API_KEY=your_openai_key
REPLICATE_API_TOKEN=your_replicate_token
```

## License

MIT
|