isa-model 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, exactly as they appear in their public registry. It is provided for informational purposes only.
Files changed (76)
  1. isa_model/__init__.py +1 -1
  2. isa_model/core/model_registry.py +273 -46
  3. isa_model/deployment/gpu_fp16_ds8/models/deepseek_r1/1/model.py +120 -0
  4. isa_model/deployment/gpu_fp16_ds8/scripts/download_model.py +18 -0
  5. isa_model/deployment/gpu_int8_ds8/app/server.py +66 -0
  6. isa_model/deployment/gpu_int8_ds8/scripts/test_client.py +43 -0
  7. isa_model/deployment/gpu_int8_ds8/scripts/test_client_os.py +35 -0
  8. isa_model/eval/__init__.py +56 -0
  9. isa_model/eval/benchmarks.py +469 -0
  10. isa_model/eval/factory.py +582 -0
  11. isa_model/eval/metrics.py +628 -0
  12. isa_model/inference/ai_factory.py +98 -93
  13. isa_model/inference/providers/openai_provider.py +21 -7
  14. isa_model/inference/providers/replicate_provider.py +18 -5
  15. isa_model/inference/providers/triton_provider.py +1 -1
  16. isa_model/inference/services/audio/base_stt_service.py +91 -0
  17. isa_model/inference/services/audio/base_tts_service.py +136 -0
  18. isa_model/inference/services/audio/{yyds_audio_service.py → openai_tts_service.py} +4 -4
  19. isa_model/inference/services/embedding/ollama_embed_service.py +48 -36
  20. isa_model/inference/services/llm/__init__.py +0 -4
  21. isa_model/inference/services/llm/base_llm_service.py +134 -0
  22. isa_model/inference/services/llm/ollama_llm_service.py +1 -10
  23. isa_model/inference/services/llm/openai_llm_service.py +70 -61
  24. isa_model/inference/services/vision/__init__.py +1 -1
  25. isa_model/inference/services/vision/ollama_vision_service.py +4 -4
  26. isa_model/inference/services/vision/{yyds_vision_service.py → openai_vision_service.py} +5 -5
  27. isa_model/inference/services/vision/replicate_image_gen_service.py +185 -0
  28. isa_model/training/__init__.py +44 -0
  29. isa_model/training/factory.py +393 -0
  30. isa_model-0.2.0.dist-info/METADATA +327 -0
  31. {isa_model-0.1.0.dist-info → isa_model-0.2.0.dist-info}/RECORD +35 -60
  32. isa_model/deployment/mlflow_gateway/__init__.py +0 -8
  33. isa_model/deployment/mlflow_gateway/start_gateway.py +0 -65
  34. isa_model/deployment/unified_multimodal_client.py +0 -341
  35. isa_model/inference/adapter/triton_adapter.py +0 -453
  36. isa_model/inference/backends/Pytorch/bge_embed_backend.py +0 -188
  37. isa_model/inference/backends/Pytorch/gemma_backend.py +0 -167
  38. isa_model/inference/backends/Pytorch/llama_backend.py +0 -166
  39. isa_model/inference/backends/Pytorch/whisper_backend.py +0 -194
  40. isa_model/inference/backends/__init__.py +0 -53
  41. isa_model/inference/backends/base_backend_client.py +0 -26
  42. isa_model/inference/backends/container_services.py +0 -104
  43. isa_model/inference/backends/local_services.py +0 -72
  44. isa_model/inference/backends/openai_client.py +0 -130
  45. isa_model/inference/backends/replicate_client.py +0 -197
  46. isa_model/inference/backends/third_party_services.py +0 -239
  47. isa_model/inference/backends/triton_client.py +0 -97
  48. isa_model/inference/client_sdk/client.py +0 -134
  49. isa_model/inference/client_sdk/client_data_std.py +0 -34
  50. isa_model/inference/client_sdk/client_sdk_schema.py +0 -16
  51. isa_model/inference/client_sdk/exceptions.py +0 -0
  52. isa_model/inference/engine/triton/model_repository/bge/1/model.py +0 -174
  53. isa_model/inference/engine/triton/model_repository/gemma/1/model.py +0 -250
  54. isa_model/inference/engine/triton/model_repository/llama/1/model.py +0 -76
  55. isa_model/inference/engine/triton/model_repository/whisper/1/model.py +0 -195
  56. isa_model/inference/providers/vllm_provider.py +0 -0
  57. isa_model/inference/providers/yyds_provider.py +0 -83
  58. isa_model/inference/services/audio/fish_speech/handler.py +0 -215
  59. isa_model/inference/services/audio/runpod_tts_fish_service.py +0 -212
  60. isa_model/inference/services/audio/triton_speech_service.py +0 -138
  61. isa_model/inference/services/audio/whisper_service.py +0 -186
  62. isa_model/inference/services/base_tts_service.py +0 -66
  63. isa_model/inference/services/embedding/bge_service.py +0 -183
  64. isa_model/inference/services/embedding/ollama_rerank_service.py +0 -118
  65. isa_model/inference/services/embedding/onnx_rerank_service.py +0 -73
  66. isa_model/inference/services/llm/gemma_service.py +0 -143
  67. isa_model/inference/services/llm/llama_service.py +0 -143
  68. isa_model/inference/services/llm/replicate_llm_service.py +0 -179
  69. isa_model/inference/services/llm/triton_llm_service.py +0 -230
  70. isa_model/inference/services/vision/replicate_vision_service.py +0 -241
  71. isa_model/inference/services/vision/triton_vision_service.py +0 -199
  72. isa_model-0.1.0.dist-info/METADATA +0 -116
  73. isa_model/inference/{client_sdk/__init__.py → services/embedding/openai_embed_service.py} +0 -0
  74. {isa_model-0.1.0.dist-info → isa_model-0.2.0.dist-info}/WHEEL +0 -0
  75. {isa_model-0.1.0.dist-info → isa_model-0.2.0.dist-info}/licenses/LICENSE +0 -0
  76. {isa_model-0.1.0.dist-info → isa_model-0.2.0.dist-info}/top_level.txt +0 -0
--- isa_model/inference/services/audio/whisper_service.py
+++ /dev/null
@@ -1,186 +0,0 @@
- import logging
- import asyncio
- import io
- import numpy as np
- from typing import Dict, Any, Optional, Union, BinaryIO
-
- from isa_model.inference.services.base_service import BaseService
- from isa_model.inference.backends.triton_client import TritonClient
-
- logger = logging.getLogger(__name__)
-
-
- class WhisperService(BaseService):
-     """
-     Service for Whisper speech-to-text using Triton Inference Server.
-     """
-
-     def __init__(self, triton_url: str = "localhost:8001", model_name: str = "whisper"):
-         """
-         Initialize the Whisper service.
-
-         Args:
-             triton_url: URL of the Triton Inference Server
-             model_name: Name of the model in Triton
-         """
-         super().__init__()
-         self.triton_url = triton_url
-         self.model_name = model_name
-         self.client = None
-
-         # Default configuration
-         self.default_config = {
-             "language": "en",
-             "sampling_rate": 16000
-         }
-
-         self.logger = logger
-
-     async def load(self) -> None:
-         """
-         Load the client connection to Triton.
-         """
-         if self.is_loaded():
-             return
-
-         try:
-             from tritonclient.http import InferenceServerClient
-
-             # Create Triton client
-             self.logger.info(f"Connecting to Triton server at {self.triton_url}")
-             self.client = TritonClient(self.triton_url)
-
-             # Check if model is ready
-             if not await self.client.is_model_ready(self.model_name):
-                 self.logger.error(f"Model {self.model_name} is not ready on Triton server")
-                 raise RuntimeError(f"Model {self.model_name} is not ready on Triton server")
-
-             self._loaded = True
-             self.logger.info(f"Connected to Triton for model {self.model_name}")
-
-         except Exception as e:
-             self.logger.error(f"Failed to connect to Triton: {str(e)}")
-             raise
-
-     async def unload(self) -> None:
-         """
-         Unload the client connection.
-         """
-         if not self.is_loaded():
-             return
-
-         self.client = None
-         self._loaded = False
-         self.logger.info("Triton client connection closed")
-
-     async def transcribe(self,
-                          audio: Union[str, BinaryIO, bytes, np.ndarray],
-                          language: str = "en",
-                          config: Optional[Dict[str, Any]] = None) -> str:
-         """
-         Transcribe audio to text using Triton.
-
-         Args:
-             audio: Audio input (file path, file-like object, bytes, or numpy array)
-             language: Language code (e.g., "en", "fr")
-             config: Additional configuration parameters
-
-         Returns:
-             Transcribed text
-         """
-         if not self.is_loaded():
-             await self.load()
-
-         # Process audio to get numpy array
-         audio_array = await self._process_audio_input(audio)
-
-         # Get configuration
-         merged_config = self.default_config.copy()
-         if config:
-             merged_config.update(config)
-
-         # Override language if provided
-         if language:
-             merged_config["language"] = language
-
-         try:
-             # Prepare inputs
-             inputs = {
-                 "audio_input": audio_array,
-                 "language": np.array([merged_config["language"]], dtype=np.object_)
-             }
-
-             # Run inference
-             result = await self.client.infer(
-                 model_name=self.model_name,
-                 inputs=inputs,
-                 outputs=["text_output"]
-             )
-
-             # Extract transcription
-             transcription = result["text_output"][0].decode('utf-8')
-
-             return transcription
-
-         except Exception as e:
-             self.logger.error(f"Error during Whisper transcription: {str(e)}")
-             raise
-
-     async def _process_audio_input(self, audio: Union[str, BinaryIO, bytes, np.ndarray]) -> np.ndarray:
-         """
-         Process different types of audio inputs into a numpy array.
-
-         Args:
-             audio: Audio input (file path, file-like object, bytes, or numpy array)
-
-         Returns:
-             Numpy array of the audio
-         """
-         if isinstance(audio, np.ndarray):
-             return audio
-
-         try:
-             import librosa
-
-             if isinstance(audio, str):
-                 # File path
-                 y, sr = librosa.load(audio, sr=self.default_config["sampling_rate"])
-                 return y.astype(np.float32)
-
-             elif isinstance(audio, (io.IOBase, BinaryIO)):
-                 # File-like object
-                 audio.seek(0)
-                 y, sr = librosa.load(audio, sr=self.default_config["sampling_rate"])
-                 return y.astype(np.float32)
-
-             elif isinstance(audio, bytes):
-                 # Bytes
-                 with io.BytesIO(audio) as audio_bytes:
-                     y, sr = librosa.load(audio_bytes, sr=self.default_config["sampling_rate"])
-                     return y.astype(np.float32)
-
-             else:
-                 raise ValueError(f"Unsupported audio type: {type(audio)}")
-
-         except ImportError:
-             self.logger.error("librosa not installed. Please install with: pip install librosa")
-             raise
-         except Exception as e:
-             self.logger.error(f"Error processing audio: {str(e)}")
-             raise
-
-     def get_model_info(self) -> Dict[str, Any]:
-         """
-         Get information about the model.
-
-         Returns:
-             Dictionary containing model information
-         """
-         return {
-             "name": self.model_name,
-             "type": "speech",
-             "backend": "triton",
-             "url": self.triton_url,
-             "loaded": self.is_loaded(),
-             "config": self.default_config
-         }
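
For reference, a minimal usage sketch for the removed WhisperService, based only on the signatures above. The Triton endpoint, model name, and audio file are placeholders; it assumes a running Triton server with a ready whisper model:

    import asyncio
    from isa_model.inference.services.audio.whisper_service import WhisperService

    async def main():
        service = WhisperService(triton_url="localhost:8001", model_name="whisper")
        # transcribe() lazily calls load(), which connects and checks model readiness
        text = await service.transcribe("sample.wav", language="en")
        print(text)
        await service.unload()

    asyncio.run(main())

In 0.2.0 this module is gone; the STT surface is instead defined by the new isa_model/inference/services/audio/base_stt_service.py listed above.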
--- isa_model/inference/services/base_tts_service.py
+++ /dev/null
@@ -1,66 +0,0 @@
- from abc import abstractmethod
- from typing import Dict, Any, Optional, Union, BinaryIO
- from .base_service import BaseService
-
- class BaseTTSService(BaseService):
-     """Base class for Text-to-Speech services"""
-
-     @abstractmethod
-     async def generate_speech(
-         self,
-         text: str,
-         voice_id: Optional[str] = None,
-         language: Optional[str] = None,
-         speed: float = 1.0,
-         options: Optional[Dict[str, Any]] = None
-     ) -> bytes:
-         """
-         Generate speech from text
-
-         Args:
-             text: The text to convert to speech
-             voice_id: Optional voice identifier
-             language: Optional language code
-             speed: Speech speed factor (1.0 is normal speed)
-             options: Additional model-specific options
-
-         Returns:
-             Audio data as bytes
-         """
-         pass
-
-     @abstractmethod
-     async def save_to_file(
-         self,
-         text: str,
-         output_file: Union[str, BinaryIO],
-         voice_id: Optional[str] = None,
-         language: Optional[str] = None,
-         speed: float = 1.0,
-         options: Optional[Dict[str, Any]] = None
-     ) -> str:
-         """
-         Generate speech and save to file
-
-         Args:
-             text: The text to convert to speech
-             output_file: Path to output file or file-like object
-             voice_id: Optional voice identifier
-             language: Optional language code
-             speed: Speech speed factor (1.0 is normal speed)
-             options: Additional model-specific options
-
-         Returns:
-             Path to the saved file
-         """
-         pass
-
-     @abstractmethod
-     async def get_available_voices(self) -> Dict[str, Any]:
-         """
-         Get available voices for the TTS service
-
-         Returns:
-             Dictionary of available voices with their details
-         """
-         pass
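
This abstract base only fixed the 0.1.0 TTS contract; concrete services supplied the three methods. A hypothetical minimal subclass, purely illustrative (the class name and the silent-audio payload are not from the package):

    from typing import Dict, Any, Optional, Union, BinaryIO

    class SilentTTSService(BaseTTSService):  # hypothetical example, never shipped
        async def generate_speech(self, text: str, voice_id: Optional[str] = None,
                                  language: Optional[str] = None, speed: float = 1.0,
                                  options: Optional[Dict[str, Any]] = None) -> bytes:
            # Stand-in payload: one second of silent 16 kHz 16-bit PCM
            return b"\x00" * 32000

        async def save_to_file(self, text: str, output_file: Union[str, BinaryIO],
                               voice_id: Optional[str] = None, language: Optional[str] = None,
                               speed: float = 1.0, options: Optional[Dict[str, Any]] = None) -> str:
            audio = await self.generate_speech(text, voice_id, language, speed, options)
            if isinstance(output_file, str):
                with open(output_file, "wb") as f:
                    f.write(audio)
                return output_file
            output_file.write(audio)
            return getattr(output_file, "name", "<buffer>")

        async def get_available_voices(self) -> Dict[str, Any]:
            return {"default": {"language": "en"}}

The 0.2.0 replacement lives at isa_model/inference/services/audio/base_tts_service.py (+136 lines in the list above).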
--- isa_model/inference/services/embedding/bge_service.py
+++ /dev/null
@@ -1,183 +0,0 @@
- import logging
- import asyncio
- import numpy as np
- from typing import Dict, List, Any, Optional, Union
-
- from isa_model.inference.services.base_service import BaseService
- from isa_model.inference.backends.triton_client import TritonClient
-
- logger = logging.getLogger(__name__)
-
-
- class BgeEmbeddingService(BaseService):
-     """
-     Service for BGE embedding using Triton Inference Server.
-     """
-
-     def __init__(self, triton_url: str = "localhost:8001", model_name: str = "bge_embed"):
-         """
-         Initialize the BGE embedding service.
-
-         Args:
-             triton_url: URL of the Triton Inference Server
-             model_name: Name of the model in Triton
-         """
-         super().__init__()
-         self.triton_url = triton_url
-         self.model_name = model_name
-         self.client = None
-
-         # Default configuration
-         self.default_config = {
-             "normalize": True
-         }
-
-         self.logger = logger
-
-     async def load(self) -> None:
-         """
-         Load the client connection to Triton.
-         """
-         if self.is_loaded():
-             return
-
-         try:
-             # Create Triton client
-             self.logger.info(f"Connecting to Triton server at {self.triton_url}")
-             self.client = TritonClient(self.triton_url)
-
-             # Check if model is ready
-             if not await self.client.is_model_ready(self.model_name):
-                 self.logger.error(f"Model {self.model_name} is not ready on Triton server")
-                 raise RuntimeError(f"Model {self.model_name} is not ready on Triton server")
-
-             self._loaded = True
-             self.logger.info(f"Connected to Triton for model {self.model_name}")
-
-         except Exception as e:
-             self.logger.error(f"Failed to connect to Triton: {str(e)}")
-             raise
-
-     async def unload(self) -> None:
-         """
-         Unload the client connection.
-         """
-         if not self.is_loaded():
-             return
-
-         self.client = None
-         self._loaded = False
-         self.logger.info("Triton client connection closed")
-
-     async def embed(self,
-                     texts: Union[str, List[str]],
-                     normalize: Optional[bool] = None) -> np.ndarray:
-         """
-         Generate embeddings for texts using Triton.
-
-         Args:
-             texts: Single text or list of texts to embed
-             normalize: Whether to normalize embeddings (if None, use default)
-
-         Returns:
-             Numpy array of embeddings, shape [batch_size, embedding_dim]
-         """
-         if not self.is_loaded():
-             await self.load()
-
-         # Handle single text input
-         if isinstance(texts, str):
-             texts = [texts]
-
-         # Use default normalize setting if not specified
-         if normalize is None:
-             normalize = self.default_config["normalize"]
-
-         try:
-             # Prepare inputs
-             inputs = {
-                 "text_input": texts,
-                 "normalize": np.array([normalize], dtype=bool)
-             }
-
-             # Run inference
-             result = await self.client.infer(
-                 model_name=self.model_name,
-                 inputs=inputs,
-                 outputs=["embedding"]
-             )
-
-             # Extract embeddings
-             embeddings = result["embedding"]
-
-             return embeddings
-
-         except Exception as e:
-             self.logger.error(f"Error during embedding generation: {str(e)}")
-             raise
-
-     async def similarity(self,
-                          text1: str,
-                          text2: str,
-                          normalize: Optional[bool] = None) -> float:
-         """
-         Calculate the similarity between two texts.
-
-         Args:
-             text1: First text
-             text2: Second text
-             normalize: Whether to normalize embeddings (if None, use default)
-
-         Returns:
-             Cosine similarity score (float between -1 and 1)
-         """
-         # Generate embeddings for both texts
-         embeddings = await self.embed([text1, text2], normalize=normalize)
-
-         # Calculate cosine similarity
-         from sklearn.metrics.pairwise import cosine_similarity
-         similarity = cosine_similarity(embeddings[0:1], embeddings[1:2])[0][0]
-
-         return float(similarity)
-
-     async def batch_similarity(self,
-                                queries: List[str],
-                                documents: List[str],
-                                normalize: Optional[bool] = None) -> np.ndarray:
-         """
-         Calculate similarities between queries and documents.
-
-         Args:
-             queries: List of query texts
-             documents: List of document texts
-             normalize: Whether to normalize embeddings (if None, use default)
-
-         Returns:
-             Numpy array of similarity scores, shape [len(queries), len(documents)]
-         """
-         # Generate embeddings for queries and documents
-         query_embeddings = await self.embed(queries, normalize=normalize)
-         doc_embeddings = await self.embed(documents, normalize=normalize)
-
-         # Calculate cosine similarities
-         from sklearn.metrics.pairwise import cosine_similarity
-         similarities = cosine_similarity(query_embeddings, doc_embeddings)
-
-         return similarities
-
-     def get_model_info(self) -> Dict[str, Any]:
-         """
-         Get information about the model.
-
-         Returns:
-             Dictionary containing model information
-         """
-         return {
-             "name": self.model_name,
-             "type": "embedding",
-             "backend": "triton",
-             "url": self.triton_url,
-             "loaded": self.is_loaded(),
-             "embedding_dim": 1024, # Typical for BGE models
-             "config": self.default_config
-         }
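
A minimal 0.1.0-era usage sketch for the removed embedding service; the endpoint and model name are placeholders. Note that similarity() imports scikit-learn at call time, so scikit-learn had to be installed even though it never appears in the module's top-level imports:

    import asyncio
    from isa_model.inference.services.embedding.bge_service import BgeEmbeddingService

    async def main():
        service = BgeEmbeddingService(triton_url="localhost:8001", model_name="bge_embed")
        vectors = await service.embed(["hello world", "bonjour le monde"])
        print(vectors.shape)  # e.g. (2, 1024) for a typical BGE model
        score = await service.similarity("hello world", "bonjour le monde")
        print(score)  # cosine similarity in [-1, 1]
        await service.unload()

    asyncio.run(main())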
--- isa_model/inference/services/embedding/ollama_rerank_service.py
+++ /dev/null
@@ -1,118 +0,0 @@
- from typing import Dict, Any, List, Optional
- from ollama import AsyncClient
- from ...base_service import BaseRerankService
- from ...base_provider import BaseProvider
- from app.config.config_manager import config_manager
- import httpx
- import asyncio
- from functools import wraps
-
- logger = config_manager.get_logger(__name__)
-
- def retry_on_connection_error(max_retries=3, delay=1):
-     """Decorator to retry on connection errors"""
-     def decorator(func):
-         @wraps(func)
-         async def wrapper(*args, **kwargs):
-             last_error = None
-             for attempt in range(max_retries):
-                 try:
-                     return await func(*args, **kwargs)
-                 except (httpx.RemoteProtocolError, httpx.ConnectError) as e:
-                     last_error = e
-                     if attempt < max_retries - 1:
-                         logger.warning(f"Connection error on attempt {attempt + 1}, retrying in {delay}s: {str(e)}")
-                         await asyncio.sleep(delay)
-                         continue
-                     raise last_error
-         return wrapper
-     return decorator
-
- class OllamaRerankService(BaseRerankService):
-     """Reranking service wrapper around Ollama"""
-
-     def __init__(self, provider: 'BaseProvider', model_name: str):
-         super().__init__(provider, model_name)
-
-         # Initialize the Ollama client for reranking
-         self.client = AsyncClient(
-             host=self.config.get('base_url', 'http://localhost:11434')
-         )
-         self.model_name = model_name
-
-     @retry_on_connection_error()
-     async def rerank(
-         self,
-         query: str,
-         documents: List[Dict],
-         top_k: int = 5
-     ) -> List[Dict]:
-         """Rerank documents based on query relevance"""
-         try:
-             if not query:
-                 raise ValueError("Query cannot be empty")
-             if not documents:
-                 return []
-
-             results = []
-             for doc in documents:
-                 if "content" not in doc:
-                     raise ValueError("Each document must have a 'content' field")
-
-                 # Format prompt for relevance scoring
-                 prompt = f"""Rate the relevance of the following text to the query on a scale of 0-100.
-                 Query: {query}
-                 Text: {doc['content']}
-                 Only respond with a number between 0 and 100."""
-
-                 # Get relevance score using direct Ollama API
-                 response = await self.client.generate(
-                     model=self.model_name,
-                     prompt=prompt,
-                     stream=False
-                 )
-                 try:
-                     score = float(response.response.strip())
-                     score = max(0.0, min(100.0, score)) / 100.0 # Normalize to 0-1
-                 except ValueError:
-                     logger.warning(f"Could not parse score from response: {response.response}")
-                     score = 0.0
-
-                 # Update document with rerank score
-                 doc_copy = doc.copy()
-                 doc_copy["rerank_score"] = score
-                 doc_copy["final_score"] = doc.get("score", 1.0) * score
-                 results.append(doc_copy)
-
-             # Sort by final score in descending order
-             results.sort(key=lambda x: x["final_score"], reverse=True)
-             return results[:top_k]
-
-         except Exception as e:
-             logger.error(f"Error in rerank: {e}")
-             raise
-
-     @retry_on_connection_error()
-     async def rerank_texts(
-         self,
-         query: str,
-         texts: List[str]
-     ) -> List[Dict]:
-         """Rerank raw texts based on query relevance"""
-         try:
-             if not query:
-                 raise ValueError("Query cannot be empty")
-             if not texts:
-                 return []
-
-             # Convert texts to document format
-             documents = [{"content": text, "score": 1.0} for text in texts]
-             return await self.rerank(query, documents)
-
-         except Exception as e:
-             logger.error(f"Error in rerank_texts: {str(e)}")
-             raise
-
-     async def close(self):
-         """Cleanup resources"""
-         await self.client.aclose()
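
This reranker scored relevance by prompting an Ollama model once per document, so cost grew linearly with the candidate list. A usage sketch under stated assumptions: provider is an already-constructed 0.1.0 provider object and the model name is an arbitrary local Ollama model, both placeholders:

    import asyncio

    async def main(provider):
        service = OllamaRerankService(provider, model_name="llama3")  # placeholders
        docs = [
            {"content": "Paris is the capital of France.", "score": 1.0},
            {"content": "The giant panda is native to China.", "score": 1.0},
        ]
        ranked = await service.rerank("What is the capital of France?", docs, top_k=1)
        print(ranked[0]["content"], ranked[0]["rerank_score"])
        await service.close()

Contrast this free-form LLM scoring with the ONNX cross-encoder service below, which produces a relevance logit in a single forward pass per pair.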
--- isa_model/inference/services/embedding/onnx_rerank_service.py
+++ /dev/null
@@ -1,73 +0,0 @@
- from typing import Dict, Any, List, Union, Optional
- from ...base_service import BaseService
- from ...base_provider import BaseProvider
- from transformers import AutoTokenizer
- import onnxruntime as ort
- import numpy as np
- import torch
- import os
- from pathlib import Path
-
- class ONNXRerankService(BaseService):
-     """ONNX Reranker service for BGE models"""
-
-     def __init__(self, provider: 'BaseProvider', model_name: str):
-         super().__init__(provider, model_name)
-         self.model_path = self._get_model_path(model_name)
-         self.session = provider.get_session(self.model_path)
-
-         # Initialize tokenizer
-         self.tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-m3')
-         self.max_length = 512
-
-     def _get_model_path(self, model_name: str) -> str:
-         """Get path to ONNX model file"""
-         base_dir = Path(__file__).parent
-         model_path = base_dir / "model_converted" / model_name / "model.onnx"
-         if not model_path.exists():
-             raise FileNotFoundError(f"ONNX model not found at {model_path}. Please run the conversion script first.")
-         return str(model_path)
-
-     async def compute_score(self,
-                             pairs: Union[List[str], List[List[str]]],
-                             normalize: bool = False) -> Union[float, List[float]]:
-         """Compute reranking scores for query-passage pairs"""
-         try:
-             # Handle single pair case
-             if isinstance(pairs[0], str):
-                 pairs = [pairs]
-
-             # Tokenize inputs
-             inputs = self.tokenizer(
-                 pairs,
-                 padding=True,
-                 truncation=True,
-                 return_tensors='np',
-                 max_length=self.max_length
-             )
-
-             # Run inference
-             ort_inputs = {
-                 'input_ids': inputs['input_ids'],
-                 'attention_mask': inputs['attention_mask']
-             }
-
-             scores = self.session.run(
-                 None, # output names, None means all
-                 ort_inputs
-             )[0]
-
-             # Convert to float and optionally normalize
-             scores = scores.flatten().tolist()
-             if normalize:
-                 scores = [self._sigmoid(score) for score in scores]
-
-             # Return single score for single pair
-             return scores[0] if len(scores) == 1 else scores
-
-         except Exception as e:
-             raise RuntimeError(f"ONNX reranking failed: {e}")
-
-     def _sigmoid(self, x: float) -> float:
-         """Apply sigmoid function to score"""
-         return 1 / (1 + np.exp(-x))
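
A usage sketch for the removed ONNX reranker. Per the constructor above, the provider must expose get_session() returning an onnxruntime session; the provider object and the model directory name here are placeholders:

    import asyncio

    async def main(provider):
        service = ONNXRerankService(provider, model_name="bge-reranker-v2-m3")  # placeholders
        score = await service.compute_score(
            ["what is a panda?", "The giant panda is a bear species endemic to China."],
            normalize=True,  # sigmoid maps the raw logit into (0, 1)
        )
        print(score)  # a single float for a single pair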