ray-embedding 0.11.8__tar.gz → 0.12.1__tar.gz
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release
This version of ray-embedding has been flagged as potentially problematic.
- {ray_embedding-0.11.8 → ray_embedding-0.12.1}/PKG-INFO +1 -1
- {ray_embedding-0.11.8 → ray_embedding-0.12.1}/ray_embedding/deploy.py +5 -5
- {ray_embedding-0.11.8 → ray_embedding-0.12.1}/ray_embedding/dto.py +2 -0
- {ray_embedding-0.11.8 → ray_embedding-0.12.1}/ray_embedding/embedding_model.py +11 -10
- ray_embedding-0.12.1/ray_embedding/model_router.py +111 -0
- {ray_embedding-0.11.8 → ray_embedding-0.12.1}/ray_embedding.egg-info/PKG-INFO +1 -1
- {ray_embedding-0.11.8 → ray_embedding-0.12.1}/setup.cfg +1 -1
- ray_embedding-0.11.8/ray_embedding/model_router.py +0 -84
- {ray_embedding-0.11.8 → ray_embedding-0.12.1}/README.md +0 -0
- {ray_embedding-0.11.8 → ray_embedding-0.12.1}/pyproject.toml +0 -0
- {ray_embedding-0.11.8 → ray_embedding-0.12.1}/ray_embedding/__init__.py +0 -0
- {ray_embedding-0.11.8 → ray_embedding-0.12.1}/ray_embedding.egg-info/SOURCES.txt +0 -0
- {ray_embedding-0.11.8 → ray_embedding-0.12.1}/ray_embedding.egg-info/dependency_links.txt +0 -0
- {ray_embedding-0.11.8 → ray_embedding-0.12.1}/ray_embedding.egg-info/top_level.txt +0 -0
ray_embedding/deploy.py

@@ -1,12 +1,10 @@
 import os
-
+
+import torch
 from ray.serve import Application
-from ray.serve.handle import DeploymentHandle
 
 from ray_embedding.dto import AppConfig, ModelDeploymentConfig, DeployedModel
 from ray_embedding.embedding_model import EmbeddingModel
-import torch
-
 from ray_embedding.model_router import ModelRouter
 
 
@@ -19,6 +17,7 @@ def build_model(model_config: ModelDeploymentConfig) -> DeployedModel:
     matryoshka_dim = model_config.matryoshka_dim
     trust_remote_code = model_config.trust_remote_code or False
     model_kwargs = model_config.model_kwargs or {}
+
     if "torch_dtype" in model_kwargs:
         torch_dtype = model_kwargs["torch_dtype"].strip()
         if torch_dtype == "float16":
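The torch_dtype handling above turns a string from the model config into a real torch dtype before it reaches the model constructor. A minimal sketch of that mapping; only the float16 branch is visible in this hunk, so the other branches and the helper name resolve_torch_dtype are illustrative assumptions:

import torch

def resolve_torch_dtype(model_kwargs: dict) -> dict:
    # Replace a string "torch_dtype" entry (e.g. "float16") with the torch
    # dtype object, as build_model presumably does past the end of this hunk.
    if "torch_dtype" in model_kwargs:
        torch_dtype = model_kwargs["torch_dtype"].strip()
        if torch_dtype == "float16":
            model_kwargs["torch_dtype"] = torch.float16
        elif torch_dtype == "bfloat16":        # assumed branch, not shown in the diff
            model_kwargs["torch_dtype"] = torch.bfloat16
        elif torch_dtype == "float32":         # assumed branch, not shown in the diff
            model_kwargs["torch_dtype"] = torch.float32
        else:
            raise ValueError(f"Unsupported torch_dtype: {torch_dtype}")
    return model_kwargs

assert resolve_torch_dtype({"torch_dtype": "float16"})["torch_dtype"] is torch.float16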
@@ -48,7 +47,8 @@ def build_model(model_config: ModelDeploymentConfig) -> DeployedModel:
 def build_app(args: AppConfig) -> Application:
     model_router, models = args.model_router, args.models
     assert model_router and models
+    assert model_router.path_prefix
 
     deployed_models = {model_config.served_model_name: build_model(model_config) for model_config in models}
-    router = ModelRouter.options(name=model_router.deployment).bind(deployed_models)
+    router = ModelRouter.options(name=model_router.deployment).bind(deployed_models, model_router.path_prefix)
     return router
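build_app now requires model_router.path_prefix and forwards it to ModelRouter.bind. A hedged sketch of deployment args that would satisfy the new assertion; the exact AppConfig and ModelDeploymentConfig schemas live in ray_embedding/dto.py, so the field values below are partly inferred:

# Args for ray_embedding.deploy:build_app (field names partly inferred from this diff)
args = {
    "model_router": {
        "deployment": "model-router",
        "path_prefix": ["embeddings"],   # new in 0.12.1; must be non-empty
    },
    "models": [{
        "model": "intfloat/e5-small-v2",       # illustrative model id
        "served_model_name": "e5-small-v2",
        "batch_size": 32,
    }],
}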
ray_embedding/embedding_model.py

@@ -34,15 +34,17 @@ class EmbeddingModel:
             trust_remote_code=self.trust_remote_code,
             model_kwargs=self.model_kwargs)
 
-        self.logger.info(f"Successfully initialized
+        self.logger.info(f"Successfully initialized model {self.model} using device {self.torch_device}")
 
-    async def __call__(self, text: Union[str, List[str]], dimensions: Optional[int] = None):
-        """Compute embeddings for the input text using the
-        if isinstance(text,
-            text
+    async def __call__(self, text: Union[str, List[str]], dimensions: Optional[int] = None) -> List[List[float]]:
+        """Compute embeddings for the input text using the current model."""
+        if not text or (isinstance(text, list) and not all(text)):
+            raise ValueError("Input text is empty or invalid")
+
+        text = [text] if isinstance(text, str) else text
         truncate_dim = dimensions or self.matryoshka_dim
 
-        # Compute embeddings
+        # Compute embeddings in PyTorch format
         embeddings = self.embedding_model.encode(
             text, convert_to_tensor=True, normalize_embeddings=True, show_progress_bar=False,
         ).to(self.torch_device)
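The reworked __call__ wraps a bare string into a one-element list, rejects empty input, and is now annotated to return List[List[float]]. A usage sketch, assuming an already-initialized EmbeddingModel instance named model (construction arguments omitted):

import asyncio

async def demo(model):
    # A bare string is wrapped into a one-element list internally.
    single = await model("hello world")                  # -> [[0.12, -0.03, ...]]
    batch = await model(["a", "b"], dimensions=256)      # Matryoshka truncation to 256 dims
    try:
        await model([""])                                # empty strings are rejected in 0.12.1
    except ValueError as err:
        print(err)                                       # "Input text is empty or invalid"
    return single, batch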
@@ -53,9 +55,8 @@ class EmbeddingModel:
         embeddings = embeddings / torch.norm(embeddings, dim=1, keepdim=True)
 
         # Move all embeddings to CPU at once before conversion
-
-        return
-
+        embeddings_list = embeddings.cpu().tolist()
+        return embeddings_list
 
     def wait_for_cuda(self, wait: int = 10):
         if self.init_device == "cuda" and not torch.cuda.is_available():
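The return path now materializes the whole batch with one embeddings.cpu().tolist() call instead of converting rows one at a time, so there is a single device-to-host transfer per request. A small standalone sketch of the pattern (shapes are illustrative):

import torch

embeddings = torch.randn(4, 384)       # stand-in for normalized model output
as_lists = embeddings.cpu().tolist()   # one transfer, then pure-Python lists
# Equivalent but slower on GPU tensors: [row.cpu().tolist() for row in embeddings]
assert len(as_lists) == 4 and len(as_lists[0]) == 384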
@@ -65,7 +66,7 @@ class EmbeddingModel:
     def check_health(self):
         if self.init_device == "cuda":
             # Even though CUDA was available at init time,
-            # CUDA can become unavailable - this is a known problem in AWS EC2
+            # CUDA can become unavailable - this is a known problem in AWS EC2+Docker
            # https://github.com/ray-project/ray/issues/49594
             try:
                 nvmlInit()
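check_health probes NVML because a replica can lose its GPU at runtime (the EC2+Docker problem linked above). A hedged sketch of that probe, assuming the nvidia-ml-py bindings that provide nvmlInit; only the nvmlInit() call is visible in this hunk, so the rest of the body is illustrative:

from pynvml import nvmlInit, nvmlShutdown, NVMLError

def cuda_is_healthy() -> bool:
    # nvmlInit fails if the driver or device has disappeared out from under us.
    try:
        nvmlInit()
        return True
    except NVMLError:
        return False
    finally:
        try:
            nvmlShutdown()
        except NVMLError:
            pass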
ray_embedding-0.12.1/ray_embedding/model_router.py (new file)

@@ -0,0 +1,111 @@
+import asyncio
+import logging
+import time
+from typing import Optional, Dict, List, Tuple
+
+from fastapi import FastAPI, HTTPException
+from ray import serve
+from ray.serve.handle import DeploymentHandle
+
+from ray_embedding.dto import DeployedModel, EmbeddingRequest, EmbeddingResponse
+
+web_api = FastAPI(title="Ray Embeddings - OpenAI-compatible API")
+
+@serve.deployment
+@serve.ingress(web_api)
+class ModelRouter:
+    def __init__(self, deployed_models: Dict[str, DeployedModel], path_prefix: List[str], max_concurrency: Optional[int] = 32):
+        assert deployed_models, "models cannot be empty"
+        assert path_prefix, "path_prefix cannot be empty"
+
+        logging.basicConfig(level=logging.INFO)
+        self.logger = logging.getLogger(self.__class__.__name__)
+        self.deployed_models = deployed_models
+        self.path_prefix = [item.removeprefix("/").removesuffix("/") for item in path_prefix]
+        self.max_concurrency = max_concurrency
+        self.rate_limiter = asyncio.Semaphore(self.max_concurrency)
+        self.available_models = [
+            {"id": str(item),
+             "object": "model",
+             "created": int(time.time()),
+             "owned_by": "openai",
+             "permission": []} for item in self.deployed_models.keys()
+        ]
+        self.logger.info(f"Successfully registered models: {self.available_models}")
+
+    async def _compute_embeddings_from_resized_batches(self, model: str, inputs: List[str], dimensions: Optional[int] = None):
+        deployed_model = self.deployed_models[model]
+        model_handle = deployed_model.deployment_handle
+        batch_size = deployed_model.batch_size
+        num_retries = deployed_model.num_retries
+
+        # Resize the inputs into batch_size items, and dispatch in parallel
+        batches = [inputs[i:i+batch_size] for i in range(0, len(inputs), batch_size)]
+        if len(inputs) > batch_size:
+            self.logger.info(f"Original input (length {len(inputs)}) was resized "
+                             f"to {len(batches)} mini-batches, each with max length {batch_size}.")
+
+        # Call embedding model replicas in parallel (rate-limited)
+        tasks = [self._rate_limited_embedding_call(model_handle, batch, dimensions) for batch in batches]
+        all_results = await asyncio.gather(*tasks, return_exceptions=True)
+
+        # Retry any failed model calls
+        for i, result in enumerate(all_results):
+            if isinstance(result, Exception):
+                self.logger.warning(f"Retrying mini-batch {i} due to exception: {result}")
+                result_retried, retries = await self._retry_failed_embedding_call(model_handle, batches[i], dimensions,
+                                                                                  num_retries)
+                if retries >= num_retries and (isinstance(result_retried, Exception) or result_retried is None):
+                    raise result_retried or ValueError(f"Failed to compute `{model}` embeddings for mini-batch {i} after {num_retries} retries.")
+
+                all_results[i] = result_retried
+
+        # Flatten the results because `all_results` is a list of lists
+        self.logger.info(f"Successfully computed embeddings from {len(batches)} mini-batches")
+        return [emb for result in all_results for emb in result]
+
+    async def _rate_limited_embedding_call(self, model_handle: DeploymentHandle, batch: List[str], dimensions: Optional[int] = None):
+        async with self.rate_limiter:
+            return await model_handle.remote(batch, dimensions)
+
+    async def _retry_failed_embedding_call(self, model_handle: DeploymentHandle, batch: List[str],
+                                           dimensions: Optional[int] = None, num_retries: Optional[int] = 2) \
+            -> Tuple[List[List[float]] | Exception, int]:
+
+        result_retried, retries = None, 0
+        while retries < num_retries:
+            try:
+                result_retried = await model_handle.remote(batch, dimensions)
+            except Exception as e:
+                result_retried = e
+                self.logger.warning(e)
+            finally:
+                retries += 1
+                if not isinstance(result_retried, Exception) and result_retried is not None:
+                    break
+
+        return result_retried, retries
+
+    @web_api.post("/{path_prefix}/v1/embeddings", response_model=EmbeddingResponse)
+    async def compute_embeddings(self, path_prefix: str, request: EmbeddingRequest):
+        assert path_prefix in self.path_prefix, f"Invalid path prefix: {path_prefix}"
+        assert request.model in self.deployed_models, f"Invalid model: {request.model}"
+
+        try:
+            inputs = request.input if isinstance(request.input, list) else [request.input]
+            self.logger.info(f"Computing embeddings for a batch of {len(inputs)} texts using model: {request.model}")
+            embeddings = await self._compute_embeddings_from_resized_batches(request.model, inputs, request.dimensions)
+            response_data = [
+                {"index": idx, "embedding": emb}
+                for idx, emb in enumerate(embeddings)
+            ]
+            return EmbeddingResponse(object="list", data=response_data, model=request.model)
+        except Exception as e:
+            self.logger.error(f"Failed to create embeddings: {e}")
+            raise HTTPException(status_code=500, detail=str(e))
+
+    @web_api.get("/{path_prefix}/v1/models")
+    async def list_models(self, path_prefix: str):
+        """Returns the list of available models in OpenAI-compatible format."""
+        assert path_prefix in self.path_prefix, f"Invalid path prefix: {path_prefix}"
+        return {"object": "list", "data": self.available_models}
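The new router scopes the OpenAI-compatible routes under a configurable prefix, validates it against the configured list, and fans requests out to replicas in rate-limited mini-batches. A client-side sketch, assuming the Serve app listens on localhost:8000 and was deployed with path_prefix=["embeddings"] (both assumptions; the route shape follows the decorators above):

import requests

base = "http://localhost:8000/embeddings/v1"      # "/{path_prefix}/v1" per the routes above

models = requests.get(f"{base}/models").json()    # OpenAI-style model listing
resp = requests.post(f"{base}/embeddings", json={
    "model": models["data"][0]["id"],
    "input": ["hello", "world"],
    "dimensions": 256,                            # optional Matryoshka truncation
})
resp.raise_for_status()
print(resp.json()["data"][0]["embedding"][:4])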
ray_embedding-0.11.8/ray_embedding/model_router.py (removed)

@@ -1,84 +0,0 @@
-import asyncio
-import logging
-import time
-from typing import Optional, Dict, List
-
-from fastapi import FastAPI, HTTPException
-from ray import serve
-
-from ray_embedding.dto import EmbeddingResponse, EmbeddingRequest, DeployedModel
-
-web_api = FastAPI(title="Ray Embeddings - OpenAI-compatible API")
-
-@serve.deployment
-@serve.ingress(web_api)
-class ModelRouter:
-    def __init__(self, deployed_models: Dict[str, DeployedModel]):
-        assert deployed_models, "models cannot be empty"
-        logging.basicConfig(level=logging.INFO)
-        self.logger = logging.getLogger(self.__class__.__name__)
-        self.deployed_models = deployed_models
-        self.available_models = [
-            {"id": str(item),
-             "object": "model",
-             "created": int(time.time()),
-             "owned_by": "openai",
-             "permission": []} for item in self.deployed_models.keys()
-        ]
-        self.logger.info(f"Successfully registered models: {self.available_models}")
-
-    async def _compute_embeddings_from_resized_batches(self, model: str, inputs: List[str], dimensions: Optional[int] = None):
-        assert model in self.deployed_models
-        deployed_model = self.deployed_models[model]
-        model_handle = deployed_model.deployment_handle
-        batch_size = deployed_model.batch_size
-        num_retries = deployed_model.num_retries
-
-        # Resize the inputs into batch_size items, and dispatch in parallel
-        batches = [inputs[i:i+batch_size] for i in range(0, len(inputs), batch_size)]
-        if len(inputs) > batch_size:
-            self.logger.info(f"Original input (length {len(inputs)} was resized "
-                             f"to {len(batches)} mini-batches of length {batch_size}")
-        tasks = [model_handle.remote(batch, dimensions) for batch in batches]
-        all_results = await asyncio.gather(*tasks, return_exceptions=True)
-
-        # Retry any failed model calls
-        for i, result in enumerate(all_results):
-            if isinstance(result, Exception):
-                retries = 0
-                while retries < num_retries:
-                    try:
-                        all_results[i] = await model_handle.remote(batches[i], dimensions)
-                    except Exception as e:
-                        self.logger.warning(e)
-                    finally:
-                        retries += 1
-                        if not isinstance(all_results[i], Exception):
-                            break
-
-                if retries >= num_retries and isinstance(all_results[i], Exception):
-                    raise all_results[i]
-
-        # Flatten the results because all_results is a list of lists
-        self.logger.info(f"Successfully computed embeddings from {len(batches)} mini-batches")
-        return [emb for result in all_results for emb in result]
-
-    @web_api.post("/v1/embeddings", response_model=EmbeddingResponse)
-    async def compute_embeddings(self, request: EmbeddingRequest):
-        try:
-            inputs = request.input if isinstance(request.input, list) else [request.input]
-            self.logger.info(f"Computing embeddings for a batch of {len(inputs)} texts using model: {request.model}")
-            embeddings = await self._compute_embeddings_from_resized_batches(request.model, inputs, request.dimensions)
-            response_data = [
-                {"index": idx, "embedding": emb}
-                for idx, emb in enumerate(embeddings)
-            ]
-            return EmbeddingResponse(object="list", data=response_data, model=request.model)
-        except Exception as e:
-            self.logger.error(f"Failed to create embeddings: {e}")
-            raise HTTPException(status_code=500, detail=str(e))
-
-    @web_api.get("/v1/models")
-    async def list_models(self):
-        """Returns the list of available models in OpenAI-compatible format."""
-        return {"object": "list", "data": self.available_models}