ray-embedding 0.14.7 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,36 @@
+ Metadata-Version: 2.4
+ Name: ray-embedding
+ Version: 0.14.7
+ Summary: Deploy SentenceTransformers embedding models to a Ray cluster
+ Author: Crispin Almodovar
+ Author-email:
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.12
+ Description-Content-Type: text/markdown
+
+ # ray-embedding
+
+ A Python library for deploying SentenceTransformers models to a Ray cluster.
+ This tool encapsulates the inference logic that uses SentenceTransformers
+ to load any compatible embedding model from the Hugging Face Hub and
+ compute embeddings for input text.
+
+ This library is meant to be used with the [embedding-models Ray cluster](https://bitbucket.org/docorto/embedding-models/src/dev/).
+
+ Refer to this [Ray Serve deployment config](https://bitbucket.org/docorto/embedding-models/src/dev/serve-config/dev/serve-config.yaml)
+ to see how this library is used.
+
+ ### Supported backends
+
+ - pytorch-gpu
+ - pytorch-cpu
+
+ ### Planned backends
+ - onnx-gpu
+ - onnx-cpu
+ - openvino-cpu
+ - fastembed-onnx-cpu
+
+
@@ -0,0 +1,24 @@
+ # ray-embedding
+
+ A Python library for deploying SentenceTransformers models to a Ray cluster.
+ This tool encapsulates the inference logic that uses SentenceTransformers
+ to load any compatible embedding model from the Hugging Face Hub and
+ compute embeddings for input text.
+
+ This library is meant to be used with the [embedding-models Ray cluster](https://bitbucket.org/docorto/embedding-models/src/dev/).
+
+ Refer to this [Ray Serve deployment config](https://bitbucket.org/docorto/embedding-models/src/dev/serve-config/dev/serve-config.yaml)
+ to see how this library is used.
+
+ ### Supported backends
+
+ - pytorch-gpu
+ - pytorch-cpu
+
+ ### Planned backends
+ - onnx-gpu
+ - onnx-cpu
+ - openvino-cpu
+ - fastembed-onnx-cpu
+
+
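Once deployed, the router (see `model_router.py` below) exposes an OpenAI-compatible HTTP API. A minimal client sketch, assuming the Serve app is reachable on `localhost:8000`, was deployed with path prefix `embeddings`, and serves a model named `all-MiniLM-L6-v2` (all three values are illustrative, not part of the package):

```python
# Hypothetical client call; host, path prefix, and model name depend on
# your serve-config.yaml and are assumptions here.
import requests

resp = requests.post(
    "http://localhost:8000/embeddings/v1/embeddings",  # /{path_prefix}/v1/embeddings
    json={
        "model": "all-MiniLM-L6-v2",        # must match a served_model_name
        "input": ["first text", "second text"],
        "dimensions": 256,                   # optional matryoshka truncation
    },
    timeout=60,
)
resp.raise_for_status()
vectors = [item["embedding"] for item in resp.json()["data"]]
```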
@@ -0,0 +1,3 @@
+ [build-system]
+ requires = ["setuptools>=61.0"]
+ build-backend = "setuptools.build_meta"
@@ -0,0 +1,2 @@
+ from ray_embedding.deploy import build_app
+
@@ -0,0 +1,86 @@
+ import os
+ from typing import Any, Dict
+
+ import torch
+ from ray.serve import Application
+
+ from ray_embedding.dto import AppConfig, ModelDeploymentConfig, DeployedModel, NodeReaperConfig
+ from ray_embedding.embedding_model import EmbeddingModel
+ from ray_embedding.model_router import ModelRouter
+ from ray_embedding.node_reaper import NodeReaper, NODE_REAPER_DEPLOYMENT_NAME
+
+
+ def build_model(model_config: ModelDeploymentConfig, node_reaper) -> DeployedModel:
+     deployment_name = model_config.deployment
+     model = model_config.model
+     served_model_name = model_config.served_model_name or os.path.basename(model)
+     device = model_config.device
+     backend = model_config.backend or "torch"
+     matryoshka_dim = model_config.matryoshka_dim
+     trust_remote_code = model_config.trust_remote_code or False
+     model_kwargs = model_config.model_kwargs or {}
+     cuda_memory_flush_threshold = model_config.cuda_memory_flush_threshold or 0.8
+
+     # model_kwargs may carry torch_dtype as a string (e.g. from the serve config);
+     # map it to the corresponding torch dtype object.
+     if "torch_dtype" in model_kwargs:
+         torch_dtype = model_kwargs["torch_dtype"].strip()
+         if torch_dtype == "float16":
+             model_kwargs["torch_dtype"] = torch.float16
+         elif torch_dtype == "bfloat16":
+             model_kwargs["torch_dtype"] = torch.bfloat16
+         elif torch_dtype == "float32":
+             model_kwargs["torch_dtype"] = torch.float32
+         else:
+             raise ValueError(f"Invalid torch_dtype: '{torch_dtype}'")
+
+     deployment = EmbeddingModel.options(name=deployment_name).bind(
+         model=model,
+         served_model_name=served_model_name,
+         device=device,
+         backend=backend,
+         matryoshka_dim=matryoshka_dim,
+         trust_remote_code=trust_remote_code,
+         model_kwargs=model_kwargs,
+         cuda_memory_flush_threshold=cuda_memory_flush_threshold,
+         node_reaper=node_reaper,
+     )
+     return DeployedModel(
+         model=served_model_name,
+         deployment_handle=deployment,
+         batch_size=model_config.batch_size,
+         num_retries=model_config.num_retries,
+     )
+
+
+ def build_app(args: AppConfig) -> Application:
+     model_router, models = args.model_router, args.models
+     assert model_router and models
+     assert model_router.path_prefix
+
+     node_reaper_config = args.node_reaper or NodeReaperConfig()
+
+     node_reaper_kwargs: Dict[str, Any] = {
+         "ssh_user": node_reaper_config.ssh_user,
+         "ssh_private_key": node_reaper_config.ssh_private_key,
+     }
+     if node_reaper_config.retention_seconds is not None:
+         node_reaper_kwargs["retention_seconds"] = node_reaper_config.retention_seconds
+     if node_reaper_config.reap_interval_seconds is not None:
+         node_reaper_kwargs["reap_interval_seconds"] = node_reaper_config.reap_interval_seconds
+
+     # The node reaper is pinned to the head node as a single replica.
+     node_reaper = NodeReaper.options(
+         name=NODE_REAPER_DEPLOYMENT_NAME,
+         ray_actor_options={"num_cpus": 0.25, "resources": {"head_node": 1}},
+         autoscaling_config={"initial_replicas": 1, "min_replicas": 1, "max_replicas": 1},
+     ).bind(**node_reaper_kwargs)
+
+     deployed_models = {model_config.served_model_name: build_model(model_config, node_reaper)
+                        for model_config in models}
+     model_router_kwargs = {
+         "deployed_models": deployed_models,
+         "path_prefix": model_router.path_prefix,
+         "max_concurrency": model_router.max_concurrency,
+         "node_reaper": node_reaper,
+     }
+     router = ModelRouter.options(
+         name=model_router.deployment,
+         ray_actor_options={"num_cpus": 0.25, "resources": {"worker_node": 1}},
+     ).bind(**model_router_kwargs)
+
+     return router
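For illustration, `build_app` can also be driven directly from Python rather than through a serve-config YAML. A minimal sketch; the deployment and model names are illustrative, and it assumes a Ray cluster that exposes the custom `head_node`/`worker_node` resources used above and has a readable SSH key at the `NodeReaperConfig` default path:

```python
# Hypothetical invocation of build_app (names are illustrative, not from the package).
from ray import serve

from ray_embedding.deploy import build_app
from ray_embedding.dto import AppConfig, ModelDeploymentConfig, ModelRouterConfig

app_config = AppConfig(
    model_router=ModelRouterConfig(deployment="model-router", path_prefix=["embeddings"]),
    models=[
        ModelDeploymentConfig(
            deployment="MiniLM",
            model="sentence-transformers/all-MiniLM-L6-v2",  # any SentenceTransformers-compatible model
            served_model_name="all-MiniLM-L6-v2",
            device="auto",
            matryoshka_dim=None,   # MiniLM is 384-d, so skip the 768-d default truncation
        )
    ],
)

# Builds the DAG (node reaper -> embedding models -> router) and runs it.
serve.run(build_app(app_config), name="embedding-models")
```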
@@ -0,0 +1,59 @@
+ import dataclasses
+ from typing import Union, List, Optional, Dict, Any
+
+ from pydantic import BaseModel
+ from ray.serve.handle import DeploymentHandle
+
+
+ class EmbeddingRequest(BaseModel):
+     """Schema of embedding requests (compatible with the OpenAI API)."""
+     model: str  # Model name (for compatibility; only one model is used here)
+     input: Union[str, List[str]]  # Text, or list of texts, to embed
+     dimensions: Optional[int] = None
+
+
+ class EmbeddingResponse(BaseModel):
+     """Schema of embedding responses (compatible with the OpenAI API)."""
+     object: str
+     data: List[dict]  # Embedding data including index and vector
+     model: str  # Model name used for embedding
+
+
+ class ModelRouterConfig(BaseModel):
+     deployment: str
+     path_prefix: List[str] = []
+     max_concurrency: int = 32
+
+
+ class ModelDeploymentConfig(BaseModel):
+     model: str
+     served_model_name: str
+     batch_size: Optional[int] = 8
+     num_retries: Optional[int] = 2
+     device: Optional[str] = None
+     backend: Optional[str] = None
+     matryoshka_dim: Optional[int] = 768
+     trust_remote_code: Optional[bool] = False
+     model_kwargs: Optional[Dict[str, Any]] = {}
+     cuda_memory_flush_threshold: Optional[float] = 0.8
+     deployment: str
+
+
+ class NodeReaperConfig(BaseModel):
+     ssh_user: str = "ubuntu"
+     ssh_private_key: str = "/home/ray/ray_bootstrap_key.pem"
+     retention_seconds: Optional[int] = 900
+     reap_interval_seconds: Optional[int] = 60
+
+
+ class AppConfig(BaseModel):
+     model_router: ModelRouterConfig
+     node_reaper: Optional[NodeReaperConfig] = None
+     models: List[ModelDeploymentConfig]
+
+
+ @dataclasses.dataclass
+ class DeployedModel:
+     model: str
+     deployment_handle: DeploymentHandle
+     batch_size: int
+     num_retries: Optional[int] = 2
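These pydantic models mirror the OpenAI embeddings API. A quick sketch of how a request/response pair validates (values are illustrative):

```python
# Illustrative round-trip through the request/response schemas.
from ray_embedding.dto import EmbeddingRequest, EmbeddingResponse

req = EmbeddingRequest(model="all-MiniLM-L6-v2", input=["hello", "world"], dimensions=256)
assert isinstance(req.input, list) and req.dimensions == 256

resp = EmbeddingResponse(
    object="list",
    data=[{"index": 0, "embedding": [0.1, 0.2]},   # vectors truncated for brevity
          {"index": 1, "embedding": [0.3, 0.4]}],
    model=req.model,
)
print(resp.model_dump())  # pydantic v2; use .dict() on pydantic v1
```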
@@ -0,0 +1,122 @@
+ import logging
+ import os.path
+ import time
+ from typing import Optional, Dict, Any, List, Union
+
+ import torch
+ from pynvml import nvmlInit, nvmlDeviceGetCount, nvmlDeviceGetHandleByIndex, nvmlDeviceGetMemoryInfo
+ from ray import serve
+ from ray.serve.handle import DeploymentHandle
+ from sentence_transformers import SentenceTransformer
+
+ from ray_embedding.utils import report_unhealthy_replica, report_unhealthy_replica_async
+
+
+ @serve.deployment
+ class EmbeddingModel:
+     def __init__(self, model: str, served_model_name: Optional[str] = None,
+                  device: Optional[str] = None, backend: Optional[str] = "torch",
+                  matryoshka_dim: Optional[int] = None, trust_remote_code: Optional[bool] = False,
+                  model_kwargs: Optional[Dict[str, Any]] = None, cuda_memory_flush_threshold: Optional[float] = 0.8,
+                  node_reaper: Optional[DeploymentHandle] = None):
+         logging.basicConfig(level=logging.INFO)
+         self.logger = logging.getLogger(self.__class__.__name__)
+         self.model = model
+         self.served_model_name = served_model_name or os.path.basename(self.model)
+         self.init_device = device
+         self.cuda_memory_flush_threshold = cuda_memory_flush_threshold
+         self.node_reaper = node_reaper
+         self.backend = backend or "torch"
+         self.matryoshka_dim = matryoshka_dim
+         self.trust_remote_code = trust_remote_code or False
+         self.model_kwargs = model_kwargs or {}
+
+         # Resolve the device before constructing torch.device: torch.device(None) raises.
+         if self.init_device is None or self.init_device == "auto":
+             self.init_device = "cuda" if torch.cuda.is_available() else "cpu"
+         if self.init_device == "cuda":
+             self.wait_for_cuda()
+         self.torch_device = torch.device(self.init_device)
+
+         self.logger.info(f"Initializing embedding model: {self.model}")
+         self.embedding_model = SentenceTransformer(self.model, device=self.init_device, backend=self.backend,
+                                                    trust_remote_code=self.trust_remote_code,
+                                                    model_kwargs=self.model_kwargs)
+
+         self.logger.info(f"Successfully initialized model {self.model} using device {self.torch_device}")
+
+     async def __call__(self, text: Union[str, List[str]], dimensions: Optional[int] = None) -> List[List[float]]:
+         """Compute embeddings for the input text using the current model."""
+         if not text or (isinstance(text, list) and not all(text)):
+             raise ValueError("Input text is empty or invalid")
+
+         text = [text] if isinstance(text, str) else text
+         truncate_dim = dimensions or self.matryoshka_dim
+
+         # Compute embeddings as a PyTorch tensor
+         embeddings = self.embedding_model.encode(
+             text, convert_to_tensor=True, normalize_embeddings=True, show_progress_bar=False,
+         ).to(self.torch_device)
+
+         if truncate_dim is not None:
+             # Truncate and re-normalize the embeddings (matryoshka truncation)
+             embeddings = embeddings[:, :truncate_dim]
+             embeddings = embeddings / torch.norm(embeddings, dim=1, keepdim=True)
+
+         # Move all embeddings to CPU at once before conversion
+         embeddings_list = embeddings.cpu().tolist()
+
+         # Free the GPU tensor immediately; don't wait for GC
+         del embeddings
+
+         return embeddings_list
+
+     def wait_for_cuda(self, wait: int = 10):
+         if self.init_device == "cuda" and not torch.cuda.is_available():
+             time.sleep(wait)
+         error_message = self._evaluate_cuda_health()
+         if error_message:
+             report_unhealthy_replica(error=error_message, node_reaper=self.node_reaper)
+             raise RuntimeError(error_message)
+
+     async def check_health(self):
+         error_message = self._evaluate_cuda_health()
+         if error_message:
+             await report_unhealthy_replica_async(error=error_message, node_reaper=self.node_reaper)
+             raise RuntimeError(error_message)
+
+     def _evaluate_cuda_health(self) -> Optional[str]:
+         if self.init_device != "cuda":
+             return None
+
+         try:
+             # Even though CUDA was available at init time, it can become
+             # unavailable later - a known problem on AWS EC2 + Docker:
+             # https://github.com/ray-project/ray/issues/49594
+             nvmlInit()
+             count = nvmlDeviceGetCount()
+             assert count >= 1, "No CUDA devices found"
+
+             # Replicas only have access to GPU 0
+             handle = nvmlDeviceGetHandleByIndex(0)
+             mem_info = nvmlDeviceGetMemoryInfo(handle)
+         except Exception as e:
+             return f"CUDA health check failed: {e}"
+
+         reserved = torch.cuda.memory_reserved()  # bytes currently reserved by the CUDA cache
+         threshold_bytes = self.cuda_memory_flush_threshold * mem_info.total
+
+         if reserved > threshold_bytes:
+             # Flush only when the cache exceeds the percentage threshold
+             torch.cuda.empty_cache()
+
+         return None
+
+     def __del__(self):
+         # Clean up and free any remaining GPU memory
+         try:
+             if hasattr(self, 'embedding_model'):
+                 del self.embedding_model
+             if torch.cuda.is_available():
+                 torch.cuda.empty_cache()
+         except Exception as e:
+             self.logger.warning(f"Error during cleanup: {e}")
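The truncate-and-renormalize step in `__call__` is the standard matryoshka trick: keep the first `dim` components of an embedding trained with matryoshka representation learning, then rescale to unit norm so cosine similarity remains meaningful. A standalone sketch of just that step:

```python
# Standalone sketch of matryoshka truncation (data is random, for shape-checking only).
import torch

def truncate_and_renormalize(embeddings: torch.Tensor, dim: int) -> torch.Tensor:
    truncated = embeddings[:, :dim]                      # keep the leading components
    return truncated / torch.norm(truncated, dim=1, keepdim=True)  # L2-renormalize rows

full = torch.nn.functional.normalize(torch.randn(4, 768), dim=1)
small = truncate_and_renormalize(full, 256)
assert small.shape == (4, 256)
assert torch.allclose(small.norm(dim=1), torch.ones(4), atol=1e-5)
```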
@@ -0,0 +1,133 @@
+ import asyncio
+ import logging
+ import time
+ from typing import Optional, Dict, List, Tuple
+
+ from fastapi import FastAPI, HTTPException
+ from ray import serve
+ from ray.serve.handle import DeploymentHandle
+
+ from ray_embedding.dto import DeployedModel, EmbeddingRequest, EmbeddingResponse
+ from ray_embedding.utils import get_current_node_ip
+
+ web_api = FastAPI(title="Ray Embeddings - OpenAI-compatible API")
+
+ @serve.deployment
+ @serve.ingress(web_api)
+ class ModelRouter:
+     def __init__(self, deployed_models: Dict[str, DeployedModel], path_prefix: List[str],
+                  max_concurrency: Optional[int] = 32, node_reaper: Optional[DeploymentHandle] = None):
+         assert deployed_models, "models cannot be empty"
+         assert path_prefix, "path_prefix cannot be empty"
+
+         logging.basicConfig(level=logging.INFO)
+         self.logger = logging.getLogger(self.__class__.__name__)
+         self.deployed_models = deployed_models
+         self.path_prefix = [item.removeprefix("/").removesuffix("/") for item in path_prefix]
+         self.max_concurrency = max_concurrency
+         self.rate_limiter = asyncio.Semaphore(self.max_concurrency)
+         self.available_models = [
+             {"id": str(item),
+              "object": "model",
+              "created": int(time.time()),
+              "owned_by": "openai",
+              "permission": []} for item in self.deployed_models.keys()
+         ]
+         self.logger.info(f"Successfully registered models: {self.available_models}")
+         self.node_reaper = node_reaper
+
+     async def _compute_embeddings_from_resized_batches(self, model: str, inputs: List[str],
+                                                        dimensions: Optional[int] = None):
+         deployed_model = self.deployed_models[model]
+         model_handle = deployed_model.deployment_handle
+         batch_size = deployed_model.batch_size
+         num_retries = deployed_model.num_retries
+
+         # Resize the inputs into mini-batches of at most batch_size items, and dispatch in parallel
+         batches = [inputs[i:i + batch_size] for i in range(0, len(inputs), batch_size)]
+         if len(inputs) > batch_size:
+             self.logger.info(f"Original input (length {len(inputs)}) was resized "
+                              f"to {len(batches)} mini-batches, each with max length {batch_size}.")
+
+         # Call embedding model replicas in parallel (rate-limited)
+         tasks = [self._compute_embeddings_rate_limited(model_handle, batch, dimensions) for batch in batches]
+         all_results = await asyncio.gather(*tasks, return_exceptions=True)
+
+         # Retry any failed model calls
+         for i, result in enumerate(all_results):
+             if isinstance(result, Exception):
+                 self.logger.warning(f"Retrying mini-batch {i} due to exception: {result}")
+                 result_retried, retries = await self._retry_failed_embedding_call(model_handle, batches[i],
+                                                                                   dimensions, num_retries)
+                 if retries >= num_retries and (isinstance(result_retried, Exception) or result_retried is None):
+                     raise result_retried or ValueError(f"Failed to compute `{model}` embeddings "
+                                                        f"for mini-batch {i} after {num_retries} retries.")
+                 all_results[i] = result_retried
+
+         # Flatten the results because `all_results` is a list of lists
+         self.logger.info(f"Successfully computed embeddings from {len(batches)} mini-batches")
+         return [emb for result in all_results for emb in result]
+
+     async def _compute_embeddings_rate_limited(self, model_handle: DeploymentHandle, batch: List[str],
+                                                dimensions: Optional[int]):
+         async with self.rate_limiter:
+             return await model_handle.remote(batch, dimensions)
+
+     async def _retry_failed_embedding_call(self, model_handle: DeploymentHandle, batch: List[str],
+                                            dimensions: Optional[int] = None, num_retries: Optional[int] = 2) \
+             -> Tuple[List[List[float]] | Exception, int]:
+         result_retried, retries = None, 0
+         while retries < num_retries:
+             try:
+                 result_retried = await model_handle.remote(batch, dimensions)
+             except Exception as e:
+                 result_retried = e
+                 self.logger.warning(e)
+             finally:
+                 retries += 1
+             if not isinstance(result_retried, Exception) and result_retried is not None:
+                 break
+
+         return result_retried, retries
+
+     @web_api.post("/{path_prefix}/v1/embeddings", response_model=EmbeddingResponse)
+     async def compute_embeddings(self, path_prefix: str, request: EmbeddingRequest):
+         try:
+             assert path_prefix in self.path_prefix, f"The API path prefix specified is invalid: '{path_prefix}'"
+             assert request.model in self.deployed_models, f"The model specified is invalid: {request.model}"
+
+             inputs = request.input if isinstance(request.input, list) else [request.input]
+             self.logger.info(f"Computing embeddings for a batch of {len(inputs)} texts using model: {request.model}")
+             embeddings = await self._compute_embeddings_from_resized_batches(request.model, inputs, request.dimensions)
+             response_data = [
+                 {"index": idx, "embedding": emb}
+                 for idx, emb in enumerate(embeddings)
+             ]
+             return EmbeddingResponse(object="list", data=response_data, model=request.model)
+         except Exception as e:
+             status_code = 400 if isinstance(e, AssertionError) else 500
+             self.logger.error(f"Failed to create embeddings: {e}")
+             raise HTTPException(status_code=status_code, detail=str(e))
+
+     @web_api.get("/{path_prefix}/v1/models")
+     async def list_models(self, path_prefix: str):
+         """Returns the list of available models in OpenAI-compatible format."""
+         if path_prefix not in self.path_prefix:
+             raise HTTPException(status_code=400, detail=f"The API path prefix specified is invalid: '{path_prefix}'")
+         return {"object": "list", "data": self.available_models}
+
+     async def check_health(self):
+         if not self.node_reaper:
+             return
+
+         try:
+             unhealthy_node_ips = await self.node_reaper.get_unhealthy_node_ips.remote()
+         except Exception as exc:
+             self.logger.warning(f"Unable to fetch node reaper data: {exc}")
+             return
+
+         if not unhealthy_node_ips:
+             return
+
+         node_ip = get_current_node_ip()
+         if node_ip and node_ip in unhealthy_node_ips:
+             raise RuntimeError("Model router replica is colocated with an unhealthy embedding replica node.")
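The router's core dispatch pattern — split the request into mini-batches, fan them out concurrently, and cap in-flight calls with a semaphore — is easy to see in isolation. A self-contained sketch with a stub standing in for the Ray deployment handle:

```python
# Minimal sketch of the router's fan-out pattern; embed() is a stub for
# `await model_handle.remote(batch, dimensions)`.
import asyncio

async def fan_out(inputs, batch_size=8, max_concurrency=32):
    sem = asyncio.Semaphore(max_concurrency)  # caps concurrent in-flight calls

    async def embed(batch):
        async with sem:
            await asyncio.sleep(0)                        # placeholder for the remote call
            return [[float(len(text))] for text in batch]  # fake 1-d "embeddings"

    # Same slicing as _compute_embeddings_from_resized_batches
    batches = [inputs[i:i + batch_size] for i in range(0, len(inputs), batch_size)]
    results = await asyncio.gather(*(embed(b) for b in batches))
    return [emb for batch_result in results for emb in batch_result]  # flatten

vectors = asyncio.run(fan_out([f"text-{i}" for i in range(20)]))
assert len(vectors) == 20
```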
@@ -0,0 +1,124 @@
+ import asyncio
+ import logging
+ import time
+ from pathlib import Path
+ from typing import Dict, Any, List, Optional, Set
+
+ from ray import serve
+
+
+ NODE_REAPER_DEPLOYMENT_NAME = "NodeReaper"
+
+
+ @serve.deployment
+ class NodeReaper:
+     def __init__(
+         self,
+         ssh_user: str,
+         ssh_private_key: str,
+         retention_seconds: int = 900,
+         reap_interval_seconds: int = 60,
+     ):
+         logging.basicConfig(level=logging.INFO)
+         self.logger = logging.getLogger(self.__class__.__name__)
+         self.ssh_user = ssh_user
+         key_path = Path(ssh_private_key).expanduser()
+         if not key_path.exists():
+             raise FileNotFoundError(f"SSH private key not found: {key_path}")
+         self.ssh_private_key = key_path.as_posix()
+         self.retention_seconds = retention_seconds
+         self.reap_interval_seconds = max(30, reap_interval_seconds)
+
+         self._unhealthy_replicas: Dict[str, Dict[str, Any]] = {}
+         self._nodes_marked_for_reap: Dict[str, float] = {}
+         self._nodes_inflight: Set[str] = set()
+
+         loop = asyncio.get_event_loop()
+         self._reaper_task = loop.create_task(self._reap_loop())
+         self.logger.info("NodeReaper initialized; monitoring unhealthy nodes for recycling")
+
+     def __del__(self):
+         if hasattr(self, "_reaper_task") and self._reaper_task and not self._reaper_task.done():
+             self._reaper_task.cancel()
+
+     def report_failure(self, replica_id: str, node_ip: str, error: Optional[str] = None):
+         self._unhealthy_replicas[replica_id] = {
+             "node_ip": node_ip,
+             "error": error,
+             "timestamp": time.time(),
+         }
+         self._nodes_marked_for_reap[node_ip] = self._nodes_marked_for_reap.get(node_ip, time.time())
+         self.logger.warning(f"Replica {replica_id} on {node_ip} marked for reaping: {error}")
+         self._purge_stale()
+
+     def get_unhealthy_node_ips(self) -> List[str]:
+         self._purge_stale()
+         return list(self._nodes_marked_for_reap.keys())
+
+     async def _reap_loop(self):
+         while True:
+             try:
+                 await asyncio.sleep(self.reap_interval_seconds)
+                 await self._reap_pending_nodes()
+             except asyncio.CancelledError:
+                 break
+             except Exception as exc:
+                 self.logger.warning(f"Unexpected error in reap loop: {exc}")
+
+     async def _reap_pending_nodes(self):
+         nodes = self.get_unhealthy_node_ips()
+         for node_ip in nodes:
+             if node_ip in self._nodes_inflight:
+                 continue
+             self._nodes_inflight.add(node_ip)
+             try:
+                 await self._reap_node(node_ip)
+                 self._clear_node(node_ip)
+                 self.logger.info(f"Successfully reaped node {node_ip}")
+             except Exception as exc:
+                 self.logger.error(f"Failed to reap node {node_ip}: {exc}")
+             finally:
+                 self._nodes_inflight.discard(node_ip)
+
+     async def _reap_node(self, node_ip: str):
+         ssh_command = [
+             "ssh",
+             "-i",
+             self.ssh_private_key,
+             "-o",
+             "StrictHostKeyChecking=no",
+             f"{self.ssh_user}@{node_ip}",
+             "docker stop ray_container",
+         ]
+
+         self.logger.info(f"Reaping node {node_ip} via SSH")
+         process = await asyncio.create_subprocess_exec(
+             *ssh_command,
+             stdout=asyncio.subprocess.PIPE,
+             stderr=asyncio.subprocess.PIPE,
+         )
+         stdout, stderr = await process.communicate()
+         if process.returncode != 0:
+             stdout_text = stdout.decode().strip()
+             stderr_text = stderr.decode().strip()
+             raise RuntimeError(
+                 f"SSH command failed with code {process.returncode}. stdout={stdout_text} stderr={stderr_text}"
+             )
+
+     def _clear_node(self, node_ip: str):
+         to_delete = [replica for replica, data in self._unhealthy_replicas.items() if data.get("node_ip") == node_ip]
+         for replica in to_delete:
+             self._unhealthy_replicas.pop(replica, None)
+         self._nodes_marked_for_reap.pop(node_ip, None)
+
+     def _purge_stale(self):
+         if not self.retention_seconds:
+             return
+         cutoff = time.time() - self.retention_seconds
+         replica_ids = [replica_id for replica_id, data in self._unhealthy_replicas.items()
+                        if data.get("timestamp", 0) < cutoff]
+         for replica_id in replica_ids:
+             node_ip = self._unhealthy_replicas[replica_id]["node_ip"]
+             self._unhealthy_replicas.pop(replica_id, None)
+             if node_ip in self._nodes_marked_for_reap and self._nodes_marked_for_reap[node_ip] < cutoff:
+                 self._nodes_marked_for_reap.pop(node_ip, None)
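The reap step itself is just an SSH command run through asyncio's subprocess API; stopping the node's `ray_container` presumably lets the cluster launcher replace the instance. A standalone sketch of that pattern (user, host, and key path are illustrative):

```python
# Standalone sketch of running a remote command over SSH with asyncio,
# surfacing stderr on failure. Host/user/key values are illustrative.
import asyncio

async def ssh_run(user: str, host: str, key: str, command: str) -> None:
    proc = await asyncio.create_subprocess_exec(
        "ssh", "-i", key, "-o", "StrictHostKeyChecking=no",
        f"{user}@{host}", command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    stdout, stderr = await proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError(f"ssh exited {proc.returncode}: {stderr.decode().strip()}")

# Example (do not run against a real node unintentionally):
# asyncio.run(ssh_run("ubuntu", "10.0.0.5", "~/key.pem", "docker stop ray_container"))
```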
@@ -0,0 +1,83 @@
+ from typing import Optional, Tuple
+
+ from ray import serve
+ from ray.serve.handle import DeploymentHandle
+ from ray.util import get_node_ip_address, state
+
+ from ray_embedding.node_reaper import NODE_REAPER_DEPLOYMENT_NAME
+
+
+ def get_head_node_id() -> Tuple[str, str]:
+     try:
+         nodes = state.list_nodes(filters=[("is_head_node", "=", True)])
+         if not nodes:
+             raise RuntimeError("Unable to locate head node for NodeReaper deployment.")
+         head_node = nodes[0]
+         return head_node["node_id"], head_node["node_ip"]
+     except Exception as exc:
+         raise RuntimeError("Unable to locate the head node ID for NodeReaper deployment.") from exc
+
+
+ def get_node_reaper_handle() -> DeploymentHandle:
+     try:
+         return serve.context.get_deployment_handle(NODE_REAPER_DEPLOYMENT_NAME)
+     except Exception:
+         # Fall back to the legacy handle API
+         return serve.get_deployment(NODE_REAPER_DEPLOYMENT_NAME).get_handle(sync=False)
+
+
+ def get_current_replica_tag() -> Optional[str]:
+     try:
+         context = serve.context.get_current_replica_context()
+     except Exception:
+         context = None
+     if context is None:
+         return None
+     return getattr(context, "replica_tag", None)
+
+
+ def get_current_node_ip() -> Optional[str]:
+     try:
+         return get_node_ip_address()
+     except Exception:
+         return None
+
+
+ def _resolve_node_reaper_handle(node_reaper: Optional[DeploymentHandle]) -> Optional[DeploymentHandle]:
+     if node_reaper is not None:
+         return node_reaper
+     try:
+         return get_node_reaper_handle()
+     except Exception:
+         return None
+
+
+ def _gather_replica_context() -> Optional[Tuple[str, str]]:
+     replica_id = get_current_replica_tag()
+     node_ip = get_current_node_ip()
+     if not (replica_id and node_ip):
+         return None
+     return replica_id, node_ip
+
+
+ def report_unhealthy_replica(error: Optional[str] = None,
+                              node_reaper: Optional[DeploymentHandle] = None) -> None:
+     context = _gather_replica_context()
+     if not context:
+         return
+     handle = _resolve_node_reaper_handle(node_reaper)
+     if handle is None:
+         return
+     replica_id, node_ip = context
+     handle.report_failure.remote(replica_id, node_ip, error)
+
+
+ async def report_unhealthy_replica_async(error: Optional[str] = None,
+                                          node_reaper: Optional[DeploymentHandle] = None) -> None:
+     context = _gather_replica_context()
+     if not context:
+         return
+     handle = _resolve_node_reaper_handle(node_reaper)
+     if handle is None:
+         return
+     replica_id, node_ip = context
+     await handle.report_failure.remote(replica_id, node_ip, error)
@@ -0,0 +1,36 @@
+ Metadata-Version: 2.4
+ Name: ray-embedding
+ Version: 0.14.7
+ Summary: Deploy SentenceTransformers embedding models to a Ray cluster
+ Author: Crispin Almodovar
+ Author-email:
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.12
+ Description-Content-Type: text/markdown
+
+ # ray-embedding
+
+ A Python library for deploying SentenceTransformers models to a Ray cluster.
+ This tool encapsulates the inference logic that uses SentenceTransformers
+ to load any compatible embedding model from the Hugging Face Hub and
+ compute embeddings for input text.
+
+ This library is meant to be used with the [embedding-models Ray cluster](https://bitbucket.org/docorto/embedding-models/src/dev/).
+
+ Refer to this [Ray Serve deployment config](https://bitbucket.org/docorto/embedding-models/src/dev/serve-config/dev/serve-config.yaml)
+ to see how this library is used.
+
+ ### Supported backends
+
+ - pytorch-gpu
+ - pytorch-cpu
+
+ ### Planned backends
+ - onnx-gpu
+ - onnx-cpu
+ - openvino-cpu
+ - fastembed-onnx-cpu
+
+
@@ -0,0 +1,14 @@
+ README.md
+ pyproject.toml
+ setup.cfg
+ ray_embedding/__init__.py
+ ray_embedding/deploy.py
+ ray_embedding/dto.py
+ ray_embedding/embedding_model.py
+ ray_embedding/model_router.py
+ ray_embedding/node_reaper.py
+ ray_embedding/utils.py
+ ray_embedding.egg-info/PKG-INFO
+ ray_embedding.egg-info/SOURCES.txt
+ ray_embedding.egg-info/dependency_links.txt
+ ray_embedding.egg-info/top_level.txt
@@ -0,0 +1 @@
+ ray_embedding
@@ -0,0 +1,21 @@
+ [metadata]
+ name = ray-embedding
+ version = 0.14.7
+ author = Crispin Almodovar
+ author_email =
+ description = Deploy SentenceTransformers embedding models to a Ray cluster
+ long_description = file: README.md
+ long_description_content_type = text/markdown
+ classifiers =
+     Programming Language :: Python :: 3
+     License :: OSI Approved :: MIT License
+     Operating System :: OS Independent
+
+ [options]
+ packages = find:
+ python_requires = >=3.12
+
+ [egg_info]
+ tag_build =
+ tag_date = 0
+