ray-embedding 0.13.4.tar.gz → 0.13.18.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ray-embedding might be problematic.
- {ray_embedding-0.13.4 → ray_embedding-0.13.18}/PKG-INFO +2 -2
- {ray_embedding-0.13.4 → ray_embedding-0.13.18}/README.md +1 -1
- {ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding/deploy.py +79 -70
- {ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding/dto.py +59 -52
- {ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding/embedding_model.py +112 -126
- {ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding/model_router.py +21 -15
- ray_embedding-0.13.18/ray_embedding/node_reaper.py +124 -0
- ray_embedding-0.13.18/ray_embedding/utils.py +71 -0
- {ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding.egg-info/PKG-INFO +2 -2
- {ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding.egg-info/SOURCES.txt +2 -1
- {ray_embedding-0.13.4 → ray_embedding-0.13.18}/setup.cfg +1 -1
- ray_embedding-0.13.4/ray_embedding/node_health.py +0 -91
- {ray_embedding-0.13.4 → ray_embedding-0.13.18}/pyproject.toml +0 -0
- {ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding/__init__.py +0 -0
- {ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding.egg-info/dependency_links.txt +0 -0
- {ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding.egg-info/top_level.txt +0 -0
{ray_embedding-0.13.4 → ray_embedding-0.13.18}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ray-embedding
-Version: 0.13.4
+Version: 0.13.18
 Summary: Deploy SentenceTransformers embedding models to a ray cluster
 Author: Crispin Almodovar
 Author-email:
@@ -31,6 +31,6 @@ to see how this library is used.
 - onnx-gpu
 - onnx-cpu
 - openvino-cpu
-
+- fastembed-onnx-cpu
{ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding/deploy.py

@@ -1,70 +1,79 @@
+import os
+
+import torch
+from ray.serve import Application
+
+from ray_embedding.dto import AppConfig, ModelDeploymentConfig, DeployedModel, NodeReaperConfig
+from ray_embedding.embedding_model import EmbeddingModel
+from ray_embedding.node_reaper import NodeReaper, NODE_REAPER_DEPLOYMENT_NAME
+from ray_embedding.utils import node_affinity_for_head, node_affinity_for_worker, HEAD_NODE_IP
+from ray_embedding.model_router import ModelRouter
+
+
+def build_model(model_config: ModelDeploymentConfig, node_reaper):
+    deployment_name = model_config.deployment
+    model = model_config.model
+    served_model_name = model_config.served_model_name or os.path.basename(model)
+    device = model_config.device
+    backend = model_config.backend or "torch"
+    matryoshka_dim = model_config.matryoshka_dim
+    trust_remote_code = model_config.trust_remote_code or False
+    model_kwargs = model_config.model_kwargs or {}
+    cuda_memory_flush_threshold = model_config.cuda_memory_flush_threshold or 0.8
+
+    if "torch_dtype" in model_kwargs:
+        torch_dtype = model_kwargs["torch_dtype"].strip()
+        if torch_dtype == "float16":
+            model_kwargs["torch_dtype"] = torch.float16
+        elif torch_dtype == "bfloat16":
+            model_kwargs["torch_dtype"] = torch.bfloat16
+        elif torch_dtype == "float32":
+            model_kwargs["torch_dtype"] = torch.float32
+        else:
+            raise ValueError(f"Invalid torch_dtype: '{torch_dtype}'")
+
+    deployment = EmbeddingModel.options(name=deployment_name).bind(model=model,
+                                                                   served_model_name=served_model_name,
+                                                                   device=device,
+                                                                   backend=backend,
+                                                                   matryoshka_dim=matryoshka_dim,
+                                                                   trust_remote_code=trust_remote_code,
+                                                                   model_kwargs=model_kwargs,
+                                                                   cuda_memory_flush_threshold=cuda_memory_flush_threshold,
+                                                                   node_reaper=node_reaper,
+                                                                   )
+    return DeployedModel(model=served_model_name,
+                         deployment_handle=deployment,
+                         batch_size=model_config.batch_size,
+                         num_retries=model_config.num_retries
+                         )
+
+
+def build_app(args: AppConfig) -> Application:
+    model_router, models = args.model_router, args.models
+    assert model_router and models
+    assert model_router.path_prefix
+
+    node_reaper_config = args.node_reaper or NodeReaperConfig()
+
+    node_reaper_kwargs = {
+        "ssh_user": node_reaper_config.ssh_user,
+        "ssh_private_key": node_reaper_config.ssh_private_key,
+    }
+    if node_reaper_config.retention_seconds is not None:
+        node_reaper_kwargs["retention_seconds"] = node_reaper_config.retention_seconds
+    if node_reaper_config.reap_interval_seconds is not None:
+        node_reaper_kwargs["reap_interval_seconds"] = node_reaper_config.reap_interval_seconds
+
+    node_reaper = NodeReaper.options(
+        name=NODE_REAPER_DEPLOYMENT_NAME,
+        ray_actor_options={"num_cpus": 0.25, f"node:{HEAD_NODE_IP}": 1},
+        autoscaling_config={"initial_replicas": 1, "min_replicas": 1, "max_replicas": 1}
+    ).bind(**node_reaper_kwargs)
+
+    deployed_models = {model_config.served_model_name: build_model(model_config, node_reaper) for model_config in models}
+    router = ModelRouter.options(
+        name=model_router.deployment
+    ).bind(deployed_models, model_router.path_prefix, node_reaper)
+
+    return router
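The new build_app wiring is easiest to see end to end. Below is a minimal sketch of driving it directly from Python, using the schemas from dto.py below; the model id and deployment names are hypothetical placeholders, not values shipped with the package:

from ray_embedding.deploy import build_app
from ray_embedding.dto import AppConfig, ModelDeploymentConfig, ModelRouterConfig, NodeReaperConfig

# Hypothetical app: one embedding model plus the router; the NodeReaper is created implicitly
app = build_app(AppConfig(
    model_router=ModelRouterConfig(deployment="ModelRouter", path_prefix=["/v1"]),
    node_reaper=NodeReaperConfig(),  # omitting node_reaper falls back to these same defaults
    models=[ModelDeploymentConfig(
        model="sentence-transformers/all-MiniLM-L6-v2",  # hypothetical model id
        served_model_name="all-MiniLM-L6-v2",
        deployment="EmbeddingModel-MiniLM",
    )],
))

Note that build_app pins the NodeReaper to the head node via the custom node:{HEAD_NODE_IP} resource and fixes it at exactly one replica.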
{ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding/dto.py

@@ -1,52 +1,59 @@
+import dataclasses
+from typing import Union, List, Optional, Dict, Any
+from pydantic import BaseModel
+from ray.serve.handle import DeploymentHandle
+
+
+class EmbeddingRequest(BaseModel):
+    """Schema of embedding requests (compatible with OpenAI)"""
+    model: str  # Model name (for compatibility; only one model is used here)
+    input: Union[str, List[str]]  # List of strings to embed
+    dimensions: Optional[int] = None
+
+
+class EmbeddingResponse(BaseModel):
+    """Schema of embedding response (compatible with OpenAI)"""
+    object: str
+    data: List[dict]  # Embedding data including index and vector
+    model: str  # Model name used for embedding
+
+
+class ModelRouterConfig(BaseModel):
+    deployment: str
+    path_prefix: List[str] = []
+    max_concurrency: int = 32
+
+
+class ModelDeploymentConfig(BaseModel):
+    model: str
+    served_model_name: str
+    batch_size: Optional[int] = 8
+    num_retries: Optional[int] = 2
+    device: Optional[str] = None
+    backend: Optional[str] = None
+    matryoshka_dim: Optional[int] = 768
+    trust_remote_code: Optional[bool] = False
+    model_kwargs: Optional[Dict[str, Any]] = {}
+    cuda_memory_flush_threshold: Optional[float] = 0.8
+    deployment: str
+
+
+class NodeReaperConfig(BaseModel):
+    ssh_user: str = "ubuntu"
+    ssh_private_key: str = "/home/ray/ray_bootstrap_key.pem"
+    retention_seconds: Optional[int] = 900
+    reap_interval_seconds: Optional[int] = 60
+
+
+class AppConfig(BaseModel):
+    model_router: ModelRouterConfig
+    node_reaper: Optional[NodeReaperConfig] = None
+    models: List[ModelDeploymentConfig]
+
+
+@dataclasses.dataclass
+class DeployedModel:
+    model: str
+    deployment_handle: DeploymentHandle
+    batch_size: int
+    num_retries: Optional[int] = 2
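Grounded in the EmbeddingRequest and EmbeddingResponse schemas above, a request/response pair looks roughly like the following Python literals. The values are invented, and the exact keys inside each data item follow the OpenAI embeddings convention, which is an assumption here:

request = {"model": "all-MiniLM-L6-v2", "input": ["hello world"], "dimensions": 256}

response = {
    "object": "list",
    "data": [{"object": "embedding", "index": 0, "embedding": [0.012, -0.034]}],  # vector shortened
    "model": "all-MiniLM-L6-v2",
}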
{ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding/embedding_model.py

@@ -1,126 +1,112 @@
+import logging
+import os.path
+import time
+from typing import Optional, Dict, Any, List, Union
+
+import torch
+from pynvml import nvmlInit, nvmlDeviceGetCount, nvmlDeviceGetHandleByIndex, nvmlDeviceGetMemoryInfo
+from ray import serve
+from ray.serve.handle import DeploymentHandle
+from sentence_transformers import SentenceTransformer
+
+from ray_embedding.utils import report_unhealthy_replica
+
+
+@serve.deployment
+class EmbeddingModel:
+    def __init__(self, model: str, served_model_name: Optional[str] = None,
+                 device: Optional[str] = None, backend: Optional[str] = "torch",
+                 matryoshka_dim: Optional[int] = None, trust_remote_code: Optional[bool] = False,
+                 model_kwargs: Dict[str, Any] = None, cuda_memory_flush_threshold: Optional[float] = 0.8,
+                 node_reaper: Optional[DeploymentHandle] = None):
+        logging.basicConfig(level=logging.INFO)
+        self.logger = logging.getLogger(self.__class__.__name__)
+        self.model = model
+        self.served_model_name = served_model_name or os.path.basename(self.model)
+        self.init_device = device
+        self.cuda_memory_flush_threshold = cuda_memory_flush_threshold
+        if self.init_device is None or self.init_device == "auto":
+            self.init_device = "cuda" if torch.cuda.is_available() else "cpu"
+        if self.init_device == "cuda":
+            self.wait_for_cuda()
+        self.torch_device = torch.device(self.init_device)
+        self.backend = backend or "torch"
+        self.matryoshka_dim = matryoshka_dim
+        self.trust_remote_code = trust_remote_code or False
+        self.model_kwargs = model_kwargs or {}
+        self.node_reaper = node_reaper
+
+        self.logger.info(f"Initializing embedding model: {self.model}")
+        self.embedding_model = SentenceTransformer(self.model, device=self.init_device, backend=self.backend,
+                                                   trust_remote_code=self.trust_remote_code,
+                                                   model_kwargs=self.model_kwargs)
+
+        self.logger.info(f"Successfully initialized model {self.model} using device {self.torch_device}")
+
+    async def __call__(self, text: Union[str, List[str]], dimensions: Optional[int] = None) -> List[List[float]]:
+        """Compute embeddings for the input text using the current model."""
+        if not text or (isinstance(text, list) and not all(text)):
+            raise ValueError("Input text is empty or invalid")
+
+        text = [text] if isinstance(text, str) else text
+        truncate_dim = dimensions or self.matryoshka_dim
+
+        # Compute embeddings in PyTorch format
+        embeddings = self.embedding_model.encode(
+            text, convert_to_tensor=True, normalize_embeddings=True, show_progress_bar=False,
+        ).to(self.torch_device)
+
+        if truncate_dim is not None:
+            # Truncate and re-normalize the embeddings
+            embeddings = embeddings[:, :truncate_dim]
+            embeddings = embeddings / torch.norm(embeddings, dim=1, keepdim=True)
+
+        # Move all embeddings to CPU at once before conversion
+        embeddings_list = embeddings.cpu().tolist()
+
+        # don't wait for GC
+        del embeddings
+
+        return embeddings_list
+
+    def wait_for_cuda(self, wait: int = 10):
+        if self.init_device == "cuda" and not torch.cuda.is_available():
+            time.sleep(wait)
+            self.check_health()
+
+    def check_health(self):
+        if self.init_device != "cuda":
+            return
+
+        try:
+            # Even though CUDA was available at init time,
+            # CUDA can become unavailable - this is a known problem in AWS EC2+Docker
+            # https://github.com/ray-project/ray/issues/49594
+            nvmlInit()
+            count = nvmlDeviceGetCount()
+            assert count >= 1, "No CUDA devices found"
+
+            # replicas only have access to GPU 0
+            handle = nvmlDeviceGetHandleByIndex(0)
+            mem_info = nvmlDeviceGetMemoryInfo(handle)
+        except Exception as e:
+            error_message = f"CUDA health check failed: {e}"
+            report_unhealthy_replica(error=error_message, node_reaper=self.node_reaper)
+            raise RuntimeError(error_message)
+
+        reserved = torch.cuda.memory_reserved()  # bytes currently reserved by CUDA cache
+        threshold_bytes = self.cuda_memory_flush_threshold * mem_info.total
+
+        if reserved > threshold_bytes:
+            # flush only when cache exceeds the percentage threshold
+            torch.cuda.empty_cache()
+
+    def __del__(self):
+        # Clean up and free any remaining GPU memory
+        try:
+            if hasattr(self, 'embedding_model'):
+                del self.embedding_model
+            if torch.cuda.is_available():
+                torch.cuda.empty_cache()
+        except Exception as e:
+            self.logger.warning(f"Error during cleanup: {e}")
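The truncate-and-renormalize branch in __call__ is the usual Matryoshka trick: slicing a unit-norm embedding to its first truncate_dim components breaks the unit norm, so the result must be rescaled. A self-contained sketch of just that step:

import torch

# Toy batch: 2 unit-norm 8-dim embeddings, truncated to their first 4 dims
emb = torch.nn.functional.normalize(torch.randn(2, 8), dim=1)
truncated = emb[:, :4]                                    # norms are now < 1
renormalized = truncated / torch.norm(truncated, dim=1, keepdim=True)
print(torch.norm(renormalized, dim=1))                    # tensor([1., 1.]), unit length restored

On the health-check side, check_health is the hook Ray Serve calls periodically on each replica; raising there marks the replica unhealthy, and the new code additionally reports the node to the NodeReaper before failing.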
{ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding/model_router.py

@@ -4,20 +4,20 @@ import time
 from typing import Optional, Dict, List, Tuple

 from fastapi import FastAPI, HTTPException
+import ray
 from ray import serve
 from ray.serve.handle import DeploymentHandle
-from ray.util import get_node_ip_address

 from ray_embedding.dto import DeployedModel, EmbeddingRequest, EmbeddingResponse
+from ray_embedding.utils import get_current_node_ip

 web_api = FastAPI(title="Ray Embeddings - OpenAI-compatible API")

 @serve.deployment
 @serve.ingress(web_api)
 class ModelRouter:
-    def __init__(self, deployed_models: Dict[str, DeployedModel],
-                 node_health_tracker: Optional[DeploymentHandle] = None):
+    def __init__(self, deployed_models: Dict[str, DeployedModel], path_prefix: List[str],
+                 max_concurrency: Optional[int] = 32, node_reaper: Optional[DeploymentHandle] = None):
         assert deployed_models, "models cannot be empty"
         assert path_prefix, "path_prefix cannot be empty"

@@ -35,13 +35,7 @@ class ModelRouter:
             "permission": []} for item in self.deployed_models.keys()
         ]
         self.logger.info(f"Successfully registered models: {self.available_models}")
-        replica_context = serve.get_replica_context()
-        self.deployment_name = replica_context.deployment
-        self.replica_actor_name = replica_context.replica_id.to_full_id_str()
-        self.node_ip = get_node_ip_address()
-        self.logger.info(f"Successfully initialized model router. "
-                         f"Deployment name: {self.deployment_name}, Replica actor name: {self.replica_actor_name}, Node IP: {self.node_ip}")
+        self.node_reaper = node_reaper

     async def _compute_embeddings_from_resized_batches(self, model: str, inputs: List[str], dimensions: Optional[int] = None):
         deployed_model = self.deployed_models[model]

@@ -122,7 +116,19 @@
             raise HTTPException(status_code=400, detail=f"The API path prefix specified is invalid: '{path_prefix}'")
         return {"object": "list", "data": self.available_models}

+    def check_health(self):
+        if not self.node_reaper:
+            return
+
+        try:
+            unhealthy_node_ips = ray.get(self.node_reaper.get_unhealthy_node_ips.remote())
+        except Exception as exc:
+            self.logger.warning(f"Unable to fetch node reaper data: {exc}")
+            return
+
+        if not unhealthy_node_ips:
+            return
+
+        node_ip = get_current_node_ip()
+        if node_ip and node_ip in unhealthy_node_ips:
+            raise RuntimeError("Model router replica is colocated with an unhealthy embedding replica node.")
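Judging from the handler above, the router serves an OpenAI-style model listing under each configured path prefix. A hypothetical client call, assuming Ray Serve's default HTTP address and a "/v1" prefix (neither is confirmed by the diff):

import requests

resp = requests.get("http://127.0.0.1:8000/v1/models")  # host, port and prefix are assumptions
print(resp.json())  # {"object": "list", "data": [...registered models...]}

The new check_health makes router replicas fail fast when they share a node with a condemned embedding replica, so Serve reschedules them before the NodeReaper stops the node.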
ray_embedding-0.13.18/ray_embedding/node_reaper.py

@@ -0,0 +1,124 @@
+import asyncio
+import logging
+import time
+from pathlib import Path
+from typing import Dict, Any, List, Optional, Set
+
+from ray import serve
+
+
+NODE_REAPER_DEPLOYMENT_NAME = "NodeReaper"
+
+
+@serve.deployment
+class NodeReaper:
+    def __init__(
+        self,
+        ssh_user: str,
+        ssh_private_key: str,
+        retention_seconds: int = 900,
+        reap_interval_seconds: int = 60,
+    ):
+        logging.basicConfig(level=logging.INFO)
+        self.logger = logging.getLogger(self.__class__.__name__)
+        self.ssh_user = ssh_user
+        key_path = Path(ssh_private_key).expanduser()
+        if not key_path.exists():
+            raise FileNotFoundError(f"SSH private key not found: {key_path}")
+        self.ssh_private_key = key_path.as_posix()
+        self.retention_seconds = retention_seconds
+        self.reap_interval_seconds = max(30, reap_interval_seconds)
+
+        self._unhealthy_replicas: Dict[str, Dict[str, Any]] = {}
+        self._nodes_marked_for_reap: Dict[str, float] = {}
+        self._nodes_inflight: Set[str] = set()
+
+        loop = asyncio.get_event_loop()
+        self._reaper_task = loop.create_task(self._reap_loop())
+        self.logger.info("NodeReaper initialized; monitoring unhealthy nodes for recycling")
+
+    def __del__(self):
+        if hasattr(self, "_reaper_task") and self._reaper_task and not self._reaper_task.done():
+            self._reaper_task.cancel()
+
+    def report_failure(self, replica_id: str, node_ip: str, error: Optional[str] = None):
+        self._unhealthy_replicas[replica_id] = {
+            "node_ip": node_ip,
+            "error": error,
+            "timestamp": time.time(),
+        }
+        self._nodes_marked_for_reap[node_ip] = self._nodes_marked_for_reap.get(node_ip, time.time())
+        self.logger.warning(f"Replica {replica_id} on {node_ip} marked for reaping: {error}")
+        self._purge_stale()
+
+    def get_unhealthy_node_ips(self) -> List[str]:
+        self._purge_stale()
+        return list(self._nodes_marked_for_reap.keys())
+
+    async def _reap_loop(self):
+        while True:
+            try:
+                await asyncio.sleep(self.reap_interval_seconds)
+                await self._reap_pending_nodes()
+            except asyncio.CancelledError:
+                break
+            except Exception as exc:
+                self.logger.warning(f"Unexpected error in reap loop: {exc}")
+
+    async def _reap_pending_nodes(self):
+        nodes = self.get_unhealthy_node_ips()
+        for node_ip in nodes:
+            if node_ip in self._nodes_inflight:
+                continue
+            self._nodes_inflight.add(node_ip)
+            try:
+                await self._reap_node(node_ip)
+                self._clear_node(node_ip)
+                self.logger.info(f"Successfully reaped node {node_ip}")
+            except Exception as exc:
+                self.logger.error(f"Failed to reap node {node_ip}: {exc}")
+            finally:
+                self._nodes_inflight.discard(node_ip)
+
+    async def _reap_node(self, node_ip: str):
+        ssh_command = [
+            "ssh",
+            "-i",
+            self.ssh_private_key,
+            "-o",
+            "StrictHostKeyChecking=no",
+            f"{self.ssh_user}@{node_ip}",
+            "docker stop ray_container",
+        ]
+
+        self.logger.info(f"Reaping node {node_ip} via SSH")
+        process = await asyncio.create_subprocess_exec(
+            *ssh_command,
+            stdout=asyncio.subprocess.PIPE,
+            stderr=asyncio.subprocess.PIPE,
+        )
+        stdout, stderr = await process.communicate()
+        if process.returncode != 0:
+            stdout_text = stdout.decode().strip()
+            stderr_text = stderr.decode().strip()
+            raise RuntimeError(
+                f"SSH command failed with code {process.returncode}. stdout={stdout_text} stderr={stderr_text}"
+            )
+
+    def _clear_node(self, node_ip: str):
+        to_delete = [replica for replica, data in self._unhealthy_replicas.items() if data.get("node_ip") == node_ip]
+        for replica in to_delete:
+            self._unhealthy_replicas.pop(replica, None)
+        self._nodes_marked_for_reap.pop(node_ip, None)
+
+    def _purge_stale(self):
+        if not self.retention_seconds:
+            return
+        cutoff = time.time() - self.retention_seconds
+        replica_ids = [replica_id for replica_id, data in self._unhealthy_replicas.items()
+                       if data.get("timestamp", 0) < cutoff]
+        for replica_id in replica_ids:
+            node_ip = self._unhealthy_replicas[replica_id]["node_ip"]
+            self._unhealthy_replicas.pop(replica_id, None)
+            if node_ip in self._nodes_marked_for_reap and self._nodes_marked_for_reap[node_ip] < cutoff:
+                self._nodes_marked_for_reap.pop(node_ip, None)
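The reap action itself amounts to ssh -i <key> -o StrictHostKeyChecking=no <user>@<node_ip> "docker stop ray_container"; once the container is stopped, the cluster autoscaler is expected to replace the node. From a deployment holding the reaper handle, reporting and querying look like this (the replica id and node IP are hypothetical; the ray.get pattern mirrors ModelRouter.check_health above):

import ray
from ray.serve.handle import DeploymentHandle

def flag_bad_node(node_reaper: DeploymentHandle) -> list:
    # Hypothetical identifiers, for illustration only
    node_reaper.report_failure.remote("EmbeddingModel#AbC123", "10.0.0.5", "CUDA health check failed")
    return ray.get(node_reaper.get_unhealthy_node_ips.remote())  # e.g. ["10.0.0.5"]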
ray_embedding-0.13.18/ray_embedding/utils.py

@@ -0,0 +1,71 @@
+from typing import Optional, Tuple
+
+import ray
+from ray import serve
+from ray.serve.handle import DeploymentHandle
+from ray.util import get_node_ip_address, state
+from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy, NotIn
+
+from ray_embedding.node_reaper import NODE_REAPER_DEPLOYMENT_NAME
+
+
+def get_head_node_id() -> Tuple[str, str]:
+    try:
+        nodes = state.list_nodes(filters=[("is_head_node", "=", True)])
+        if not nodes:
+            raise RuntimeError("Unable to locate head node for NodeReaper deployment.")
+        head_node = nodes[0]
+        return head_node["node_id"], head_node["node_ip"]
+    except Exception as exc:
+        raise RuntimeError("Unable to locate the head node ID for NodeReaper deployment.") from exc
+
+
+HEAD_NODE_ID, HEAD_NODE_IP = get_head_node_id()
+
+
+def node_affinity_for_head() -> NodeAffinitySchedulingStrategy:
+    return NodeAffinitySchedulingStrategy(node_id=HEAD_NODE_ID, soft=False)
+
+
+def node_affinity_for_worker() -> NodeAffinitySchedulingStrategy:
+    return NodeAffinitySchedulingStrategy(node_id=NotIn(HEAD_NODE_ID), soft=False)
+
+
+def get_node_reaper_handle() -> DeploymentHandle:
+    try:
+        return serve.context.get_deployment_handle(NODE_REAPER_DEPLOYMENT_NAME)
+    except Exception:
+        return serve.get_deployment(NODE_REAPER_DEPLOYMENT_NAME).get_handle(sync=False)
+
+
+def get_current_replica_tag() -> Optional[str]:
+    try:
+        context = serve.context.get_current_replica_context()
+    except Exception:
+        context = None
+    if context is None:
+        return None
+    return getattr(context, "replica_tag", None)
+
+
+def get_current_node_ip() -> Optional[str]:
+    try:
+        return get_node_ip_address()
+    except Exception:
+        return None
+
+
+def report_unhealthy_replica(error: Optional[str] = None,
+                             node_reaper: Optional[DeploymentHandle] = None) -> None:
+    replica_id = get_current_replica_tag()
+    node_ip = get_current_node_ip()
+    if not (replica_id and node_ip):
+        return
+    handle = node_reaper
+    if handle is None:
+        try:
+            handle = get_node_reaper_handle()
+        except Exception:
+            return
+    handle.report_failure.remote(replica_id, node_ip, error)
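A sketch of how the affinity helpers might be used to keep a plain Ray task off the head node; the task itself is hypothetical:

import ray
from ray_embedding.utils import node_affinity_for_worker

@ray.remote
def probe() -> str:
    return "running on a worker node"

ref = probe.options(scheduling_strategy=node_affinity_for_worker()).remote()
print(ray.get(ref))

Note that deploy.py pins the NodeReaper with the node:{HEAD_NODE_IP} custom resource rather than these strategies, but both approaches target the same head/worker split.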
{ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ray-embedding
-Version: 0.13.4
+Version: 0.13.18
 Summary: Deploy SentenceTransformers embedding models to a ray cluster
 Author: Crispin Almodovar
 Author-email:
@@ -31,6 +31,6 @@ to see how this library is used.
 - onnx-gpu
 - onnx-cpu
 - openvino-cpu
-
+- fastembed-onnx-cpu
{ray_embedding-0.13.4 → ray_embedding-0.13.18}/ray_embedding.egg-info/SOURCES.txt

@@ -6,7 +6,8 @@ ray_embedding/deploy.py
 ray_embedding/dto.py
 ray_embedding/embedding_model.py
 ray_embedding/model_router.py
-ray_embedding/node_health.py
+ray_embedding/node_reaper.py
+ray_embedding/utils.py
 ray_embedding.egg-info/PKG-INFO
 ray_embedding.egg-info/SOURCES.txt
 ray_embedding.egg-info/dependency_links.txt
ray_embedding-0.13.4/ray_embedding/node_health.py

@@ -1,91 +0,0 @@
-import logging
-import threading
-from typing import Set, List
-
-import ray
-from ray import serve
-from ray._private.services import get_node_ip_address
-from ray.util.state import list_actors
-
-
-@serve.deployment(autoscaling_config=dict(min_replicas=0, max_replicas=1),
-                  ray_actor_options=dict(num_cpus=0.1))
-class NodeHealthTracker:
-    """Maintains a list of bad nodes, as reported by replicas that call the report_bad_node func.
-    Bad nodes are those that fail GPU/CUDA health check.
-    What's the purpose? Because when an embedding model replica becomes unhealthy
-    (due to GPU/CUDA issues), we want Ray to kill all replicas running on the node.
-    When Ray detects that there are no running replicas on a node, the node is stopped
-    and replaced with a new one.
-    """
-    def __init__(self, tracked_model_deployments: List[str] = None):
-        logging.basicConfig(level=logging.INFO)
-        self.logger = logging.getLogger(self.__class__.__name__)
-        self.tracked_model_deployments = tracked_model_deployments or []
-        self.bad_gpu_node_ips: Set[str] = set()
-        self.lock = threading.RLock()
-        replica_context = serve.get_replica_context()
-        self.deployment_name = replica_context.deployment
-        self.replica_actor_name = replica_context.replica_id.to_full_id_str()
-        self.node_ip = get_node_ip_address()
-        self.logger.info(f"Successfully initialized NodeHealthTracker. Tracked model deployments: {self.tracked_model_deployments}")
-
-    async def report_bad_gpu_node(self, node_ip: str, deployment_name: str, replica_actor_name: str):
-        with self.lock:
-            if node_ip not in self.bad_gpu_node_ips:
-                self.bad_gpu_node_ips.add(node_ip)
-                self.logger.warning(
-                    f"[Bad GPU node reported] Deployment: {deployment_name}, Replica: {replica_actor_name}, Node IP: {node_ip}"
-                )
-
-    async def is_bad_gpu_node(self, node_ip: str) -> bool:
-        self.logger.info(f"Checking if node {node_ip} is marked bad.")
-        with self.lock:
-            is_bad_gpu_node = node_ip in self.bad_gpu_node_ips
-            self.logger.info(f"Node {node_ip} is marked bad: {is_bad_gpu_node}")
-            return is_bad_gpu_node
-
-    async def is_bad_gpu_or_no_model_replica_on_node(self, node_ip: str):
-        self.logger.info(f"Checking if node {node_ip} is marked bad or no model replica running on the node.")
-        is_bad_gpu_node = await self.is_bad_gpu_node(node_ip)
-        is_no_model_replica_running_on_node = not await self.is_model_replica_running_on_node(node_ip)
-        return is_bad_gpu_node or is_no_model_replica_running_on_node
-
-    async def check_health(self):
-        """Called periodically by Ray Serve. Used here to clean up stale node IDs."""
-        try:
-            current_node_ips = {node["NodeManagerAddress"] for node in ray.nodes() if node["Alive"]}
-            with self.lock:
-                stale_nodes = self.bad_gpu_node_ips - current_node_ips
-                if stale_nodes:
-                    self.logger.info(f"Removing stale bad node_ips: {stale_nodes}")
-                    self.bad_gpu_node_ips.intersection_update(current_node_ips)
-                self.logger.info(f"Current nodes: {current_node_ips}. Bad GPU nodes: {self.bad_gpu_node_ips}.")
-        except Exception as e:
-            raise RuntimeError(f"An error occurred in check_health during bad node cleanup: {e}")
-
-    async def is_model_replica_running_on_node(self, node_ip: str) -> bool:
-        """
-        Return True if there is at least one replica of the self.tracked_model_deployments
-        running on the specified node_ip.
-        """
-        try:
-            self.logger.info(f"Checking if there is at least one replica of tracked_deployments={self.tracked_model_deployments} "
-                             f"running on node {node_ip}.")
-            target_node_id = next(node["NodeID"] for node in ray.nodes() if node["Alive"] and node["NodeManagerAddress"] == node_ip)
-            assert target_node_id, f"No node found with IP {node_ip}"
-            prefixes = tuple(f"SERVE_REPLICA::{d}" for d in self.tracked_model_deployments)
-
-            for actor in list_actors(detail=False):
-                if (actor.state in ["DEPENDENCIES_UNREADY", 'PENDING_CREATION', 'ALIVE', 'RESTARTING'] and
-                        actor.node_id == target_node_id and
-                        actor.name.startswith(prefixes)):
-                    self.logger.info(f"There is at least one replica of tracked_deployments={self.tracked_model_deployments} "
-                                     f"running on node {node_ip}")
-                    return True
-
-            self.logger.info(f"No replicas of tracked deployments={self.tracked_model_deployments} running on node: {node_ip}.")
-            return False
-        except Exception as e:
-            self.logger.error(f"An error occurred while checking replicas on node {node_ip}: {e}")
-            return False
Files without changes: pyproject.toml, ray_embedding/__init__.py, ray_embedding.egg-info/dependency_links.txt, ray_embedding.egg-info/top_level.txt