ray-embedding 0.10.1__py3-none-any.whl → 0.10.10__py3-none-any.whl

This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.

Potentially problematic release.


This version of ray-embedding might be problematic; see the version diff and metadata below for details.

ray_embedding/deploy.py CHANGED
@@ -16,6 +16,7 @@ def deploy_model(args: Dict[str, Any]) -> Application:
16
16
  model: str = args.pop("model", "")
17
17
  assert model
18
18
 
19
+ device: Optional[str] = args.pop("device", None)
19
20
  backend: Optional[str] = args.pop("backend", "torch")
20
21
  matryoshka_dim: Optional[int] = args.pop("matryoshka_dim", None)
21
22
  trust_remote_code: Optional[bool] = args.pop("trust_remote_code", False)
@@ -32,6 +33,7 @@ def deploy_model(args: Dict[str, Any]) -> Application:
32
33
  raise ValueError(f"Invalid torch_dtype: '{torch_dtype}'")
33
34
 
34
35
  deployment = EmbeddingModel.options(name=deployment_name).bind(model=model,
36
+ device=device,
35
37
  backend=backend,
36
38
  matryoshka_dim=matryoshka_dim,
37
39
  trust_remote_code=trust_remote_code,
@@ -28,18 +28,23 @@ web_api = FastAPI(title=f"Ray Embeddings - OpenAI-compatible API")
28
28
  )
29
29
  @serve.ingress(web_api)
30
30
  class EmbeddingModel:
31
- def __init__(self, model: str, backend: Optional[str] = "torch", matryoshka_dim: Optional[int] = None,
32
- trust_remote_code: Optional[bool] = False, model_kwargs: Dict[str, Any] = None):
31
+ def __init__(self, model: str, device: Optional[str] = None, backend: Optional[str] = "torch",
32
+ matryoshka_dim: Optional[int] = None, trust_remote_code: Optional[bool] = False,
33
+ model_kwargs: Dict[str, Any] = None):
33
34
  logging.basicConfig(level=logging.INFO)
34
35
  self.logger = logging.getLogger(__name__)
35
36
  self.model = model
37
+ self.init_device = device
38
+ if self.init_device is None or self.init_device == "auto":
39
+ self.init_device = "cuda" if torch.cuda.is_available() else "cpu"
40
+ self.torch_device = torch.device(self.init_device)
36
41
  self.backend = backend or "torch"
37
42
  self.matryoshka_dim = matryoshka_dim
38
43
  self.trust_remote_code = trust_remote_code or False
39
44
  self.model_kwargs = model_kwargs or {}
40
- self.torch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
41
45
  self.logger.info(f"Initializing embedding model: {self.model}")
42
- self.embedding_model = SentenceTransformer(self.model, backend=self.backend, trust_remote_code=self.trust_remote_code,
46
+ self.embedding_model = SentenceTransformer(self.model, device=self.init_device, backend=self.backend,
47
+ trust_remote_code=self.trust_remote_code,
43
48
  model_kwargs=self.model_kwargs)
44
49
 
45
50
  self.served_model_name = os.path.basename(self.model)
@@ -93,5 +98,9 @@ class EmbeddingModel:
93
98
  """Returns the list of available models in OpenAI-compatible format."""
94
99
  return {"object": "list", "data": self.available_models}
95
100
 
96
-
97
-
101
+ def check_health(self):
102
+ if self.init_device == "cuda" and not torch.cuda.is_available():
103
+ # Even though CUDA was available at init time,
104
+ # CUDA can become unavailable - this is a known problem in AWS EC2
105
+ # https://github.com/ray-project/ray/issues/49594
106
+ raise RuntimeError("CUDA device is not available")
@@ -0,0 +1,36 @@
1
+ Metadata-Version: 2.4
2
+ Name: ray-embedding
3
+ Version: 0.10.10
4
+ Summary: Deploy SentenceTransformers embedding models to a ray cluster
5
+ Author: Crispin Almodovar
6
+ Author-email:
7
+ Classifier: Programming Language :: Python :: 3
8
+ Classifier: License :: OSI Approved :: MIT License
9
+ Classifier: Operating System :: OS Independent
10
+ Requires-Python: >=3.12
11
+ Description-Content-Type: text/markdown
12
+
13
+ # ray-embedding
14
+
15
+ A Python library for deploying SentenceTransformers models to a ray cluster.
16
+ This tool encapsulates inference logic that uses SentenceTransformers
17
+ to load any compatible embedding model from the Hugging Face hub and
18
+ compute embeddings for input text.
19
+
20
+ This library is meant to be used with the [embedding-models Ray cluster](https://bitbucket.org/docorto/embedding-models/src/dev/).
21
+
22
+ Refer to this [Ray Serve deployment config](https://bitbucket.org/docorto/embedding-models/src/dev/serve-config/dev/serve-config.yaml)
23
+ to see how this library is used.
24
+
25
+ ### Supports the following backends
26
+
27
+ - pytorch-gpu
28
+ - pytorch-cpu
29
+
30
+ ### Planned:
31
+ - onnx-gpu
32
+ - onnx-cpu
33
+ - openvino-cpu
34
+ - fastembed-onnx-cpu
35
+
36
+
@@ -0,0 +1,8 @@
1
+ ray_embedding/__init__.py,sha256=OYJT0rVaaGzY613JqgfktsCgroDnBkGOHxR2FE9UtRU,49
2
+ ray_embedding/deploy.py,sha256=ZGxcG4589WcRtaM6H84YJarw0m1XqHNgfOf3PLAhM5M,1995
3
+ ray_embedding/dto.py,sha256=e91ejZbM_NB9WTjF1YnfuV71cajYIh0vOX8oV_g2OwM,595
4
+ ray_embedding/embedding_model.py,sha256=hIszW30di-Us0TL5Wevo8gNpD-kL-bhitU4MrNHrebc,4574
5
+ ray_embedding-0.10.10.dist-info/METADATA,sha256=5vgf2aQCm91W6V_hXKGfJJYo8a8L2b8gqzSrYscxY88,1095
6
+ ray_embedding-0.10.10.dist-info/WHEEL,sha256=0CuiUZ_p9E4cD6NyLD6UG80LBXYyiSYZOKDm5lp32xk,91
7
+ ray_embedding-0.10.10.dist-info/top_level.txt,sha256=ziCblpJq1YsrryshFqxTRuRMgNuO1_tgvAAkGShATNA,14
8
+ ray_embedding-0.10.10.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.3.0)
2
+ Generator: setuptools (80.3.1)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
@@ -1,28 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: ray-embedding
3
- Version: 0.10.1
4
- Summary: Deploy SentenceTransformers embedding models to a ray cluster
5
- Author: Crispin Almodovar
6
- Author-email:
7
- Classifier: Programming Language :: Python :: 3
8
- Classifier: License :: OSI Approved :: MIT License
9
- Classifier: Operating System :: OS Independent
10
- Requires-Python: >=3.12
11
- Description-Content-Type: text/markdown
12
-
13
- # ray-embedding
14
-
15
- A tool for deploying SentenceTransformers models to a ray cluster.
16
-
17
- ### Supports the following backends
18
-
19
- - pytorch-gpu
20
- - pytorch-cpu
21
-
22
- ### Planned:
23
- - onnx-gpu
24
- - onnx-cpu
25
- - openvino-cpu
26
- - fastembed-onnx-cpu
27
-
28
-
@@ -1,8 +0,0 @@
1
- ray_embedding/__init__.py,sha256=OYJT0rVaaGzY613JqgfktsCgroDnBkGOHxR2FE9UtRU,49
2
- ray_embedding/deploy.py,sha256=oqOhMxS5UyZ4oGhfpL7kqvrxLO8QW41sF_FHbNSJe-w,1858
3
- ray_embedding/dto.py,sha256=e91ejZbM_NB9WTjF1YnfuV71cajYIh0vOX8oV_g2OwM,595
4
- ray_embedding/embedding_model.py,sha256=54QQ5iIAnOHtg_8nqGPIfqrZVwih6qKKeEOgmZweQeQ,3931
5
- ray_embedding-0.10.1.dist-info/METADATA,sha256=E9sZzIgqproOjSMt1ujPha_BhuBHLtd3ipTXD8vJSAU,605
6
- ray_embedding-0.10.1.dist-info/WHEEL,sha256=GHB6lJx2juba1wDgXDNlMTyM13ckjBMKf-OnwgKOCtA,91
7
- ray_embedding-0.10.1.dist-info/top_level.txt,sha256=ziCblpJq1YsrryshFqxTRuRMgNuO1_tgvAAkGShATNA,14
8
- ray_embedding-0.10.1.dist-info/RECORD,,