ray-embedding 0.10.1__tar.gz → 0.10.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ray-embedding might be problematic.

@@ -0,0 +1,36 @@
+ Metadata-Version: 2.4
+ Name: ray-embedding
+ Version: 0.10.5
+ Summary: Deploy SentenceTransformers embedding models to a ray cluster
+ Author: Crispin Almodovar
+ Author-email:
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.12
+ Description-Content-Type: text/markdown
+
+ # ray-embedding
+
+ A Python library for deploying SentenceTransformers models to a ray cluster.
+ This tool encapsulates inference logic that uses SentenceTransformers
+ to load any compatible embedding model from the Hugging Face hub and
+ compute embeddings for input text.
+
+ This library is meant to be used with the [embedding-models Ray cluster](https://bitbucket.org/docorto/embedding-models/src/dev/).
+
+ Refer to this [Ray Serve deployment config](https://bitbucket.org/docorto/embedding-models/src/dev/serve-config/dev/serve-config.yaml)
+ to see how this library is used.
+
+ ### Supports the following backends
+
+ - pytorch-gpu
+ - pytorch-cpu
+
+ ### Planned:
+ - onnx-gpu
+ - onnx-cpu
+ - openvino-cpu
+ - fastembed-onnx-cpu
+
+
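The README above describes what the package wraps: SentenceTransformers inference for any compatible Hugging Face embedding model, served from a Ray cluster on either the pytorch-gpu or pytorch-cpu backend. For illustration only (this snippet is not shipped in the package), a minimal sketch of that inference pattern; the model name is an arbitrary example, and sentence-transformers plus torch are assumed to be installed:

```python
# Illustrative sketch of the inference logic the library wraps (not package code).
# Assumes sentence-transformers and torch are installed; the model name is an
# arbitrary Hugging Face hub example, not a project default.
import torch
from sentence_transformers import SentenceTransformer

device = "cuda" if torch.cuda.is_available() else "cpu"  # pytorch-gpu vs pytorch-cpu
model = SentenceTransformer(
    "sentence-transformers/all-MiniLM-L6-v2",
    backend="torch",   # "onnx" / "openvino" correspond to the planned backends
    device=device,
)

embeddings = model.encode(["Ray makes distributed embedding inference simple."])
print(embeddings.shape)  # (1, embedding_dim)
```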
@@ -0,0 +1,24 @@
+ # ray-embedding
+
+ A Python library for deploying SentenceTransformers models to a ray cluster.
+ This tool encapsulates inference logic that uses SentenceTransformers
+ to load any compatible embedding model from the Hugging Face hub and
+ compute embeddings for input text.
+
+ This library is meant to be used with the [embedding-models Ray cluster](https://bitbucket.org/docorto/embedding-models/src/dev/).
+
+ Refer to this [Ray Serve deployment config](https://bitbucket.org/docorto/embedding-models/src/dev/serve-config/dev/serve-config.yaml)
+ to see how this library is used.
+
+ ### Supports the following backends
+
+ - pytorch-gpu
+ - pytorch-cpu
+
+ ### Planned:
+ - onnx-gpu
+ - onnx-cpu
+ - openvino-cpu
+ - fastembed-onnx-cpu
+
+
@@ -19,6 +19,7 @@ def deploy_model(args: Dict[str, Any]) -> Application:
     backend: Optional[str] = args.pop("backend", "torch")
     matryoshka_dim: Optional[int] = args.pop("matryoshka_dim", None)
     trust_remote_code: Optional[bool] = args.pop("trust_remote_code", False)
+    device: Optional[str] = args.pop("device", None)
     model_kwargs: Dict[str, Any] = args.pop("model_kwargs", {})
     if "torch_dtype" in model_kwargs:
         torch_dtype = model_kwargs["torch_dtype"].strip()
@@ -33,6 +34,7 @@ def deploy_model(args: Dict[str, Any]) -> Application:

     deployment = EmbeddingModel.options(name=deployment_name).bind(model=model,
                                                                     backend=backend,
+                                                                    device=device,
                                                                     matryoshka_dim=matryoshka_dim,
                                                                     trust_remote_code=trust_remote_code,
                                                                     model_kwargs=model_kwargs
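The two hunks above add an optional `device` argument: it is popped from the Ray Serve application args and passed through to `EmbeddingModel.bind(...)`. A hedged sketch of driving this builder directly is below; the import path and the `deployment_name`/`model` keys are assumptions inferred from the surrounding code, not confirmed by this diff:

```python
# Hedged sketch: exercising the deploy_model builder shown in the hunk above.
# The module path and the "deployment_name"/"model" keys are assumptions.
from ray import serve
from ray_embedding.deploy import deploy_model  # assumed module path

app = deploy_model({
    "deployment_name": "my-embedder",                    # assumed key
    "model": "sentence-transformers/all-MiniLM-L6-v2",   # assumed key
    "backend": "torch",
    "device": "cuda",        # new in 0.10.5; auto-detected when omitted
    "matryoshka_dim": None,
    "trust_remote_code": False,
    "model_kwargs": {"torch_dtype": "float16"},
})
serve.run(app)  # requires a running Ray cluster with Serve started
```

In the referenced serve-config.yaml, these keys would sit under the application's `args` section, since Ray Serve passes builder arguments to `deploy_model` as a dict.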
@@ -28,16 +28,21 @@ web_api = FastAPI(title=f"Ray Embeddings - OpenAI-compatible API")
 )
 @serve.ingress(web_api)
 class EmbeddingModel:
-    def __init__(self, model: str, backend: Optional[str] = "torch", matryoshka_dim: Optional[int] = None,
-                 trust_remote_code: Optional[bool] = False, model_kwargs: Dict[str, Any] = None):
+    def __init__(self, model: str, backend: Optional[str] = "torch", device: Optional[str] = None,
+                 matryoshka_dim: Optional[int] = None, trust_remote_code: Optional[bool] = False,
+                 model_kwargs: Dict[str, Any] = None):
         logging.basicConfig(level=logging.INFO)
         self.logger = logging.getLogger(__name__)
         self.model = model
+        self.init_device = device
         self.backend = backend or "torch"
         self.matryoshka_dim = matryoshka_dim
         self.trust_remote_code = trust_remote_code or False
         self.model_kwargs = model_kwargs or {}
-        self.torch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        if self.init_device is None:
+            self.torch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        else:
+            self.torch_device = torch.device(self.init_device)
         self.logger.info(f"Initializing embedding model: {self.model}")
         self.embedding_model = SentenceTransformer(self.model, backend=self.backend, trust_remote_code=self.trust_remote_code,
                                                    model_kwargs=self.model_kwargs)
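The `__init__` change above resolves the target device in two steps: an explicitly configured `device` string wins; otherwise the replica falls back to CUDA when `torch.cuda.is_available()` and to CPU otherwise. A standalone restatement of that rule, for illustration only:

```python
# Illustrative restatement of the device-resolution rule introduced above.
from typing import Optional

import torch


def resolve_device(requested: Optional[str] = None) -> torch.device:
    """Prefer an explicit device string; otherwise auto-detect CUDA."""
    if requested is None:
        return torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return torch.device(requested)  # e.g. "cuda", "cuda:1", "cpu"


assert resolve_device("cpu").type == "cpu"
```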
@@ -93,5 +98,9 @@ class EmbeddingModel:
         """Returns the list of available models in OpenAI-compatible format."""
         return {"object": "list", "data": self.available_models}

-
-
+    def check_health(self):
+        if self.init_device == "cuda" and not torch.cuda.is_available():
+            # Even though CUDA was available at init time,
+            # CUDA can become unavailable - this is a known problem in AWS EC2
+            # https://github.com/ray-project/ray/issues/49594
+            raise RuntimeError("CUDA device is not available")
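The new `check_health` method hooks into Ray Serve's replica health checks: Serve calls `check_health` on each replica periodically, and a raised exception marks the replica unhealthy so it is restarted, which is how the deployment recovers when a GPU disappears from the node (see the linked ray-project issue). A generic, hedged sketch of that mechanism, independent of this package:

```python
# Generic sketch of Ray Serve's check_health hook (not this package's deployment).
# Serve invokes check_health periodically; raising marks the replica unhealthy
# and triggers a restart.
import torch
from ray import serve


@serve.deployment(health_check_period_s=10, health_check_timeout_s=30)
class GpuProbe:
    def __init__(self):
        # Remember whether this replica started with a visible GPU.
        self.started_with_cuda = torch.cuda.is_available()

    def check_health(self):
        if self.started_with_cuda and not torch.cuda.is_available():
            raise RuntimeError("CUDA device disappeared; replica is unhealthy")


app = GpuProbe.bind()
# serve.run(app)  # run inside a Ray cluster with Serve started
```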
@@ -0,0 +1,36 @@
+ Metadata-Version: 2.4
+ Name: ray-embedding
+ Version: 0.10.5
+ Summary: Deploy SentenceTransformers embedding models to a ray cluster
+ Author: Crispin Almodovar
+ Author-email:
+ Classifier: Programming Language :: Python :: 3
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.12
+ Description-Content-Type: text/markdown
+
+ # ray-embedding
+
+ A Python library for deploying SentenceTransformers models to a ray cluster.
+ This tool encapsulates inference logic that uses SentenceTransformers
+ to load any compatible embedding model from the Hugging Face hub and
+ compute embeddings for input text.
+
+ This library is meant to be used with the [embedding-models Ray cluster](https://bitbucket.org/docorto/embedding-models/src/dev/).
+
+ Refer to this [Ray Serve deployment config](https://bitbucket.org/docorto/embedding-models/src/dev/serve-config/dev/serve-config.yaml)
+ to see how this library is used.
+
+ ### Supports the following backends
+
+ - pytorch-gpu
+ - pytorch-cpu
+
+ ### Planned:
+ - onnx-gpu
+ - onnx-cpu
+ - openvino-cpu
+ - fastembed-onnx-cpu
+
+
@@ -8,5 +8,4 @@ ray_embedding/embedding_model.py
 ray_embedding.egg-info/PKG-INFO
 ray_embedding.egg-info/SOURCES.txt
 ray_embedding.egg-info/dependency_links.txt
- ray_embedding.egg-info/top_level.txt
- test/test.py
+ ray_embedding.egg-info/top_level.txt
@@ -1,6 +1,6 @@
 [metadata]
 name = ray-embedding
- version = 0.10.1
+ version = 0.10.5
 author = Crispin Almodovar
 author_email =
 description = Deploy SentenceTransformers embedding models to a ray cluster
@@ -1,28 +0,0 @@
- Metadata-Version: 2.4
- Name: ray-embedding
- Version: 0.10.1
- Summary: Deploy SentenceTransformers embedding models to a ray cluster
- Author: Crispin Almodovar
- Author-email:
- Classifier: Programming Language :: Python :: 3
- Classifier: License :: OSI Approved :: MIT License
- Classifier: Operating System :: OS Independent
- Requires-Python: >=3.12
- Description-Content-Type: text/markdown
-
- # ray-embedding
-
- A tool for deploying SentenceTransformers models to a ray cluster.
-
- ### Supports the following backends
-
- - pytorch-gpu
- - pytorch-cpu
-
- ### Planned:
- - onnx-gpu
- - onnx-cpu
- - openvino-cpu
- - fastembed-onnx-cpu
-
-
@@ -1,16 +0,0 @@
- # ray-embedding
-
- A tool for deploying SentenceTransformers models to a ray cluster.
-
- ### Supports the following backends
-
- - pytorch-gpu
- - pytorch-cpu
-
- ### Planned:
- - onnx-gpu
- - onnx-cpu
- - openvino-cpu
- - fastembed-onnx-cpu
-
-
@@ -1,28 +0,0 @@
- Metadata-Version: 2.4
- Name: ray-embedding
- Version: 0.10.1
- Summary: Deploy SentenceTransformers embedding models to a ray cluster
- Author: Crispin Almodovar
- Author-email:
- Classifier: Programming Language :: Python :: 3
- Classifier: License :: OSI Approved :: MIT License
- Classifier: Operating System :: OS Independent
- Requires-Python: >=3.12
- Description-Content-Type: text/markdown
-
- # ray-embedding
-
- A tool for deploying SentenceTransformers models to a ray cluster.
-
- ### Supports the following backends
-
- - pytorch-gpu
- - pytorch-cpu
-
- ### Planned:
- - onnx-gpu
- - onnx-cpu
- - openvino-cpu
- - fastembed-onnx-cpu
-
-
File without changes