ray-embedding 0.10.1__tar.gz → 0.10.10__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ray-embedding might be problematic.
- ray_embedding-0.10.10/PKG-INFO +36 -0
- ray_embedding-0.10.10/README.md +24 -0
- {ray_embedding-0.10.1 → ray_embedding-0.10.10}/ray_embedding/deploy.py +2 -0
- {ray_embedding-0.10.1 → ray_embedding-0.10.10}/ray_embedding/embedding_model.py +15 -6
- ray_embedding-0.10.10/ray_embedding.egg-info/PKG-INFO +36 -0
- {ray_embedding-0.10.1 → ray_embedding-0.10.10}/ray_embedding.egg-info/SOURCES.txt +1 -2
- {ray_embedding-0.10.1 → ray_embedding-0.10.10}/setup.cfg +1 -1
- ray_embedding-0.10.1/PKG-INFO +0 -28
- ray_embedding-0.10.1/README.md +0 -16
- ray_embedding-0.10.1/ray_embedding.egg-info/PKG-INFO +0 -28
- ray_embedding-0.10.1/test/test.py +0 -0
- {ray_embedding-0.10.1 → ray_embedding-0.10.10}/pyproject.toml +0 -0
- {ray_embedding-0.10.1 → ray_embedding-0.10.10}/ray_embedding/__init__.py +0 -0
- {ray_embedding-0.10.1 → ray_embedding-0.10.10}/ray_embedding/dto.py +0 -0
- {ray_embedding-0.10.1 → ray_embedding-0.10.10}/ray_embedding.egg-info/dependency_links.txt +0 -0
- {ray_embedding-0.10.1 → ray_embedding-0.10.10}/ray_embedding.egg-info/top_level.txt +0 -0
ray_embedding-0.10.10/PKG-INFO
ADDED
@@ -0,0 +1,36 @@
+Metadata-Version: 2.4
+Name: ray-embedding
+Version: 0.10.10
+Summary: Deploy SentenceTransformers embedding models to a ray cluster
+Author: Crispin Almodovar
+Author-email:
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.12
+Description-Content-Type: text/markdown
+
+# ray-embedding
+
+A Python library for deploying SentenceTransformers models to a ray cluster.
+This tool encapsulates inference logic that uses SentenceTransformers
+to load any compatible embedding model from the Hugging Face hub and
+compute embeddings for input text.
+
+This library is meant to be used with the [embedding-models Ray cluster](https://bitbucket.org/docorto/embedding-models/src/dev/).
+
+Refer to this [Ray Serve deployment config](https://bitbucket.org/docorto/embedding-models/src/dev/serve-config/dev/serve-config.yaml)
+to see how this library is used.
+
+### Supports the following backends
+
+- pytorch-gpu
+- pytorch-cpu
+
+### Planned:
+- onnx-gpu
+- onnx-cpu
+- openvino-cpu
+- fastembed-onnx-cpu
+
+
ray_embedding-0.10.10/README.md
ADDED
@@ -0,0 +1,24 @@
+# ray-embedding
+
+A Python library for deploying SentenceTransformers models to a ray cluster.
+This tool encapsulates inference logic that uses SentenceTransformers
+to load any compatible embedding model from the Hugging Face hub and
+compute embeddings for input text.
+
+This library is meant to be used with the [embedding-models Ray cluster](https://bitbucket.org/docorto/embedding-models/src/dev/).
+
+Refer to this [Ray Serve deployment config](https://bitbucket.org/docorto/embedding-models/src/dev/serve-config/dev/serve-config.yaml)
+to see how this library is used.
+
+### Supports the following backends
+
+- pytorch-gpu
+- pytorch-cpu
+
+### Planned:
+- onnx-gpu
+- onnx-cpu
+- openvino-cpu
+- fastembed-onnx-cpu
+
+
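The README above points readers to an external Ray Serve deployment config for concrete usage. As a rough orientation, a caller could also drive `deploy_model` (the builder changed in the deploy.py hunks below) directly from Python. This is a hypothetical sketch: the import path and the `model`, `device`, and `backend` keys are visible in this diff, while the `deployment_name` key, the model id, and the `serve.run` call are assumptions.

```python
# Hypothetical usage sketch only; not taken from the linked serve-config.
# "deployment_name", the model id, and the serve.run call are assumptions.
from ray import serve
from ray_embedding.deploy import deploy_model  # builder shown in the deploy.py hunks below

app = deploy_model({
    "deployment_name": "bge-small",      # assumed key
    "model": "BAAI/bge-small-en-v1.5",   # any SentenceTransformers-compatible model
    "device": "auto",                    # new in 0.10.10: cuda if available, else cpu
    "backend": "torch",                  # pytorch backend; onnx/openvino are planned
    "trust_remote_code": False,
})

serve.run(app, name="embedding-models")
```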
{ray_embedding-0.10.1 → ray_embedding-0.10.10}/ray_embedding/deploy.py
CHANGED
@@ -16,6 +16,7 @@ def deploy_model(args: Dict[str, Any]) -> Application:
     model: str = args.pop("model", "")
     assert model
 
+    device: Optional[str] = args.pop("device", None)
     backend: Optional[str] = args.pop("backend", "torch")
     matryoshka_dim: Optional[int] = args.pop("matryoshka_dim", None)
     trust_remote_code: Optional[bool] = args.pop("trust_remote_code", False)
@@ -32,6 +33,7 @@ def deploy_model(args: Dict[str, Any]) -> Application:
         raise ValueError(f"Invalid torch_dtype: '{torch_dtype}'")
 
     deployment = EmbeddingModel.options(name=deployment_name).bind(model=model,
+                                                                   device=device,
                                                                    backend=backend,
                                                                    matryoshka_dim=matryoshka_dim,
                                                                    trust_remote_code=trust_remote_code,
{ray_embedding-0.10.1 → ray_embedding-0.10.10}/ray_embedding/embedding_model.py
CHANGED
@@ -28,18 +28,23 @@ web_api = FastAPI(title=f"Ray Embeddings - OpenAI-compatible API")
 )
 @serve.ingress(web_api)
 class EmbeddingModel:
-    def __init__(self, model: str,
-
+    def __init__(self, model: str, device: Optional[str] = None, backend: Optional[str] = "torch",
+                 matryoshka_dim: Optional[int] = None, trust_remote_code: Optional[bool] = False,
+                 model_kwargs: Dict[str, Any] = None):
         logging.basicConfig(level=logging.INFO)
         self.logger = logging.getLogger(__name__)
         self.model = model
+        self.init_device = device
+        if self.init_device is None or self.init_device == "auto":
+            self.init_device = "cuda" if torch.cuda.is_available() else "cpu"
+        self.torch_device = torch.device(self.init_device)
         self.backend = backend or "torch"
         self.matryoshka_dim = matryoshka_dim
         self.trust_remote_code = trust_remote_code or False
         self.model_kwargs = model_kwargs or {}
-        self.torch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
         self.logger.info(f"Initializing embedding model: {self.model}")
-        self.embedding_model = SentenceTransformer(self.model,
+        self.embedding_model = SentenceTransformer(self.model, device=self.init_device, backend=self.backend,
+                                                   trust_remote_code=self.trust_remote_code,
                                                    model_kwargs=self.model_kwargs)
 
         self.served_model_name = os.path.basename(self.model)
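The class above mounts a FastAPI app titled "Ray Embeddings - OpenAI-compatible API", so a deployed model can be queried like an OpenAI embeddings endpoint. The sketch below is a hypothetical client call: the host, port, route, and response shape follow the usual OpenAI convention and Ray Serve defaults, and are assumptions rather than values taken from this diff.

```python
# Hypothetical client sketch. Host, port, route, model name, and response shape
# are assumptions; only the "OpenAI-compatible" intent comes from this diff.
import requests

resp = requests.post(
    "http://localhost:8000/v1/embeddings",  # assumed Ray Serve ingress + OpenAI-style route
    json={"model": "bge-small-en-v1.5", "input": ["hello world"]},
    timeout=30,
)
resp.raise_for_status()
embedding = resp.json()["data"][0]["embedding"]  # OpenAI-style response shape (assumed)
print(len(embedding), embedding[:5])
```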
@@ -93,5 +98,9 @@ class EmbeddingModel:
         """Returns the list of available models in OpenAI-compatible format."""
         return {"object": "list", "data": self.available_models}
 
-
-
+    def check_health(self):
+        if self.init_device == "cuda" and not torch.cuda.is_available():
+            # Even though CUDA was available at init time,
+            # CUDA can become unavailable - this is a known problem in AWS EC2
+            # https://github.com/ray-project/ray/issues/49594
+            raise RuntimeError("CUDA device is not available")
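The new `check_health` method hooks into Ray Serve's custom health-check mechanism: Serve calls the method periodically, and an exception marks the replica unhealthy so it is restarted, which is how a deployment recovers from the lost-CUDA condition referenced in the comment above. Below is a standalone sketch of that mechanism, separate from this package; the period and timeout values are illustrative only.

```python
# Standalone sketch of Ray Serve's custom health-check hook; the period and
# timeout values are illustrative, not taken from ray-embedding.
import torch
from ray import serve


@serve.deployment(health_check_period_s=10, health_check_timeout_s=30)
class GpuGuardedReplica:
    def check_health(self) -> None:
        # Same idea as the 0.10.10 change: fail fast if CUDA disappears after
        # start-up so Serve can replace the replica on a healthy node.
        if not torch.cuda.is_available():
            raise RuntimeError("CUDA device is not available")
```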
ray_embedding-0.10.10/ray_embedding.egg-info/PKG-INFO
ADDED
@@ -0,0 +1,36 @@
+Metadata-Version: 2.4
+Name: ray-embedding
+Version: 0.10.10
+Summary: Deploy SentenceTransformers embedding models to a ray cluster
+Author: Crispin Almodovar
+Author-email:
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.12
+Description-Content-Type: text/markdown
+
+# ray-embedding
+
+A Python library for deploying SentenceTransformers models to a ray cluster.
+This tool encapsulates inference logic that uses SentenceTransformers
+to load any compatible embedding model from the Hugging Face hub and
+compute embeddings for input text.
+
+This library is meant to be used with the [embedding-models Ray cluster](https://bitbucket.org/docorto/embedding-models/src/dev/).
+
+Refer to this [Ray Serve deployment config](https://bitbucket.org/docorto/embedding-models/src/dev/serve-config/dev/serve-config.yaml)
+to see how this library is used.
+
+### Supports the following backends
+
+- pytorch-gpu
+- pytorch-cpu
+
+### Planned:
+- onnx-gpu
+- onnx-cpu
+- openvino-cpu
+- fastembed-onnx-cpu
+
+
ray_embedding-0.10.1/PKG-INFO
DELETED
@@ -1,28 +0,0 @@
-Metadata-Version: 2.4
-Name: ray-embedding
-Version: 0.10.1
-Summary: Deploy SentenceTransformers embedding models to a ray cluster
-Author: Crispin Almodovar
-Author-email:
-Classifier: Programming Language :: Python :: 3
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Requires-Python: >=3.12
-Description-Content-Type: text/markdown
-
-# ray-embedding
-
-A tool for deploying SentenceTransformers models to a ray cluster.
-
-### Supports the following backends
-
-- pytorch-gpu
-- pytorch-cpu
-
-### Planned:
-- onnx-gpu
-- onnx-cpu
-- openvino-cpu
-- fastembed-onnx-cpu
-
-
ray_embedding-0.10.1/README.md
DELETED
@@ -1,16 +0,0 @@
-# ray-embedding
-
-A tool for deploying SentenceTransformers models to a ray cluster.
-
-### Supports the following backends
-
-- pytorch-gpu
-- pytorch-cpu
-
-### Planned:
-- onnx-gpu
-- onnx-cpu
-- openvino-cpu
-- fastembed-onnx-cpu
-
-
ray_embedding-0.10.1/ray_embedding.egg-info/PKG-INFO
DELETED
@@ -1,28 +0,0 @@
-Metadata-Version: 2.4
-Name: ray-embedding
-Version: 0.10.1
-Summary: Deploy SentenceTransformers embedding models to a ray cluster
-Author: Crispin Almodovar
-Author-email:
-Classifier: Programming Language :: Python :: 3
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Requires-Python: >=3.12
-Description-Content-Type: text/markdown
-
-# ray-embedding
-
-A tool for deploying SentenceTransformers models to a ray cluster.
-
-### Supports the following backends
-
-- pytorch-gpu
-- pytorch-cpu
-
-### Planned:
-- onnx-gpu
-- onnx-cpu
-- openvino-cpu
-- fastembed-onnx-cpu
-
-
Files without changes: ray_embedding-0.10.1/test/test.py, pyproject.toml, ray_embedding/__init__.py, ray_embedding/dto.py, ray_embedding.egg-info/dependency_links.txt, ray_embedding.egg-info/top_level.txt.