ray-embedding 0.9.7__tar.gz → 0.9.9__tar.gz
This diff shows the content changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release. This version of ray-embedding might be problematic.
- {ray_embedding-0.9.7 → ray_embedding-0.9.9}/PKG-INFO +2 -8
- {ray_embedding-0.9.7 → ray_embedding-0.9.9}/README.md +0 -6
- {ray_embedding-0.9.7 → ray_embedding-0.9.9}/ray_embedding/deploy.py +1 -1
- {ray_embedding-0.9.7 → ray_embedding-0.9.9}/ray_embedding/embedding_model.py +41 -32
- {ray_embedding-0.9.7 → ray_embedding-0.9.9}/ray_embedding.egg-info/PKG-INFO +2 -8
- {ray_embedding-0.9.7 → ray_embedding-0.9.9}/setup.cfg +2 -2
- {ray_embedding-0.9.7 → ray_embedding-0.9.9}/pyproject.toml +0 -0
- {ray_embedding-0.9.7 → ray_embedding-0.9.9}/ray_embedding/__init__.py +0 -0
- {ray_embedding-0.9.7 → ray_embedding-0.9.9}/ray_embedding/dto.py +0 -0
- {ray_embedding-0.9.7 → ray_embedding-0.9.9}/ray_embedding.egg-info/SOURCES.txt +0 -0
- {ray_embedding-0.9.7 → ray_embedding-0.9.9}/ray_embedding.egg-info/dependency_links.txt +0 -0
- {ray_embedding-0.9.7 → ray_embedding-0.9.9}/ray_embedding.egg-info/top_level.txt +0 -0
- {ray_embedding-0.9.7 → ray_embedding-0.9.9}/test/test.py +0 -0
{ray_embedding-0.9.7 → ray_embedding-0.9.9}/PKG-INFO

@@ -1,9 +1,9 @@
 Metadata-Version: 2.4
 Name: ray-embedding
-Version: 0.9.7
+Version: 0.9.9
 Summary: Deploy SentenceTransformers embedding models to a ray cluster
 Author: Crispin Almodovar
-Author-email:
+Author-email:
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
@@ -24,11 +24,5 @@ A tool for deploying SentenceTransformers models to a ray cluster.
 - onnx-cpu
 - openvino-cpu
 - fastembed-onnx-cpu
-
-- spot instances
-- grpc

-### To build:
-- python -m build
-- twine upload dist/*

{ray_embedding-0.9.7 → ray_embedding-0.9.9}/ray_embedding/deploy.py

@@ -29,7 +29,7 @@ def deploy_model(args: Dict[str, Any]) -> Application:
     elif torch_dtype == "float32":
         model_kwargs["torch_dtype"] = torch.float32
     else:
-
+        raise ValueError(f"Invalid torch_dtype: '{torch_dtype}'")

     deployment = EmbeddingModel.options(name=deployment_name).bind(model=model,
                                                                    backend=backend,
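For context on the deploy.py change above: the else branch now raises an explicit ValueError when the configured torch_dtype string is not recognized, rather than falling through. Below is a minimal sketch of that dtype-resolution pattern; only the float32 branch and the error branch are visible in this diff, so the float16 and bfloat16 mappings are assumptions for illustration, and resolve_torch_dtype is a hypothetical helper name, not part of the package.

    import torch

    # Hypothetical helper illustrating the dtype-selection pattern in deploy_model.
    # Only "float32" and the ValueError fallback appear in the diff; the other
    # mappings are assumed for illustration.
    _DTYPE_MAP = {
        "float16": torch.float16,    # assumed
        "bfloat16": torch.bfloat16,  # assumed
        "float32": torch.float32,    # shown in the diff
    }

    def resolve_torch_dtype(torch_dtype: str) -> torch.dtype:
        try:
            return _DTYPE_MAP[torch_dtype]
        except KeyError:
            raise ValueError(f"Invalid torch_dtype: '{torch_dtype}'") from None

    # Usage sketch: model_kwargs["torch_dtype"] = resolve_torch_dtype("float32")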
{ray_embedding-0.9.7 → ray_embedding-0.9.9}/ray_embedding/embedding_model.py

@@ -69,7 +69,6 @@ class EmbeddingModel:
             assert request.model == self.served_model_name, (
                 f"Model '{request.model}' is not supported. Use '{self.served_model_name}' instead."
             )
-            request.dimensions = request.dimensions or self.matryoshka_dim
             return await self.__create_embeddings_batch(request)
         except Exception as e:
             self.logger.error(e)
@@ -77,40 +76,50 @@ class EmbeddingModel:

     @serve.batch(max_batch_size=8, batch_wait_timeout_s=0.25)
     async def __create_embeddings_batch(self, requests_batch: List[EmbeddingRequest]) -> List[EmbeddingResponse]:
-
-
-        truncate_dims = []
+        self_0 = self[0] if isinstance(self, list) else self
+
+        inputs, truncate_dims, num_inputs_list = [], [], []
         for request in requests_batch:
-            if isinstance(request.input,
-
-
-            truncate_dims.append(request.dimensions)
+            input_text = request.input if isinstance(request.input, list) else [request.input]  # Can be a list of texts
+            inputs.extend(input_text)
+            num_inputs_list.append(len(input_text))
+            truncate_dims.append(request.dimensions or self_0.matryoshka_dim)

-
-        embeddings = self.embedding_model.encode(
+        embeddings = self_0.embedding_model.encode(
             inputs, convert_to_tensor=True, normalize_embeddings=True, show_progress_bar=False,
-        ).to(
-
-
-
-
-
-
-            num_inputs
-
-
-
-
-
-
-
-
-
-
-
-
-
+        ).to(self_0.torch_device)
+
+        model_name = requests_batch[0].model
+        truncate_needed = any(dim is not None for dim in truncate_dims)
+        results_batch, ix = [], 0
+
+        if truncate_needed:
+            for truncate_dim, num_inputs in zip(truncate_dims, num_inputs_list):
+                batch_embeddings = embeddings[ix: ix + num_inputs]
+                ix += num_inputs
+
+                if truncate_dim is not None:
+                    # Truncate and normalize using pytorch (faster)
+                    batch_embeddings = batch_embeddings[:, :truncate_dim]
+                    batch_embeddings = batch_embeddings / torch.norm(batch_embeddings, dim=1, keepdim=True)
+
+                batch_embeddings = batch_embeddings.cpu().tolist()
+                response_data = [
+                    {"index": emb_ix, "embedding": emb} for emb_ix, emb in enumerate(batch_embeddings)
+                ]
+                results_batch.append(EmbeddingResponse(object="list", data=response_data, model=model_name))
+        else:
+            embeddings_list = embeddings.cpu().tolist()  # Move everything to CPU
+            for num_inputs in num_inputs_list:
+                batch_embeddings = embeddings_list[ix: ix + num_inputs]
+                ix += num_inputs
+
+                response_data = [
+                    {"index": emb_ix, "embedding": emb} for emb_ix, emb in enumerate(batch_embeddings)
+                ]
+                results_batch.append(EmbeddingResponse(object="list", data=response_data, model=model_name))
+
+        return results_batch

     @web_api.get("/v1/models")
     async def list_models(self):
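The embedding_model.py rewrite above flattens all texts from a batch of requests into a single encode() call, slices the resulting tensor back into per-request chunks, and, when a request asks for a matryoshka dimension, truncates each chunk to that width and re-normalizes it before converting to lists. The sketch below reproduces that slice/truncate/re-normalize bookkeeping on a dummy tensor so the shape handling is easy to follow; it is illustrative only, and the random unit vectors and full_dim value stand in for the real SentenceTransformers encode() output.

    import torch

    # Illustrative stand-in for the batched path in __create_embeddings_batch.
    # A real deployment gets `embeddings` from SentenceTransformer.encode(...);
    # here we fake it with random unit-normalized vectors (assumed data).
    num_inputs_list = [2, 3]      # texts per request in the batch
    truncate_dims = [256, None]   # requested matryoshka dim per request (None = full width)
    full_dim = 768                # assumed model output width

    embeddings = torch.randn(sum(num_inputs_list), full_dim)
    embeddings = embeddings / torch.norm(embeddings, dim=1, keepdim=True)

    results, ix = [], 0
    for truncate_dim, num_inputs in zip(truncate_dims, num_inputs_list):
        chunk = embeddings[ix: ix + num_inputs]   # rows belonging to this request
        ix += num_inputs

        if truncate_dim is not None:
            # Truncate to the requested matryoshka width, then re-normalize so
            # the shortened vectors are unit length again.
            chunk = chunk[:, :truncate_dim]
            chunk = chunk / torch.norm(chunk, dim=1, keepdim=True)

        results.append([{"index": i, "embedding": emb} for i, emb in enumerate(chunk.cpu().tolist())])

    print(len(results), len(results[0][0]["embedding"]), len(results[1][0]["embedding"]))  # 2 256 768

Note that normalize_embeddings=True in the real code already normalizes the full-width vectors, so the extra normalization is only needed after truncation, which is exactly what the new `if truncate_dim is not None` branch does.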
{ray_embedding-0.9.7 → ray_embedding-0.9.9}/ray_embedding.egg-info/PKG-INFO

@@ -1,9 +1,9 @@
 Metadata-Version: 2.4
 Name: ray-embedding
-Version: 0.9.7
+Version: 0.9.9
 Summary: Deploy SentenceTransformers embedding models to a ray cluster
 Author: Crispin Almodovar
-Author-email:
+Author-email:
 Classifier: Programming Language :: Python :: 3
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
@@ -24,11 +24,5 @@ A tool for deploying SentenceTransformers models to a ray cluster.
 - onnx-cpu
 - openvino-cpu
 - fastembed-onnx-cpu
-
-- spot instances
-- grpc

-### To build:
-- python -m build
-- twine upload dist/*

{ray_embedding-0.9.7 → ray_embedding-0.9.9}/setup.cfg

@@ -1,8 +1,8 @@
 [metadata]
 name = ray-embedding
-version = 0.9.7
+version = 0.9.9
 author = Crispin Almodovar
-author_email =
+author_email =
 description = Deploy SentenceTransformers embedding models to a ray cluster
 long_description = file: README.md
 long_description_content_type = text/markdown