ray-embedding 0.9.8__py3-none-any.whl → 0.9.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ray-embedding has been flagged as potentially problematic; review the changes below before upgrading.

ray_embedding/deploy.py CHANGED
@@ -29,7 +29,7 @@ def deploy_model(args: Dict[str, Any]) -> Application:
29
29
  elif torch_dtype == "float32":
30
30
  model_kwargs["torch_dtype"] = torch.float32
31
31
  else:
32
- del model_kwargs["torch_dtype"] # Remove
32
+ raise ValueError(f"Invalid torch_dtype: '{torch_dtype}'")
33
33
 
34
34
  deployment = EmbeddingModel.options(name=deployment_name).bind(model=model,
35
35
  backend=backend,
@@ -76,42 +76,51 @@ class EmbeddingModel:
76
76
 
77
77
  @serve.batch(max_batch_size=8, batch_wait_timeout_s=0.25)
78
78
  async def __create_embeddings_batch(self, requests_batch: List[EmbeddingRequest]) -> List[EmbeddingResponse]:
79
- self_0 = self[0] if isinstance(self, list) else self
79
+ self_0 = self[0] if isinstance(self, list) else self # Ray also passes an array of self refs; just take the first one
80
+ embedding_model, matryoshka_dim, torch_device = self_0.embedding_model, self_0.matryoshka_dim, self_0.torch_device
80
81
 
81
- # Batch the text inputs
82
- inputs = []
83
- truncate_dims = []
82
+ inputs, truncate_dims, num_inputs_list = [], [], []
84
83
  for request in requests_batch:
85
- if isinstance(request.input, str):
86
- request.input = [request.input]
87
- inputs.extend(request.input)
88
- truncate_dims.append(request.dimensions or self_0.matryoshka_dim)
84
+ input_text = request.input if isinstance(request.input, list) else [request.input] # Can be a list of texts
85
+ inputs.extend(input_text)
86
+ num_inputs_list.append(len(input_text))
87
+ truncate_dims.append(request.dimensions or matryoshka_dim)
89
88
 
90
- # Compute embeddings for the batch of text inputs
91
- embeddings = self_0.embedding_model.encode(
89
+ embeddings = embedding_model.encode(
92
90
  inputs, convert_to_tensor=True, normalize_embeddings=True, show_progress_bar=False,
93
- ).to(self_0.torch_device)
94
-
95
- # Truncate the embeddings; note that the truncate_dim can be different for each request
96
- # so we need to this step one by one
97
- results = []
98
- ix = 0
99
- for truncate_dim, request in zip(truncate_dims, requests_batch):
100
- num_inputs = len(request.input)
101
- batch_embeddings = embeddings[ix: ix + num_inputs]
102
- ix += num_inputs
103
- if truncate_dim is not None:
104
- batch_embeddings = batch_embeddings[:, :truncate_dim]
105
- batch_embeddings = batch_embeddings / torch.norm(batch_embeddings, dim=1, keepdim=True)
106
-
107
- batch_embeddings = batch_embeddings.cpu().tolist()
108
- response_data = [
109
- {"index": emb_ix, "embedding": emb}
110
- for emb_ix, emb in enumerate(batch_embeddings)
111
- ]
112
- results.append(EmbeddingResponse(object="list", data=response_data, model=request.model))
113
-
114
- return results
91
+ ).to(torch_device)
92
+
93
+ model_name = requests_batch[0].model
94
+ truncate_needed = any(dim is not None for dim in truncate_dims)
95
+ results_batch, ix = [], 0
96
+
97
+ if truncate_needed:
98
+ for truncate_dim, num_inputs in zip(truncate_dims, num_inputs_list):
99
+ batch_embeddings = embeddings[ix: ix + num_inputs]
100
+ ix += num_inputs
101
+
102
+ if truncate_dim is not None:
103
+ # Truncate and normalize using pytorch (faster)
104
+ batch_embeddings = batch_embeddings[:, :truncate_dim]
105
+ batch_embeddings = batch_embeddings / torch.norm(batch_embeddings, dim=1, keepdim=True)
106
+
107
+ batch_embeddings = batch_embeddings.cpu().tolist()
108
+ response_data = [
109
+ {"index": emb_ix, "embedding": emb} for emb_ix, emb in enumerate(batch_embeddings)
110
+ ]
111
+ results_batch.append(EmbeddingResponse(object="list", data=response_data, model=model_name))
112
+ else:
113
+ embeddings_list = embeddings.cpu().tolist() # Move everything to CPU
114
+ for num_inputs in num_inputs_list:
115
+ batch_embeddings = embeddings_list[ix: ix + num_inputs]
116
+ ix += num_inputs
117
+
118
+ response_data = [
119
+ {"index": emb_ix, "embedding": emb} for emb_ix, emb in enumerate(batch_embeddings)
120
+ ]
121
+ results_batch.append(EmbeddingResponse(object="list", data=response_data, model=model_name))
122
+
123
+ return results_batch
115
124
 
116
125
  @web_api.get("/v1/models")
117
126
  async def list_models(self):
@@ -1,9 +1,9 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ray-embedding
3
- Version: 0.9.8
3
+ Version: 0.9.10
4
4
  Summary: Deploy SentenceTransformers embedding models to a ray cluster
5
5
  Author: Crispin Almodovar
6
- Author-email: crispin.almodovar@docorto.ai
6
+ Author-email:
7
7
  Classifier: Programming Language :: Python :: 3
8
8
  Classifier: License :: OSI Approved :: MIT License
9
9
  Classifier: Operating System :: OS Independent
@@ -24,11 +24,5 @@ A tool for deploying SentenceTransformers models to a ray cluster.
24
24
  - onnx-cpu
25
25
  - openvino-cpu
26
26
  - fastembed-onnx-cpu
27
-
28
- - spot instances
29
- - grpc
30
27
 
31
- ### To build:
32
- - python -m build
33
- - twine upload dist/*
34
28
 
@@ -0,0 +1,8 @@
1
+ ray_embedding/__init__.py,sha256=OYJT0rVaaGzY613JqgfktsCgroDnBkGOHxR2FE9UtRU,49
2
+ ray_embedding/deploy.py,sha256=oqOhMxS5UyZ4oGhfpL7kqvrxLO8QW41sF_FHbNSJe-w,1858
3
+ ray_embedding/dto.py,sha256=e91ejZbM_NB9WTjF1YnfuV71cajYIh0vOX8oV_g2OwM,595
4
+ ray_embedding/embedding_model.py,sha256=DyhFO0kQnyoFq_VGJQYDEZKuvtwqhuXr83Vu23yb4ds,5966
5
+ ray_embedding-0.9.10.dist-info/METADATA,sha256=aDU4KlY1_-mtJ80tPb9FGGAnB9o2VSjQtsUR3Xt60_k,605
6
+ ray_embedding-0.9.10.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
7
+ ray_embedding-0.9.10.dist-info/top_level.txt,sha256=ziCblpJq1YsrryshFqxTRuRMgNuO1_tgvAAkGShATNA,14
8
+ ray_embedding-0.9.10.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (78.1.0)
2
+ Generator: setuptools (79.0.1)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
@@ -1,8 +0,0 @@
1
- ray_embedding/__init__.py,sha256=OYJT0rVaaGzY613JqgfktsCgroDnBkGOHxR2FE9UtRU,49
2
- ray_embedding/deploy.py,sha256=YD_udSm13QbFPgSAkCrTQso15DmtIn0QEhErOFNg7jM,1841
3
- ray_embedding/dto.py,sha256=e91ejZbM_NB9WTjF1YnfuV71cajYIh0vOX8oV_g2OwM,595
4
- ray_embedding/embedding_model.py,sha256=aeNgO01Qu8tnhi8ScrdBJDmIoJnNwutRqKUVjqvZM08,5211
5
- ray_embedding-0.9.8.dist-info/METADATA,sha256=XjQh_exJnrlmNryxnOFgVmzlU9XugcNK_0PnUquD8h0,712
6
- ray_embedding-0.9.8.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
7
- ray_embedding-0.9.8.dist-info/top_level.txt,sha256=ziCblpJq1YsrryshFqxTRuRMgNuO1_tgvAAkGShATNA,14
8
- ray_embedding-0.9.8.dist-info/RECORD,,