ray-embedding 0.10.0__py3-none-any.whl → 0.10.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ray_embedding/deploy.py CHANGED
@@ -19,6 +19,7 @@ def deploy_model(args: Dict[str, Any]) -> Application:
     backend: Optional[str] = args.pop("backend", "torch")
     matryoshka_dim: Optional[int] = args.pop("matryoshka_dim", None)
     trust_remote_code: Optional[bool] = args.pop("trust_remote_code", False)
+    device: Optional[str] = args.pop("device", None)
     model_kwargs: Dict[str, Any] = args.pop("model_kwargs", {})
     if "torch_dtype" in model_kwargs:
         torch_dtype = model_kwargs["torch_dtype"].strip()
@@ -33,6 +34,7 @@ def deploy_model(args: Dict[str, Any]) -> Application:

     deployment = EmbeddingModel.options(name=deployment_name).bind(model=model,
                                                                    backend=backend,
+                                                                   device=device,
                                                                    matryoshka_dim=matryoshka_dim,
                                                                    trust_remote_code=trust_remote_code,
                                                                    model_kwargs=model_kwargs
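With this change, a `device` entry in the deployment's args dict is popped in `deploy_model` and forwarded to `EmbeddingModel`. A minimal sketch of a caller, assuming `deploy_model` also reads `deployment_name` and `model` from the same dict (as the `bind` call above suggests); all values are illustrative:

```python
from ray_embedding.deploy import deploy_model

# Illustrative args; "device" is the key added in 0.10.5. Any string that
# torch.device() accepts works, e.g. "cpu", "cuda", "cuda:0". Omitting it
# keeps the old behavior (auto-detect CUDA, fall back to CPU).
app = deploy_model({
    "deployment_name": "all-minilm-l6-v2",  # hypothetical deployment name
    "model": "sentence-transformers/all-MiniLM-L6-v2",
    "backend": "torch",
    "device": "cuda",
    "matryoshka_dim": None,
    "trust_remote_code": False,
    "model_kwargs": {},
})
```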
ray_embedding/embedding_model.py CHANGED
@@ -24,24 +24,25 @@ web_api = FastAPI(title=f"Ray Embeddings - OpenAI-compatible API")
         "min_replicas": 0,
         "initial_replicas": 1,
         "max_replicas": 1,
-    },
-    user_config={
-        "max_batch_size": 8,
-        "batch_wait_timeout_s": 0.25,
     }
 )
 @serve.ingress(web_api)
 class EmbeddingModel:
-    def __init__(self, model: str, backend: Optional[str] = "torch", matryoshka_dim: Optional[int] = None,
-                 trust_remote_code: Optional[bool] = False, model_kwargs: Dict[str, Any] = None):
+    def __init__(self, model: str, backend: Optional[str] = "torch", device: Optional[str] = None,
+                 matryoshka_dim: Optional[int] = None, trust_remote_code: Optional[bool] = False,
+                 model_kwargs: Dict[str, Any] = None):
         logging.basicConfig(level=logging.INFO)
         self.logger = logging.getLogger(__name__)
         self.model = model
+        self.init_device = device
         self.backend = backend or "torch"
         self.matryoshka_dim = matryoshka_dim
         self.trust_remote_code = trust_remote_code or False
         self.model_kwargs = model_kwargs or {}
-        self.torch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        if self.init_device is None:
+            self.torch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        else:
+            self.torch_device = torch.device(self.init_device)
         self.logger.info(f"Initializing embedding model: {self.model}")
         self.embedding_model = SentenceTransformer(self.model, backend=self.backend, trust_remote_code=self.trust_remote_code,
                                                    model_kwargs=self.model_kwargs)
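The resolution logic above is equivalent to the standalone snippet below; `torch.device` accepts strings such as `"cpu"`, `"cuda"`, or an indexed `"cuda:1"`, so an explicit device simply bypasses the CUDA auto-detection:

```python
import torch

def resolve_device(requested: str | None) -> torch.device:
    # Mirrors EmbeddingModel.__init__: an explicit device wins,
    # otherwise prefer CUDA when it is available.
    if requested is None:
        return torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return torch.device(requested)

print(resolve_device(None))      # cuda or cpu, depending on the host
print(resolve_device("cuda:1"))  # second GPU; no availability check is performed
```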
@@ -56,12 +57,6 @@ class EmbeddingModel:
         ]
         self.logger.info(f"Successfully initialized embedding model {self.model} using device {self.torch_device}")

-    async def reconfigure(self, user_config: Dict):
-        assert "max_batch_size" in user_config and "batch_wait_timeout_s" in user_config, "Invalid user config"
-        self.logger.info(f"Reconfiguring dynamic batching parameters: {user_config}")
-        self.__create_embeddings_batch.set_max_batch_size(user_config["max_batch_size"])
-        self.__create_embeddings_batch.set_batch_wait_timeout_s(user_config["batch_wait_timeout_s"])
-
     @web_api.post("/v1/embeddings", response_model=EmbeddingResponse)
     async def create_embeddings(self, request: EmbeddingRequest):
         """Generate embeddings for the input text using the specified model."""
@@ -69,7 +64,6 @@ class EmbeddingModel:
         assert request.model == self.served_model_name, (
             f"Model '{request.model}' is not supported. Use '{self.served_model_name}' instead."
         )
-
         if isinstance(request.input, str):
             request.input = [request.input]

@@ -77,7 +71,7 @@ class EmbeddingModel:

         # Compute embeddings and convert to a PyTorch tensor on the GPU
         embeddings = self.embedding_model.encode(
-            request.input, convert_to_tensor=True, normalize_embeddings=True
+            request.input, convert_to_tensor=True, normalize_embeddings=True, show_progress_bar=False,
         ).to(self.torch_device)

         if truncate_dim is not None:
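The `truncate_dim` branch this hunk leads into slices matryoshka embeddings and then re-normalizes them, since truncation breaks the unit norm that `normalize_embeddings=True` guaranteed. A standalone sketch of that operation with random stand-in vectors:

```python
import torch

# Stand-in for an encoded, unit-normalized batch of 4 embeddings of width 768.
embeddings = torch.nn.functional.normalize(torch.randn(4, 768), dim=1)
truncate_dim = 256

truncated = embeddings[:, :truncate_dim]
truncated = truncated / torch.norm(truncated, dim=1, keepdim=True)  # restore unit norm
assert torch.allclose(torch.norm(truncated, dim=1), torch.ones(4), atol=1e-5)
```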
@@ -99,58 +93,14 @@ class EmbeddingModel:
             self.logger.error(e)
             raise HTTPException(status_code=500, detail=str(e))

-    @serve.batch(max_batch_size=8, batch_wait_timeout_s=0.25)
-    async def __create_embeddings_batch(self, requests_batch: List[EmbeddingRequest]) -> List[EmbeddingResponse]:
-        self_0 = self[0] if isinstance(self, list) else self  # Ray also passes an array of self refs; just take the first one
-        embedding_model, matryoshka_dim, torch_device = self_0.embedding_model, self_0.matryoshka_dim, self_0.torch_device
-
-        inputs, truncate_dims, num_inputs_list = [], [], []
-        for request in requests_batch:
-            input_text = request.input if isinstance(request.input, list) else [request.input]  # Can be a list of texts
-            inputs.extend(input_text)
-            num_inputs_list.append(len(input_text))
-            truncate_dims.append(request.dimensions or matryoshka_dim)
-
-        embeddings = embedding_model.encode(
-            inputs, convert_to_tensor=True, normalize_embeddings=True, show_progress_bar=False,
-        ).to(torch_device)
-
-        model_name = requests_batch[0].model
-        truncate_needed = any(dim is not None for dim in truncate_dims)
-        results_batch, ix = [], 0
-
-        if truncate_needed:
-            for truncate_dim, num_inputs in zip(truncate_dims, num_inputs_list):
-                batch_embeddings = embeddings[ix: ix + num_inputs]
-                ix += num_inputs
-
-                if truncate_dim is not None:
-                    # Truncate and normalize using pytorch (faster)
-                    batch_embeddings = batch_embeddings[:, :truncate_dim]
-                    batch_embeddings = batch_embeddings / torch.norm(batch_embeddings, dim=1, keepdim=True)
-
-                batch_embeddings = batch_embeddings.cpu().tolist()
-                response_data = [
-                    {"index": emb_ix, "embedding": emb} for emb_ix, emb in enumerate(batch_embeddings)
-                ]
-                results_batch.append(EmbeddingResponse(object="list", data=response_data, model=model_name))
-        else:
-            embeddings_list = embeddings.cpu().tolist()  # Move everything to CPU
-            for num_inputs in num_inputs_list:
-                batch_embeddings = embeddings_list[ix: ix + num_inputs]
-                ix += num_inputs
-
-                response_data = [
-                    {"index": emb_ix, "embedding": emb} for emb_ix, emb in enumerate(batch_embeddings)
-                ]
-                results_batch.append(EmbeddingResponse(object="list", data=response_data, model=model_name))
-
-        return results_batch
-
     @web_api.get("/v1/models")
     async def list_models(self):
         """Returns the list of available models in OpenAI-compatible format."""
         return {"object": "list", "data": self.available_models}

-
-
+    def check_health(self):
+        if self.init_device == "cuda" and not torch.cuda.is_available():
+            # Even though CUDA was available at init time,
+            # CUDA can become unavailable - this is a known problem in AWS EC2
+            # https://github.com/ray-project/ray/issues/49594
+            raise RuntimeError("CUDA device is not available")
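The new `check_health` method hooks into Ray Serve's replica health checking: Serve periodically invokes a user-defined `check_health` on each replica, and a raised exception marks the replica unhealthy so it gets torn down and replaced. A minimal sketch of the pattern; the deployment class and polling interval here are illustrative:

```python
from ray import serve
import torch

@serve.deployment(health_check_period_s=10)  # how often Serve polls check_health
class GpuWorker:
    def check_health(self):
        # Raising here marks the replica unhealthy and Serve restarts it,
        # guarding against a GPU dropping out from under a running replica.
        if not torch.cuda.is_available():
            raise RuntimeError("CUDA device is not available")
```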
ray_embedding-0.10.5.dist-info/METADATA ADDED
@@ -0,0 +1,36 @@
+Metadata-Version: 2.4
+Name: ray-embedding
+Version: 0.10.5
+Summary: Deploy SentenceTransformers embedding models to a ray cluster
+Author: Crispin Almodovar
+Author-email:
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.12
+Description-Content-Type: text/markdown
+
+# ray-embedding
+
+A Python library for deploying SentenceTransformers models to a ray cluster.
+This tool encapsulates inference logic that uses SentenceTransformers
+to load any compatible embedding model from the Hugging Face hub and
+compute embeddings for input text.
+
+This library is meant to be used with the [embedding-models Ray cluster](https://bitbucket.org/docorto/embedding-models/src/dev/).
+
+Refer to this [Ray Serve deployment config](https://bitbucket.org/docorto/embedding-models/src/dev/serve-config/dev/serve-config.yaml)
+to see how this library is used.
+
+### Supports the following backends
+
+- pytorch-gpu
+- pytorch-cpu
+
+### Planned:
+- onnx-gpu
+- onnx-cpu
+- openvino-cpu
+- fastembed-onnx-cpu
+
+
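Because the deployment exposes OpenAI-compatible `/v1/embeddings` and `/v1/models` routes (see `embedding_model.py` above), a standard OpenAI SDK client should be able to talk to it. A hedged sketch; the base URL and served model name are placeholders:

```python
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="unused")  # placeholder endpoint

response = client.embeddings.create(
    model="all-minilm-l6-v2",  # must match the served model name
    input=["ray serve makes embedding deployment simple"],
    dimensions=256,            # optional matryoshka truncation, as in EmbeddingRequest
)
print(len(response.data[0].embedding))  # -> 256
```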
ray_embedding-0.10.5.dist-info/RECORD ADDED
@@ -0,0 +1,8 @@
+ray_embedding/__init__.py,sha256=OYJT0rVaaGzY613JqgfktsCgroDnBkGOHxR2FE9UtRU,49
+ray_embedding/deploy.py,sha256=GEKJAaV25DOlXd7Pqj7bqASM-FcP0FtFsJlI7u_U6Iw,1995
+ray_embedding/dto.py,sha256=e91ejZbM_NB9WTjF1YnfuV71cajYIh0vOX8oV_g2OwM,595
+ray_embedding/embedding_model.py,sha256=i0sno946g2nbnhhhcD-bZ3WoWJi5qScbm716-i6tI1I,4501
+ray_embedding-0.10.5.dist-info/METADATA,sha256=L77LrGKHM2GYVgpXGVqkRYf5LoSCkbHpar6vkwlxIp4,1094
+ray_embedding-0.10.5.dist-info/WHEEL,sha256=0CuiUZ_p9E4cD6NyLD6UG80LBXYyiSYZOKDm5lp32xk,91
+ray_embedding-0.10.5.dist-info/top_level.txt,sha256=ziCblpJq1YsrryshFqxTRuRMgNuO1_tgvAAkGShATNA,14
+ray_embedding-0.10.5.dist-info/RECORD,,
ray_embedding-0.10.0.dist-info/WHEEL → ray_embedding-0.10.5.dist-info/WHEEL RENAMED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.3.0)
+Generator: setuptools (80.3.1)
 Root-Is-Purelib: true
 Tag: py3-none-any

ray_embedding-0.10.0.dist-info/METADATA DELETED
@@ -1,28 +0,0 @@
-Metadata-Version: 2.4
-Name: ray-embedding
-Version: 0.10.0
-Summary: Deploy SentenceTransformers embedding models to a ray cluster
-Author: Crispin Almodovar
-Author-email:
-Classifier: Programming Language :: Python :: 3
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Requires-Python: >=3.12
-Description-Content-Type: text/markdown
-
-# ray-embedding
-
-A tool for deploying SentenceTransformers models to a ray cluster.
-
-### Supports the following backends
-
-- pytorch-gpu
-- pytorch-cpu
-
-### Planned:
-- onnx-gpu
-- onnx-cpu
-- openvino-cpu
-- fastembed-onnx-cpu
-
-
ray_embedding-0.10.0.dist-info/RECORD DELETED
@@ -1,8 +0,0 @@
-ray_embedding/__init__.py,sha256=OYJT0rVaaGzY613JqgfktsCgroDnBkGOHxR2FE9UtRU,49
-ray_embedding/deploy.py,sha256=oqOhMxS5UyZ4oGhfpL7kqvrxLO8QW41sF_FHbNSJe-w,1858
-ray_embedding/dto.py,sha256=e91ejZbM_NB9WTjF1YnfuV71cajYIh0vOX8oV_g2OwM,595
-ray_embedding/embedding_model.py,sha256=cyGH7CZRCAV50T51aTAxTSRCU0haCV_mX42tyhmFa6U,7007
-ray_embedding-0.10.0.dist-info/METADATA,sha256=8uU8yj2bnbxZPlL1WEaU0V6ioi0We6rcUUzTGd62yTk,605
-ray_embedding-0.10.0.dist-info/WHEEL,sha256=GHB6lJx2juba1wDgXDNlMTyM13ckjBMKf-OnwgKOCtA,91
-ray_embedding-0.10.0.dist-info/top_level.txt,sha256=ziCblpJq1YsrryshFqxTRuRMgNuO1_tgvAAkGShATNA,14
-ray_embedding-0.10.0.dist-info/RECORD,,