ray-embedding 0.11.7-py3-none-any.whl → 0.12.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

ray_embedding/deploy.py CHANGED
@@ -1,18 +1,19 @@
+import os
 from typing import Dict, Any, Optional
 from ray.serve import Application
 from ray.serve.handle import DeploymentHandle
 
-from ray_embedding.dto import AppConfig, ModelDeploymentConfig
+from ray_embedding.dto import AppConfig, ModelDeploymentConfig, DeployedModel
 from ray_embedding.embedding_model import EmbeddingModel
 import torch
 
 from ray_embedding.model_router import ModelRouter
 
 
-def build_model(model_config: ModelDeploymentConfig) -> DeploymentHandle:
+def build_model(model_config: ModelDeploymentConfig) -> DeployedModel:
     deployment_name = model_config.deployment
     model = model_config.model
-    served_model_name = model_config.served_model_name
+    served_model_name = model_config.served_model_name or os.path.basename(model)
     device = model_config.device
     backend = model_config.backend or "torch"
     matryoshka_dim = model_config.matryoshka_dim
@@ -37,12 +38,18 @@ def build_model(model_config: ModelDeploymentConfig) -> DeploymentHandle:
         trust_remote_code=trust_remote_code,
         model_kwargs=model_kwargs
     )
-    return deployment
+    return DeployedModel(model=served_model_name,
+                         deployment_handle=deployment,
+                         batch_size=model_config.batch_size,
+                         num_retries=model_config.num_retries
+                         )
+
 
 def build_app(args: AppConfig) -> Application:
     model_router, models = args.model_router, args.models
     assert model_router and models
+    assert model_router.path_prefix
 
-    served_models = {model_config.served_model_name: build_model(model_config) for model_config in models}
-    router = ModelRouter.options(name=model_router.deployment).bind(served_models)
+    deployed_models = {model_config.served_model_name: build_model(model_config) for model_config in models}
+    router = ModelRouter.options(name=model_router.deployment).bind(deployed_models, model_router.path_prefix)
     return router
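
Two behavioral changes are visible here: build_model now returns a DeployedModel wrapper (carrying the deployment handle plus batching and retry settings) instead of a bare DeploymentHandle, and served_model_name falls back to the basename of the model path. A minimal sketch of that fallback, with a hypothetical model id:

import os

# Fallback applied inside build_model() when served_model_name is unset:
model = "intfloat/e5-small-v2"               # hypothetical Hugging Face model id
served_model_name = None or os.path.basename(model)
print(served_model_name)                     # -> "e5-small-v2"

Note that build_app still keys deployed_models by model_config.served_model_name, so a config that relies on the basename fallback registers the wrapper under the original unset name rather than the derived one.
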
ray_embedding/dto.py CHANGED
@@ -20,6 +20,7 @@ class EmbeddingResponse(BaseModel):
 
 class ModelRouterConfig(BaseModel):
     deployment: str
+    path_prefix: List[str] = []
 
 
 class ModelDeploymentConfig(BaseModel):
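
The router config gains a path_prefix list, which build_app now passes through to ModelRouter.bind. A sketch of the extended model, assuming only the fields shown in this diff; the values are hypothetical:

from ray_embedding.dto import ModelRouterConfig

router_config = ModelRouterConfig(
    deployment="model-router",
    path_prefix=["embeddings", "internal"],  # each entry becomes a valid /{path_prefix}/v1/... root
)

The mutable default [] is safe in a pydantic model: field defaults are copied per instance rather than shared, as they would be in a plain Python class.
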
ray_embedding/embedding_model.py CHANGED
@@ -34,9 +34,9 @@ class EmbeddingModel:
             trust_remote_code=self.trust_remote_code,
             model_kwargs=self.model_kwargs)
 
-        self.logger.info(f"Successfully initialized embedding model {self.model} using device {self.torch_device}")
+        self.logger.info(f"Successfully initialized model {self.model} using device {self.torch_device}")
 
-    async def __call__(self, text: Union[str, List[str]], dimensions: Optional[int] = None):
+    async def __call__(self, text: Union[str, List[str]], dimensions: Optional[int] = None) -> List[List[float]]:
         """Compute embeddings for the input text using the loaded model."""
         if isinstance(text, str):
             text = [text]
@@ -53,8 +53,8 @@ class EmbeddingModel:
             embeddings = embeddings / torch.norm(embeddings, dim=1, keepdim=True)
 
         # Move all embeddings to CPU at once before conversion
-        embeddings = embeddings.cpu().tolist()
-        return embeddings
+        embeddings_list = embeddings.cpu().tolist()
+        return embeddings_list
 
 
     def wait_for_cuda(self, wait: int = 10):
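
The embedding path itself is unchanged; the rename just avoids rebinding the tensor variable to a list, and the new annotation documents the List[List[float]] return type. A self-contained sketch of the normalize-then-convert pattern, using random stand-in data rather than real model output:

import torch

embeddings = torch.randn(4, 384)                                       # hypothetical batch of 4 vectors
embeddings = embeddings / torch.norm(embeddings, dim=1, keepdim=True)  # L2-normalize each row
embeddings_list = embeddings.cpu().tolist()                            # one device-to-host copy, then plain lists
assert all(abs(sum(v * v for v in row) - 1.0) < 1e-4 for row in embeddings_list)

Calling .cpu().tolist() once on the full batch keeps the device-to-host transfer to a single call instead of one per embedding.
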
ray_embedding/model_router.py CHANGED
@@ -1,10 +1,11 @@
 import asyncio
 import logging
 import time
-from typing import Optional, Dict, List
+from typing import Optional, Dict, List, Tuple, Any, Coroutine
 
 from fastapi import FastAPI, HTTPException
 from ray import serve
+from ray.serve.handle import DeploymentHandle
 
 from ray_embedding.dto import EmbeddingResponse, EmbeddingRequest, DeployedModel
 
@@ -13,62 +14,79 @@ web_api = FastAPI(title="Ray Embeddings - OpenAI-compatible API")
 @serve.deployment
 @serve.ingress(web_api)
 class ModelRouter:
-    def __init__(self, served_models: Dict[str, DeployedModel]):
-        assert served_models, "models cannot be empty"
+    def __init__(self, deployed_models: Dict[str, DeployedModel], path_prefix: List[str]):
+        assert deployed_models, "models cannot be empty"
+        assert path_prefix, "path_prefix cannot be empty"
+
         logging.basicConfig(level=logging.INFO)
         self.logger = logging.getLogger(self.__class__.__name__)
-        self.served_models = served_models
+        self.deployed_models = deployed_models
+        self.path_prefix = path_prefix
         self.available_models = [
             {"id": str(item),
              "object": "model",
              "created": int(time.time()),
              "owned_by": "openai",
-             "permission": []} for item in self.served_models.keys()
+             "permission": []} for item in self.deployed_models.keys()
         ]
         self.logger.info(f"Successfully registered models: {self.available_models}")
 
     async def _compute_embeddings_from_resized_batches(self, model: str, inputs: List[str], dimensions: Optional[int] = None):
-        assert model in self.served_models
-        self.logger.info(f"Retrieving model handle for: {model} ")
-        model_handle = self.served_models[model].deployment_handle
-        self.logger.info(f"Model handle: {model_handle} ")
-        batch_size = self.served_models[model].batch_size
-        num_retries = self.served_models[model].num_retries
+        deployed_model = self.deployed_models[model]
+        model_handle = deployed_model.deployment_handle
+        batch_size = deployed_model.batch_size
+        num_retries = deployed_model.num_retries
 
         # Resize the inputs into batch_size items, and dispatch in parallel
         batches = [inputs[i:i+batch_size] for i in range(0, len(inputs), batch_size)]
         if len(inputs) > batch_size:
-            self.logger.info(f"Original input is greater than {batch_size}. "
-                             f"It was resized to {len(batches)} mini-batches of size {batch_size}")
+            self.logger.info(f"Original input (length {len(inputs)} was resized "
+                             f"to {len(batches)} mini-batches, each with max length {batch_size}.")
+
+        # Call embedding model replicas in parallel
         tasks = [model_handle.remote(batch, dimensions) for batch in batches]
         all_results = await asyncio.gather(*tasks, return_exceptions=True)
 
         # Retry any failed model calls
        for i, result in enumerate(all_results):
             if isinstance(result, Exception):
-                retries = 0
-                while retries < num_retries:
-                    try:
-                        all_results[i] = await model_handle.remote(batches[i], dimensions)
-                    except Exception as e:
-                        self.logger.warning(e)
-                    finally:
-                        retries += 1
-                        if not isinstance(all_results[i], Exception):
-                            break
-
-                if retries >= num_retries and isinstance(all_results[i], Exception):
-                    raise all_results[i]
-
-        # Flatten the results because all_results is a list of lists
+                result_retried, retries = await self._retry_failed_embedding_call(model_handle, batches[i], dimensions,
+                                                                                  num_retries)
+                if (retries >= num_retries and isinstance(result_retried, Exception)) or result_retried is None:
+                    raise result_retried or ValueError(f"Failed to compute `{model}` embeddings for mini-batch of size {batch_size}.")
+
+                all_results[i] = result_retried
+
+        # Flatten the results because `all_results` is a list of lists
         self.logger.info(f"Successfully computed embeddings from {len(batches)} mini-batches")
         return [emb for result in all_results for emb in result]
 
-    @web_api.post("/v1/embeddings", response_model=EmbeddingResponse)
-    async def compute_embeddings(self, request: EmbeddingRequest):
+    async def _retry_failed_embedding_call(self, model_handle: DeploymentHandle, batch: List[str],
+                                           dimensions: Optional[int] = None, num_retries: Optional[int] = 2) \
+            -> Tuple[List[List[float]] | Exception, int]:
+
+        result_retried, retries = None, 0
+        while retries < num_retries:
+            try:
+                result_retried = await model_handle.remote(batch, dimensions)
+            except Exception as e:
+                result_retried = e
+                self.logger.warning(e)
+            finally:
+                retries += 1
+                if not isinstance(result_retried, Exception) and result_retried is not None:
+                    break
+
+        return result_retried, retries
+
+    @web_api.post("/{path_prefix}/v1/embeddings", response_model=EmbeddingResponse)
+    async def compute_embeddings(self, path_prefix: str, request: EmbeddingRequest):
+        assert path_prefix in self.path_prefix, f"Invalid path prefix: {path_prefix}"
+        assert request.model in self.deployed_models, f"Invalid model: {request.model}"
+
         try:
             inputs = request.input if isinstance(request.input, list) else [request.input]
-            self.logger.info(f"Received input of size {len(inputs)} text chunks; model: {request.model}")
+            self.logger.info(f"Computing embeddings for a batch of {len(inputs)} texts using model: {request.model}")
             embeddings = await self._compute_embeddings_from_resized_batches(request.model, inputs, request.dimensions)
             response_data = [
                 {"index": idx, "embedding": emb}
@@ -79,7 +97,8 @@ class ModelRouter:
             self.logger.error(f"Failed to create embeddings: {e}")
             raise HTTPException(status_code=500, detail=str(e))
 
-    @web_api.get("/v1/models")
-    async def list_models(self):
+    @web_api.get("/{path_prefix}/v1/models")
+    async def list_models(self, path_prefix: str):
         """Returns the list of available models in OpenAI-compatible format."""
-        return {"object": "list", "data": self.available_models}
+        assert path_prefix in self.path_prefix, f"Invalid path prefix: {path_prefix}"
+        return {"object": "list", "data": self.available_models}
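
With this change every route is nested under a {path_prefix} segment that must match one of the configured prefixes. A hypothetical client call against a local Serve app; the host, prefix, and model name are assumptions, not part of the package:

import requests

base = "http://localhost:8000/embeddings"        # "embeddings" must be listed in path_prefix
print(requests.get(f"{base}/v1/models").json())

resp = requests.post(
    f"{base}/v1/embeddings",
    json={"model": "e5-small-v2", "input": ["hello world"], "dimensions": 256},
)
print(resp.json()["data"][0]["embedding"][:4])

Since the prefix and model checks are plain asserts outside the try block, a request with an unknown prefix or model surfaces as a server error rather than a 404.
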
{ray_embedding-0.11.7.dist-info → ray_embedding-0.12.0.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ray-embedding
-Version: 0.11.7
+Version: 0.12.0
 Summary: Deploy SentenceTransformers embedding models to a ray cluster
 Author: Crispin Almodovar
 Author-email:
ray_embedding-0.12.0.dist-info/RECORD ADDED
@@ -0,0 +1,9 @@
+ray_embedding/__init__.py,sha256=YS5LAZfRIwwVvE3C9g7hsauvjgIkqKtHyxkwMFFfAGY,46
+ray_embedding/deploy.py,sha256=_p8LXLfRP2y9z4Jj1BbuVTuDYkuXZpGI_JkTEj_bMa4,2712
+ray_embedding/dto.py,sha256=NwS8EkeZZcfWDE6RFsLG0WtZtnc7onlr95llRSgnIQc,1432
+ray_embedding/embedding_model.py,sha256=cZe6voJTXYz0OKjjkFskC05V8frm3B-B3-Ae1j9GMKo,3408
+ray_embedding/model_router.py,sha256=04PN_ZptFwEGeSSaDVbCodbDMRMu2i-jeamC1Sb2v24,5145
+ray_embedding-0.12.0.dist-info/METADATA,sha256=jSrtclfqYRIfyd5e1KErc3DvDjXSB2TZEvXSyi-aGqk,1094
+ray_embedding-0.12.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ray_embedding-0.12.0.dist-info/top_level.txt,sha256=ziCblpJq1YsrryshFqxTRuRMgNuO1_tgvAAkGShATNA,14
+ray_embedding-0.12.0.dist-info/RECORD,,
ray_embedding-0.11.7.dist-info/RECORD DELETED
@@ -1,9 +0,0 @@
-ray_embedding/__init__.py,sha256=YS5LAZfRIwwVvE3C9g7hsauvjgIkqKtHyxkwMFFfAGY,46
-ray_embedding/deploy.py,sha256=2lnHmSONHWyRQdZSXJrU8UGtXVQWgyFZGwUYdD7gzOM,2356
-ray_embedding/dto.py,sha256=QlduDoqkFHaeF_KgsFeUKq2XWiPMmrgRPy_QjCTSCRE,1399
-ray_embedding/embedding_model.py,sha256=2wLk54BZIhHMCnwx5vneU0z4Y7EQs220BFNeLf-UQh4,3387
-ray_embedding/model_router.py,sha256=QBf188zZXsCrYMtb06PWlRSB0DEiAK3UojSLhKMMEEM,3997
-ray_embedding-0.11.7.dist-info/METADATA,sha256=_b15ndqKOJUd7G9zgk31wEnYDASKE8D3QOSUpFpAyOU,1094
-ray_embedding-0.11.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-ray_embedding-0.11.7.dist-info/top_level.txt,sha256=ziCblpJq1YsrryshFqxTRuRMgNuO1_tgvAAkGShATNA,14
-ray_embedding-0.11.7.dist-info/RECORD,,