ray-embedding 0.11.5__tar.gz → 0.11.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ray-embedding might be problematic. See the package registry's advisory page for this release for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ray-embedding
3
- Version: 0.11.5
3
+ Version: 0.11.7
4
4
  Summary: Deploy SentenceTransformers embedding models to a ray cluster
5
5
  Author: Crispin Almodovar
6
6
  Author-email:
@@ -6,12 +6,13 @@ from ray_embedding.dto import AppConfig, ModelDeploymentConfig
6
6
  from ray_embedding.embedding_model import EmbeddingModel
7
7
  import torch
8
8
 
9
- from ray_embedding.embedding_service import EmbeddingService
9
+ from ray_embedding.model_router import ModelRouter
10
10
 
11
11
 
12
12
  def build_model(model_config: ModelDeploymentConfig) -> DeploymentHandle:
13
13
  deployment_name = model_config.deployment
14
14
  model = model_config.model
15
+ served_model_name = model_config.served_model_name
15
16
  device = model_config.device
16
17
  backend = model_config.backend or "torch"
17
18
  matryoshka_dim = model_config.matryoshka_dim
@@ -29,6 +30,7 @@ def build_model(model_config: ModelDeploymentConfig) -> DeploymentHandle:
29
30
  raise ValueError(f"Invalid torch_dtype: '{torch_dtype}'")
30
31
 
31
32
  deployment = EmbeddingModel.options(name=deployment_name).bind(model=model,
33
+ served_model_name=served_model_name,
32
34
  device=device,
33
35
  backend=backend,
34
36
  matryoshka_dim=matryoshka_dim,
@@ -41,6 +43,6 @@ def build_app(args: AppConfig) -> Application:
41
43
  model_router, models = args.model_router, args.models
42
44
  assert model_router and models
43
45
 
44
- served_models = {model_config.model: build_model(model_config) for model_config in models}
45
- app = EmbeddingService.options(name=model_router.deployment).bind(served_models)
46
- return app
46
+ served_models = {model_config.served_model_name: build_model(model_config) for model_config in models}
47
+ router = ModelRouter.options(name=model_router.deployment).bind(served_models)
48
+ return router
@@ -18,6 +18,7 @@ class EmbeddingModel:
18
18
  logging.basicConfig(level=logging.INFO)
19
19
  self.logger = logging.getLogger(self.__class__.__name__)
20
20
  self.model = model
21
+ self.served_model_name = served_model_name or os.path.basename(self.model)
21
22
  self.init_device = device
22
23
  if self.init_device is None or self.init_device == "auto":
23
24
  self.init_device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -33,7 +34,6 @@ class EmbeddingModel:
33
34
  trust_remote_code=self.trust_remote_code,
34
35
  model_kwargs=self.model_kwargs)
35
36
 
36
- self.served_model_name = served_model_name or os.path.basename(self.model)
37
37
  self.logger.info(f"Successfully initialized embedding model {self.model} using device {self.torch_device}")
38
38
 
39
39
  async def __call__(self, text: Union[str, List[str]], dimensions: Optional[int] = None):
@@ -12,7 +12,7 @@ web_api = FastAPI(title="Ray Embeddings - OpenAI-compatible API")
12
12
 
13
13
  @serve.deployment
14
14
  @serve.ingress(web_api)
15
- class EmbeddingService:
15
+ class ModelRouter:
16
16
  def __init__(self, served_models: Dict[str, DeployedModel]):
17
17
  assert served_models, "models cannot be empty"
18
18
  logging.basicConfig(level=logging.INFO)
@@ -29,6 +29,7 @@ class EmbeddingService:
29
29
 
30
30
  async def _compute_embeddings_from_resized_batches(self, model: str, inputs: List[str], dimensions: Optional[int] = None):
31
31
  assert model in self.served_models
32
+ self.logger.info(f"Retrieving model handle for: {model} ")
32
33
  model_handle = self.served_models[model].deployment_handle
33
34
  self.logger.info(f"Model handle: {model_handle} ")
34
35
  batch_size = self.served_models[model].batch_size
@@ -67,7 +68,7 @@ class EmbeddingService:
67
68
  async def compute_embeddings(self, request: EmbeddingRequest):
68
69
  try:
69
70
  inputs = request.input if isinstance(request.input, list) else [request.input]
70
- self.logger.info(f"Received input of size {len(inputs)} text chunks")
71
+ self.logger.info(f"Received input of size {len(inputs)} text chunks; model: {request.model}")
71
72
  embeddings = await self._compute_embeddings_from_resized_batches(request.model, inputs, request.dimensions)
72
73
  response_data = [
73
74
  {"index": idx, "embedding": emb}
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ray-embedding
3
- Version: 0.11.5
3
+ Version: 0.11.7
4
4
  Summary: Deploy SentenceTransformers embedding models to a ray cluster
5
5
  Author: Crispin Almodovar
6
6
  Author-email:
@@ -5,7 +5,7 @@ ray_embedding/__init__.py
5
5
  ray_embedding/deploy.py
6
6
  ray_embedding/dto.py
7
7
  ray_embedding/embedding_model.py
8
- ray_embedding/embedding_service.py
8
+ ray_embedding/model_router.py
9
9
  ray_embedding.egg-info/PKG-INFO
10
10
  ray_embedding.egg-info/SOURCES.txt
11
11
  ray_embedding.egg-info/dependency_links.txt
@@ -1,6 +1,6 @@
1
1
  [metadata]
2
2
  name = ray-embedding
3
- version = 0.11.5
3
+ version = 0.11.7
4
4
  author = Crispin Almodovar
5
5
  author_email =
6
6
  description = Deploy SentenceTransformers embedding models to a ray cluster
File without changes