ray-embedding 0.11.6__tar.gz → 0.11.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ray-embedding might be problematic. See the package's advisory page on the registry for more details.

@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ray-embedding
3
- Version: 0.11.6
3
+ Version: 0.11.7
4
4
  Summary: Deploy SentenceTransformers embedding models to a ray cluster
5
5
  Author: Crispin Almodovar
6
6
  Author-email:
@@ -12,6 +12,7 @@ from ray_embedding.model_router import ModelRouter
12
12
  def build_model(model_config: ModelDeploymentConfig) -> DeploymentHandle:
13
13
  deployment_name = model_config.deployment
14
14
  model = model_config.model
15
+ served_model_name = model_config.served_model_name
15
16
  device = model_config.device
16
17
  backend = model_config.backend or "torch"
17
18
  matryoshka_dim = model_config.matryoshka_dim
@@ -29,6 +30,7 @@ def build_model(model_config: ModelDeploymentConfig) -> DeploymentHandle:
29
30
  raise ValueError(f"Invalid torch_dtype: '{torch_dtype}'")
30
31
 
31
32
  deployment = EmbeddingModel.options(name=deployment_name).bind(model=model,
33
+ served_model_name=served_model_name,
32
34
  device=device,
33
35
  backend=backend,
34
36
  matryoshka_dim=matryoshka_dim,
@@ -41,6 +43,6 @@ def build_app(args: AppConfig) -> Application:
41
43
  model_router, models = args.model_router, args.models
42
44
  assert model_router and models
43
45
 
44
- served_models = {model_config.model: build_model(model_config) for model_config in models}
46
+ served_models = {model_config.served_model_name: build_model(model_config) for model_config in models}
45
47
  router = ModelRouter.options(name=model_router.deployment).bind(served_models)
46
48
  return router
@@ -18,6 +18,7 @@ class EmbeddingModel:
18
18
  logging.basicConfig(level=logging.INFO)
19
19
  self.logger = logging.getLogger(self.__class__.__name__)
20
20
  self.model = model
21
+ self.served_model_name = served_model_name or os.path.basename(self.model)
21
22
  self.init_device = device
22
23
  if self.init_device is None or self.init_device == "auto":
23
24
  self.init_device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -33,7 +34,6 @@ class EmbeddingModel:
33
34
  trust_remote_code=self.trust_remote_code,
34
35
  model_kwargs=self.model_kwargs)
35
36
 
36
- self.served_model_name = served_model_name or os.path.basename(self.model)
37
37
  self.logger.info(f"Successfully initialized embedding model {self.model} using device {self.torch_device}")
38
38
 
39
39
  async def __call__(self, text: Union[str, List[str]], dimensions: Optional[int] = None):
@@ -68,7 +68,7 @@ class ModelRouter:
68
68
  async def compute_embeddings(self, request: EmbeddingRequest):
69
69
  try:
70
70
  inputs = request.input if isinstance(request.input, list) else [request.input]
71
- self.logger.info(f"Received input of size {len(inputs)} text chunks")
71
+ self.logger.info(f"Received input of size {len(inputs)} text chunks; model: {request.model}")
72
72
  embeddings = await self._compute_embeddings_from_resized_batches(request.model, inputs, request.dimensions)
73
73
  response_data = [
74
74
  {"index": idx, "embedding": emb}
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ray-embedding
3
- Version: 0.11.6
3
+ Version: 0.11.7
4
4
  Summary: Deploy SentenceTransformers embedding models to a ray cluster
5
5
  Author: Crispin Almodovar
6
6
  Author-email:
@@ -1,6 +1,6 @@
1
1
  [metadata]
2
2
  name = ray-embedding
3
- version = 0.11.6
3
+ version = 0.11.7
4
4
  author = Crispin Almodovar
5
5
  author_email =
6
6
  description = Deploy SentenceTransformers embedding models to a ray cluster
File without changes