ray-embedding 0.11.6__tar.gz → 0.11.8__tar.gz

This diff compares the contents of two package versions publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ray-embedding
-Version: 0.11.6
+Version: 0.11.8
 Summary: Deploy SentenceTransformers embedding models to a ray cluster
 Author: Crispin Almodovar
 Author-email:
@@ -1,17 +1,19 @@
+import os
 from typing import Dict, Any, Optional
 from ray.serve import Application
 from ray.serve.handle import DeploymentHandle
 
-from ray_embedding.dto import AppConfig, ModelDeploymentConfig
+from ray_embedding.dto import AppConfig, ModelDeploymentConfig, DeployedModel
 from ray_embedding.embedding_model import EmbeddingModel
 import torch
 
 from ray_embedding.model_router import ModelRouter
 
 
-def build_model(model_config: ModelDeploymentConfig) -> DeploymentHandle:
+def build_model(model_config: ModelDeploymentConfig) -> DeployedModel:
     deployment_name = model_config.deployment
     model = model_config.model
+    served_model_name = model_config.served_model_name or os.path.basename(model)
     device = model_config.device
     backend = model_config.backend or "torch"
     matryoshka_dim = model_config.matryoshka_dim
@@ -29,18 +31,24 @@ def build_model(model_config: ModelDeploymentConfig) -> DeploymentHandle:
         raise ValueError(f"Invalid torch_dtype: '{torch_dtype}'")
 
     deployment = EmbeddingModel.options(name=deployment_name).bind(model=model,
+                                                                   served_model_name=served_model_name,
                                                                    device=device,
                                                                    backend=backend,
                                                                    matryoshka_dim=matryoshka_dim,
                                                                    trust_remote_code=trust_remote_code,
                                                                    model_kwargs=model_kwargs
                                                                    )
-    return deployment
+    return DeployedModel(model=served_model_name,
+                         deployment_handle=deployment,
+                         batch_size=model_config.batch_size,
+                         num_retries=model_config.num_retries
+                         )
+
 
 def build_app(args: AppConfig) -> Application:
     model_router, models = args.model_router, args.models
     assert model_router and models
 
-    served_models = {model_config.model: build_model(model_config) for model_config in models}
-    router = ModelRouter.options(name=model_router.deployment).bind(served_models)
+    deployed_models = {model_config.served_model_name: build_model(model_config) for model_config in models}
+    router = ModelRouter.options(name=model_router.deployment).bind(deployed_models)
     return router
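
The `DeployedModel` type imported above is new in this release, but its definition (in `ray_embedding.dto`) is not part of the diff. Judging from the call sites, it bundles a deployment handle with its routing metadata; the dataclass form and field types in this sketch are assumptions:

```python
# Hypothetical sketch of the DeployedModel DTO: the real definition lives in
# ray_embedding.dto and is not shown in this diff. Field names come from the
# call sites above; the dataclass form and the types are assumptions.
from dataclasses import dataclass
from ray.serve.handle import DeploymentHandle

@dataclass
class DeployedModel:
    model: str                           # served model name, used as the routing key
    deployment_handle: DeploymentHandle  # handle to the bound EmbeddingModel deployment
    batch_size: int                      # mini-batch size the router slices inputs into
    num_retries: int                     # retry budget per dispatched mini-batch
```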
@@ -18,6 +18,7 @@ class EmbeddingModel:
         logging.basicConfig(level=logging.INFO)
         self.logger = logging.getLogger(self.__class__.__name__)
         self.model = model
+        self.served_model_name = served_model_name or os.path.basename(self.model)
         self.init_device = device
         if self.init_device is None or self.init_device == "auto":
             self.init_device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -33,7 +34,6 @@ class EmbeddingModel:
                                               trust_remote_code=self.trust_remote_code,
                                               model_kwargs=self.model_kwargs)
 
-        self.served_model_name = served_model_name or os.path.basename(self.model)
         self.logger.info(f"Successfully initialized embedding model {self.model} using device {self.torch_device}")
 
     async def __call__(self, text: Union[str, List[str]], dimensions: Optional[int] = None):
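
This hunk moves the `served_model_name` fallback ahead of model construction in `__init__`; as in `build_model`, when no explicit name is configured the last path segment of the model id is used. A quick illustration (the model ids are made up for this example):

```python
import os

# With no explicit served_model_name, the model id's last path segment is used.
os.path.basename("sentence-transformers/all-MiniLM-L6-v2")  # -> "all-MiniLM-L6-v2"
os.path.basename("/models/local/bge-small")                 # -> "bge-small"
```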
@@ -13,33 +13,32 @@ web_api = FastAPI(title="Ray Embeddings - OpenAI-compatible API")
 @serve.deployment
 @serve.ingress(web_api)
 class ModelRouter:
-    def __init__(self, served_models: Dict[str, DeployedModel]):
-        assert served_models, "models cannot be empty"
+    def __init__(self, deployed_models: Dict[str, DeployedModel]):
+        assert deployed_models, "models cannot be empty"
         logging.basicConfig(level=logging.INFO)
         self.logger = logging.getLogger(self.__class__.__name__)
-        self.served_models = served_models
+        self.deployed_models = deployed_models
         self.available_models = [
             {"id": str(item),
              "object": "model",
              "created": int(time.time()),
              "owned_by": "openai",
-             "permission": []} for item in self.served_models.keys()
+             "permission": []} for item in self.deployed_models.keys()
         ]
         self.logger.info(f"Successfully registered models: {self.available_models}")
 
     async def _compute_embeddings_from_resized_batches(self, model: str, inputs: List[str], dimensions: Optional[int] = None):
-        assert model in self.served_models
-        self.logger.info(f"Retrieving model handle for: {model}")
-        model_handle = self.served_models[model].deployment_handle
-        self.logger.info(f"Model handle: {model_handle}")
-        batch_size = self.served_models[model].batch_size
-        num_retries = self.served_models[model].num_retries
+        assert model in self.deployed_models
+        deployed_model = self.deployed_models[model]
+        model_handle = deployed_model.deployment_handle
+        batch_size = deployed_model.batch_size
+        num_retries = deployed_model.num_retries
 
         # Resize the inputs into batch_size items, and dispatch in parallel
         batches = [inputs[i:i+batch_size] for i in range(0, len(inputs), batch_size)]
         if len(inputs) > batch_size:
-            self.logger.info(f"Original input is greater than {batch_size}. "
-                             f"It was resized to {len(batches)} mini-batches of size {batch_size}")
+            self.logger.info(f"Original input (length {len(inputs)}) was resized "
+                             f"to {len(batches)} mini-batches of length {batch_size}")
         tasks = [model_handle.remote(batch, dimensions) for batch in batches]
         all_results = await asyncio.gather(*tasks, return_exceptions=True)
@@ -68,7 +67,7 @@ class ModelRouter:
     async def compute_embeddings(self, request: EmbeddingRequest):
         try:
             inputs = request.input if isinstance(request.input, list) else [request.input]
-            self.logger.info(f"Received input of size {len(inputs)} text chunks")
+            self.logger.info(f"Computing embeddings for a batch of {len(inputs)} texts using model: {request.model}")
             embeddings = await self._compute_embeddings_from_resized_batches(request.model, inputs, request.dimensions)
             response_data = [
                 {"index": idx, "embedding": emb}
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ray-embedding
-Version: 0.11.6
+Version: 0.11.8
 Summary: Deploy SentenceTransformers embedding models to a ray cluster
 Author: Crispin Almodovar
 Author-email:
@@ -1,6 +1,6 @@
 [metadata]
 name = ray-embedding
-version = 0.11.6
+version = 0.11.8
 author = Crispin Almodovar
 author_email =
 description = Deploy SentenceTransformers embedding models to a ray cluster