ray-embedding 0.9.10.tar.gz → 0.10.1.tar.gz
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of ray-embedding might be problematic.
- {ray_embedding-0.9.10 → ray_embedding-0.10.1}/PKG-INFO +1 -1
- ray_embedding-0.10.1/ray_embedding/embedding_model.py +97 -0
- {ray_embedding-0.9.10 → ray_embedding-0.10.1}/ray_embedding.egg-info/PKG-INFO +1 -1
- {ray_embedding-0.9.10 → ray_embedding-0.10.1}/setup.cfg +1 -1
- ray_embedding-0.9.10/ray_embedding/embedding_model.py +0 -131
- {ray_embedding-0.9.10 → ray_embedding-0.10.1}/README.md +0 -0
- {ray_embedding-0.9.10 → ray_embedding-0.10.1}/pyproject.toml +0 -0
- {ray_embedding-0.9.10 → ray_embedding-0.10.1}/ray_embedding/__init__.py +0 -0
- {ray_embedding-0.9.10 → ray_embedding-0.10.1}/ray_embedding/deploy.py +0 -0
- {ray_embedding-0.9.10 → ray_embedding-0.10.1}/ray_embedding/dto.py +0 -0
- {ray_embedding-0.9.10 → ray_embedding-0.10.1}/ray_embedding.egg-info/SOURCES.txt +0 -0
- {ray_embedding-0.9.10 → ray_embedding-0.10.1}/ray_embedding.egg-info/dependency_links.txt +0 -0
- {ray_embedding-0.9.10 → ray_embedding-0.10.1}/ray_embedding.egg-info/top_level.txt +0 -0
- {ray_embedding-0.9.10 → ray_embedding-0.10.1}/test/test.py +0 -0
ray_embedding-0.10.1/ray_embedding/embedding_model.py (new file)

@@ -0,0 +1,97 @@
+import logging
+import os.path
+import time
+from typing import Optional, Dict, Any, List
+
+import torch
+from fastapi import FastAPI, HTTPException
+from ray import serve
+from sentence_transformers import SentenceTransformer
+
+from ray_embedding.dto import EmbeddingResponse, EmbeddingRequest
+
+web_api = FastAPI(title=f"Ray Embeddings - OpenAI-compatible API")
+
+
+@serve.deployment(
+    num_replicas="auto",
+    ray_actor_options={
+        "num_cpus": 1,
+        "num_gpus": 0
+    },
+    autoscaling_config={
+        "target_ongoing_requests": 2,
+        "min_replicas": 0,
+        "initial_replicas": 1,
+        "max_replicas": 1,
+    }
+)
+@serve.ingress(web_api)
+class EmbeddingModel:
+    def __init__(self, model: str, backend: Optional[str] = "torch", matryoshka_dim: Optional[int] = None,
+                 trust_remote_code: Optional[bool] = False, model_kwargs: Dict[str, Any] = None):
+        logging.basicConfig(level=logging.INFO)
+        self.logger = logging.getLogger(__name__)
+        self.model = model
+        self.backend = backend or "torch"
+        self.matryoshka_dim = matryoshka_dim
+        self.trust_remote_code = trust_remote_code or False
+        self.model_kwargs = model_kwargs or {}
+        self.torch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.logger.info(f"Initializing embedding model: {self.model}")
+        self.embedding_model = SentenceTransformer(self.model, backend=self.backend, trust_remote_code=self.trust_remote_code,
+                                                   model_kwargs=self.model_kwargs)
+
+        self.served_model_name = os.path.basename(self.model)
+        self.available_models = [
+            {"id": self.served_model_name,
+             "object": "model",
+             "created": int(time.time()),
+             "owned_by": "openai",
+             "permission": []}
+        ]
+        self.logger.info(f"Successfully initialized embedding model {self.model} using device {self.torch_device}")
+
+    @web_api.post("/v1/embeddings", response_model=EmbeddingResponse)
+    async def create_embeddings(self, request: EmbeddingRequest):
+        """Generate embeddings for the input text using the specified model."""
+        try:
+            assert request.model == self.served_model_name, (
+                f"Model '{request.model}' is not supported. Use '{self.served_model_name}' instead."
+            )
+            if isinstance(request.input, str):
+                request.input = [request.input]
+
+            truncate_dim = request.dimensions or self.matryoshka_dim
+
+            # Compute embeddings and convert to a PyTorch tensor on the GPU
+            embeddings = self.embedding_model.encode(
+                request.input, convert_to_tensor=True, normalize_embeddings=True, show_progress_bar=False,
+            ).to(self.torch_device)
+
+            if truncate_dim is not None:
+                # Truncate and re-normalize the embeddings
+                embeddings = embeddings[:, :truncate_dim]
+                embeddings = embeddings / torch.norm(embeddings, dim=1, keepdim=True)
+
+            # Move all embeddings to CPU at once before conversion
+            embeddings = embeddings.cpu().tolist()
+
+            # Convert embeddings to list format for response
+            response_data = [
+                {"index": idx, "embedding": emb}
+                for idx, emb in enumerate(embeddings)
+            ]
+            return EmbeddingResponse(object="list", data=response_data, model=request.model)
+
+        except Exception as e:
+            self.logger.error(e)
+            raise HTTPException(status_code=500, detail=str(e))
+
+    @web_api.get("/v1/models")
+    async def list_models(self):
+        """Returns the list of available models in OpenAI-compatible format."""
+        return {"object": "list", "data": self.available_models}
+
+
+
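A deployment class like this is normally bound with its constructor arguments and launched via Ray Serve. The sketch below is illustrative only: the model id is an assumption, and the package's own deployment wiring lives in ray_embedding/deploy.py (unchanged in this release).

# Hypothetical deployment sketch; the model id is an assumption,
# not something shipped with ray-embedding.
from ray import serve

from ray_embedding.embedding_model import EmbeddingModel

# .bind() attaches constructor arguments to the @serve.deployment class;
# serve.run() starts the app and exposes the FastAPI ingress over HTTP.
app = EmbeddingModel.bind(model="sentence-transformers/all-MiniLM-L6-v2")
serve.run(app)

Because min_replicas is 0, the deployment can scale to zero when idle; the first request afterwards pays a cold start while a replica initializes and the model loads.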
ray_embedding-0.9.10/ray_embedding/embedding_model.py (deleted)

@@ -1,131 +0,0 @@
-import logging
-import os.path
-import time
-from typing import Optional, Dict, Any, List
-
-import torch
-from fastapi import FastAPI, HTTPException
-from ray import serve
-from sentence_transformers import SentenceTransformer
-
-from ray_embedding.dto import EmbeddingResponse, EmbeddingRequest
-
-web_api = FastAPI(title=f"Ray Embeddings - OpenAI-compatible API")
-
-
-@serve.deployment(
-    num_replicas="auto",
-    ray_actor_options={
-        "num_cpus": 1,
-        "num_gpus": 0
-    },
-    autoscaling_config={
-        "target_ongoing_requests": 2,
-        "min_replicas": 0,
-        "initial_replicas": 1,
-        "max_replicas": 1,
-    },
-    user_config={
-        "max_batch_size": 8,
-        "batch_wait_timeout_s": 0.25,
-    }
-)
-@serve.ingress(web_api)
-class EmbeddingModel:
-    def __init__(self, model: str, backend: Optional[str] = "torch", matryoshka_dim: Optional[int] = None,
-                 trust_remote_code: Optional[bool] = False, model_kwargs: Dict[str, Any] = None):
-        logging.basicConfig(level=logging.INFO)
-        self.logger = logging.getLogger(__name__)
-        self.model = model
-        self.backend = backend or "torch"
-        self.matryoshka_dim = matryoshka_dim
-        self.trust_remote_code = trust_remote_code or False
-        self.model_kwargs = model_kwargs or {}
-        self.torch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        self.logger.info(f"Initializing embedding model: {self.model}")
-        self.embedding_model = SentenceTransformer(self.model, backend=self.backend, trust_remote_code=self.trust_remote_code,
-                                                   model_kwargs=self.model_kwargs)
-
-        self.served_model_name = os.path.basename(self.model)
-        self.available_models = [
-            {"id": self.served_model_name,
-             "object": "model",
-             "created": int(time.time()),
-             "owned_by": "openai",
-             "permission": []}
-        ]
-        self.logger.info(f"Successfully initialized embedding model {self.model} using device {self.torch_device}")
-
-    async def reconfigure(self, user_config: Dict):
-        assert "max_batch_size" in user_config and "batch_wait_timeout_s" in user_config, "Invalid user config"
-        self.logger.info(f"Reconfiguring dynamic batching parameters: {user_config}")
-        self.__create_embeddings_batch.set_max_batch_size(user_config["max_batch_size"])
-        self.__create_embeddings_batch.set_batch_wait_timeout_s(user_config["batch_wait_timeout_s"])
-
-    @web_api.post("/v1/embeddings", response_model=EmbeddingResponse)
-    async def create_embeddings(self, request: EmbeddingRequest):
-        """Generate embeddings for the input text using the specified model."""
-        try:
-            assert request.model == self.served_model_name, (
-                f"Model '{request.model}' is not supported. Use '{self.served_model_name}' instead."
-            )
-            return await self.__create_embeddings_batch(request)
-        except Exception as e:
-            self.logger.error(e)
-            raise HTTPException(status_code=500, detail=str(e))
-
-    @serve.batch(max_batch_size=8, batch_wait_timeout_s=0.25)
-    async def __create_embeddings_batch(self, requests_batch: List[EmbeddingRequest]) -> List[EmbeddingResponse]:
-        self_0 = self[0] if isinstance(self, list) else self  # Ray also passes an array of self refs; just take the first one
-        embedding_model, matryoshka_dim, torch_device = self_0.embedding_model, self_0.matryoshka_dim, self_0.torch_device
-
-        inputs, truncate_dims, num_inputs_list = [], [], []
-        for request in requests_batch:
-            input_text = request.input if isinstance(request.input, list) else [request.input]  # Can be a list of texts
-            inputs.extend(input_text)
-            num_inputs_list.append(len(input_text))
-            truncate_dims.append(request.dimensions or matryoshka_dim)
-
-        embeddings = embedding_model.encode(
-            inputs, convert_to_tensor=True, normalize_embeddings=True, show_progress_bar=False,
-        ).to(torch_device)
-
-        model_name = requests_batch[0].model
-        truncate_needed = any(dim is not None for dim in truncate_dims)
-        results_batch, ix = [], 0
-
-        if truncate_needed:
-            for truncate_dim, num_inputs in zip(truncate_dims, num_inputs_list):
-                batch_embeddings = embeddings[ix: ix + num_inputs]
-                ix += num_inputs
-
-                if truncate_dim is not None:
-                    # Truncate and normalize using pytorch (faster)
-                    batch_embeddings = batch_embeddings[:, :truncate_dim]
-                    batch_embeddings = batch_embeddings / torch.norm(batch_embeddings, dim=1, keepdim=True)
-
-                batch_embeddings = batch_embeddings.cpu().tolist()
-                response_data = [
-                    {"index": emb_ix, "embedding": emb} for emb_ix, emb in enumerate(batch_embeddings)
-                ]
-                results_batch.append(EmbeddingResponse(object="list", data=response_data, model=model_name))
-        else:
-            embeddings_list = embeddings.cpu().tolist()  # Move everything to CPU
-            for num_inputs in num_inputs_list:
-                batch_embeddings = embeddings_list[ix: ix + num_inputs]
-                ix += num_inputs
-
-                response_data = [
-                    {"index": emb_ix, "embedding": emb} for emb_ix, emb in enumerate(batch_embeddings)
-                ]
-                results_batch.append(EmbeddingResponse(object="list", data=response_data, model=model_name))
-
-        return results_batch
-
-    @web_api.get("/v1/models")
-    async def list_models(self):
-        """Returns the list of available models in OpenAI-compatible format."""
-        return {"object": "list", "data": self.available_models}
-
-
-
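Net change in this release: 0.9.10 funneled requests through a @serve.batch handler whose max_batch_size and batch_wait_timeout_s could be retuned at runtime via reconfigure and user_config, while 0.10.1 removes dynamic batching and encodes each request directly. The HTTP surface (/v1/embeddings and /v1/models) is the same in both versions. A minimal client sketch follows; the base URL, model name, and payload values are illustrative assumptions:

# Hypothetical client sketch; base URL, model name, and payload values
# are assumptions for illustration.
import requests

BASE = "http://localhost:8000"  # Ray Serve's default HTTP address

# List the served models (OpenAI-compatible shape).
print(requests.get(f"{BASE}/v1/models").json())

# Request embeddings. "model" must match os.path.basename() of the model
# the deployment was constructed with; "dimensions" optionally truncates
# to a matryoshka dimension, after which embeddings are re-normalized.
resp = requests.post(f"{BASE}/v1/embeddings", json={
    "model": "all-MiniLM-L6-v2",
    "input": ["hello world"],
    "dimensions": 256,
})
print(resp.json()["data"][0]["embedding"][:8])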
The remaining files are unchanged between versions: README.md, pyproject.toml, ray_embedding/__init__.py, ray_embedding/deploy.py, ray_embedding/dto.py, ray_embedding.egg-info/SOURCES.txt, ray_embedding.egg-info/dependency_links.txt, ray_embedding.egg-info/top_level.txt, and test/test.py.