endee-llamaindex 0.1.3__py3-none-any.whl → 0.1.5a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- endee_llamaindex/base.py +603 -652
- endee_llamaindex/constants.py +70 -0
- endee_llamaindex/utils.py +7 -587
- {endee_llamaindex-0.1.3.dist-info → endee_llamaindex-0.1.5a1.dist-info}/METADATA +147 -50
- endee_llamaindex-0.1.5a1.dist-info/RECORD +8 -0
- {endee_llamaindex-0.1.3.dist-info → endee_llamaindex-0.1.5a1.dist-info}/WHEEL +1 -1
- endee_llamaindex-0.1.3.dist-info/RECORD +0 -7
- {endee_llamaindex-0.1.3.dist-info → endee_llamaindex-0.1.5a1.dist-info}/top_level.txt +0 -0
endee_llamaindex/base.py
CHANGED
@@ -1,16 +1,32 @@
+"""
+EndeeVectorStore: LlamaIndex vector store backed by the Endee API.
+
+Aligned with the local endee package (./endee). API contract:
+
+Endee (endee.endee):
+- __init__(token, http_library)
+- create_index(name, dimension, space_type, M, ef_con, precision, version, sparse_dim)
+  Validates: index name (alphanumeric + underscores, max length), dimension <= MAX_DIMENSION_ALLOWED,
+  space_type in SPACE_TYPES_SUPPORTED ('cosine','l2','ip'), precision in PRECISION_TYPES_SUPPORTED,
+  sparse_dim >= 0. Map 'euclidean'->'l2', 'inner_product'->'ip' before calling.
+- get_index(name) -> Index
+
+Index (endee.index):
+- upsert(input_array): list of {id, vector, meta?, filter?, sparse_indices?, sparse_values?}; max MAX_VECTORS_PER_BATCH per batch; duplicate IDs in batch raise
+- query(vector, top_k, filter, ef, include_vectors, sparse_indices, sparse_values)
+- delete_vector(id), get_vector(id), describe()
+
+No list_ids or batch fetch in endee; filter for query is JSON-serializable (e.g. [{"field":{"$op":value}}] or dict).
+"""
+
 import logging
-from collections import Counter
-from functools import partial
 import json
 from typing import Any, Callable, Dict, List, Optional, cast
-
 from llama_index.core.bridge.pydantic import PrivateAttr
-from llama_index.core.schema import BaseNode,
+from llama_index.core.schema import BaseNode, TextNode
 from llama_index.core.vector_stores.types import (
     BasePydanticVectorStore,
-    MetadataFilters,
     VectorStoreQuery,
-    VectorStoreQueryMode,
     VectorStoreQueryResult,
 )
 from llama_index.core.vector_stores.utils import (
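The new module docstring pins down the endee client contract this module is written against. As a quick orientation, here is a minimal sketch of that contract in use; the token source, index name, and dimension are placeholders, and the call signatures are taken from the docstring above rather than verified against the endee package itself:

    import os
    from endee import Endee

    nd = Endee(token=os.environ["ENDEE_API_TOKEN"])
    nd.create_index(
        name="docs_index",    # alphanumeric + underscores, length-limited
        dimension=384,        # must be <= MAX_DIMENSION_ALLOWED
        space_type="cosine",  # map 'euclidean'->'l2', 'inner_product'->'ip' first
        precision="float16",
        sparse_dim=0,         # 0 => dense-only; > 0 => hybrid
    )
    index = nd.get_index(name="docs_index")

    # upsert takes a list of dicts; duplicate IDs within one batch raise.
    index.upsert([{"id": "a1", "vector": [0.1] * 384, "meta": {"lang": "en"}}])

    # The query filter is JSON-serializable: [{"field": {"$op": value}}, ...].
    hits = index.query(vector=[0.1] * 384, top_k=5, filter=[{"lang": {"$eq": "en"}}])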
@@ -19,83 +35,24 @@ from llama_index.core.vector_stores.utils import (
     metadata_dict_to_node,
     node_to_metadata_dict,
 )
-
-
-ID_KEY = "id"
-VECTOR_KEY = "values"
-SPARSE_VECTOR_KEY = "sparse_values"
-METADATA_KEY = "metadata"
-
-DEFAULT_BATCH_SIZE = 100
-
+from .constants import (
+    DEFAULT_BATCH_SIZE,
+    DEFAULT_EF_SEARCH,
+    MAX_DIMENSION_ALLOWED,
+    MAX_EF_SEARCH_ALLOWED,
+    MAX_INDEX_NAME_LENGTH_ALLOWED,
+    MAX_TOP_K_ALLOWED,
+    MAX_VECTORS_PER_BATCH,
+    PRECISION_VALID,
+    REVERSE_OPERATOR_MAP,
+    SPACE_TYPE_MAP,
+    SPACE_TYPES_VALID,
+    SUPPORTED_FILTER_OPERATORS,
+)
+from .utils import get_sparse_encoder
+from endee import Endee
 _logger = logging.getLogger(__name__)
 
-from llama_index.core.vector_stores.types import MetadataFilter, FilterOperator
-
-reverse_operator_map = {
-    FilterOperator.EQ: "$eq",
-    FilterOperator.NE: "$ne",
-    FilterOperator.GT: "$gt",
-    FilterOperator.GTE: "$gte",
-    FilterOperator.LT: "$lt",
-    FilterOperator.LTE: "$lte",
-    FilterOperator.IN: "$in",
-    FilterOperator.NIN: "$nin",
-}
-
-
-def build_dict(input_batch: List[List[int]]) -> List[Dict[str, Any]]:
-    """
-    Build a list of sparse dictionaries from a batch of input_ids.
-
-    NOTE: taken from https://www.pinecone.io/learn/hybrid-search-intro/.
-    """
-    # store a batch of sparse embeddings
-    sparse_emb = []
-    # iterate through input batch
-    for token_ids in input_batch:
-        indices = []
-        values = []
-        # convert the input_ids list to a dictionary of key to frequency values
-        d = dict(Counter(token_ids))
-        for idx in d:
-            indices.append(idx)
-            values.append(float(d[idx]))
-        sparse_emb.append({"indices": indices, "values": values})
-    # return sparse_emb list
-    return sparse_emb
-
-
-def generate_sparse_vectors(
-    context_batch: List[str], tokenizer: Callable
-) -> List[Dict[str, Any]]:
-    """
-    Generate sparse vectors from a batch of contexts.
-
-    NOTE: taken from https://www.pinecone.io/learn/hybrid-search-intro/.
-    """
-    # create batch of input_ids
-    inputs = tokenizer(context_batch)["input_ids"]
-    # create sparse dictionaries
-    return build_dict(inputs)
 
 
 # Supported sparse embedding models
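Those new imports come from endee_llamaindex/constants.py, a new file (+70 lines per the summary) whose contents this diff does not show. A plausible sketch, inferred from how base.py uses the names: the limits echoed in code comments later in this diff (MAX_TOP_K_ALLOWED=512, MAX_EF_SEARCH_ALLOWED=1024, DEFAULT_EF_SEARCH=128) are grounded in the diff itself, while the remaining values are assumptions:

    # Hypothetical sketch of endee_llamaindex/constants.py (not shown in this diff).
    from llama_index.core.vector_stores.types import FilterOperator

    DEFAULT_BATCH_SIZE = 100            # carried over from the old module constant
    DEFAULT_EF_SEARCH = 128             # per the query() docstring below
    MAX_EF_SEARCH_ALLOWED = 1024        # per the cap comment in query()
    MAX_TOP_K_ALLOWED = 512             # per the cap comment in query()
    MAX_DIMENSION_ALLOWED = 4096        # assumed value
    MAX_INDEX_NAME_LENGTH_ALLOWED = 48  # assumed value
    MAX_VECTORS_PER_BATCH = 1000        # assumed value

    SPACE_TYPES_VALID = {"cosine", "l2", "ip"}
    SPACE_TYPE_MAP = {"euclidean": "l2", "inner_product": "ip"}
    PRECISION_VALID = {"binary", "float16", "float32", "int16d", "int8d"}

    # Only EQ and IN remain; the old in-module map also carried NE/GT/GTE/LT/LTE/NIN.
    SUPPORTED_FILTER_OPERATORS = {FilterOperator.EQ, FilterOperator.IN}
    REVERSE_OPERATOR_MAP = {FilterOperator.EQ: "$eq", FilterOperator.IN: "$in"}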
@@ -111,166 +68,7 @@ SUPPORTED_SPARSE_MODELS = {
 }
 
 
-def _initialize_sparse_encoder_fastembed(
-    model_name: str,
-    batch_size: int = 256,
-    cache_dir: Optional[str] = None,
-    threads: Optional[int] = None,
-) -> Callable:
-    """
-    Initialize a sparse encoder using FastEmbed (recommended for SPLADE models).
-
-    Args:
-        model_name: Model identifier or alias
-        batch_size: Batch size for encoding
-        cache_dir: Directory to cache model files
-        threads: Number of threads to use
-
-    Returns:
-        Callable function that generates sparse vectors from text
-    """
-    try:
-        from fastembed.sparse.sparse_text_embedding import SparseTextEmbedding
-    except ImportError as e:
-        raise ImportError(
-            "Could not import FastEmbed. "
-            "Please install it with `pip install fastembed` or "
-            "`pip install fastembed-gpu` for GPU support."
-        ) from e
-
-    # Resolve model name from alias if needed
-    resolved_model_name = SUPPORTED_SPARSE_MODELS.get(model_name, model_name)
-
-    # Try GPU first, fallback to CPU
-    try:
-        model = SparseTextEmbedding(
-            resolved_model_name,
-            cache_dir=cache_dir,
-            threads=threads,
-            providers=["CUDAExecutionProvider"],
-        )
-        _logger.info(f"Initialized sparse encoder '{resolved_model_name}' on GPU")
-    except Exception:
-        model = SparseTextEmbedding(
-            resolved_model_name,
-            cache_dir=cache_dir,
-            threads=threads
-        )
-        _logger.info(f"Initialized sparse encoder '{resolved_model_name}' on CPU")
-
-    def compute_vectors(texts: List[str]) -> tuple:
-        """Compute sparse vectors (indices, values) for a list of texts."""
-        embeddings = model.embed(texts, batch_size=batch_size)
-        indices = []
-        values = []
-        for embedding in embeddings:
-            indices.append(embedding.indices.tolist())
-            values.append(embedding.values.tolist())
-        return indices, values
-
-    return compute_vectors
-
-
-def _initialize_sparse_encoder_transformers(
-    model_name: str,
-) -> Callable:
-    """
-    Initialize a sparse encoder using Transformers library.
-
-    Args:
-        model_name: Model identifier or alias
-
-    Returns:
-        Callable function that generates sparse vectors from text
-    """
-    try:
-        import torch
-        from transformers import AutoModelForMaskedLM, AutoTokenizer
-    except ImportError as e:
-        raise ImportError(
-            "Could not import transformers library. "
-            'Please install transformers with `pip install "transformers[torch]"`'
-        ) from e
-
-    # Resolve model name from alias if needed
-    resolved_model_name = SUPPORTED_SPARSE_MODELS.get(model_name, model_name)
-
-    tokenizer = AutoTokenizer.from_pretrained(resolved_model_name)
-    model = AutoModelForMaskedLM.from_pretrained(resolved_model_name)
-
-    if torch.cuda.is_available():
-        model = model.to("cuda")
-        _logger.info(f"Initialized sparse encoder '{resolved_model_name}' on GPU")
-    else:
-        _logger.info(f"Initialized sparse encoder '{resolved_model_name}' on CPU")
-
-    def compute_vectors(texts: List[str]) -> tuple:
-        """
-        Compute sparse vectors from logits using ReLU, log, and max operations.
-        """
-        tokens = tokenizer(
-            texts,
-            truncation=True,
-            padding=True,
-            max_length=512,
-            return_tensors="pt"
-        )
-
-        if torch.cuda.is_available():
-            tokens = tokens.to("cuda")
-
-        with torch.no_grad():
-            output = model(**tokens)
-            logits, attention_mask = output.logits, tokens.attention_mask
-            relu_log = torch.log(1 + torch.relu(logits))
-            weighted_log = relu_log * attention_mask.unsqueeze(-1)
-            tvecs, _ = torch.max(weighted_log, dim=1)
-
-        # Extract non-zero vectors and their indices
-        indices = []
-        values = []
-        for batch in tvecs:
-            nz_indices = batch.nonzero(as_tuple=True)[0].tolist()
-            indices.append(nz_indices)
-            values.append(batch[nz_indices].tolist())
-
-        return indices, values
-
-    return compute_vectors
-
-
-def get_sparse_encoder(
-    model_name: Optional[str] = None,
-    use_fastembed: bool = True,
-    batch_size: int = 256,
-    cache_dir: Optional[str] = None,
-    threads: Optional[int] = None,
-) -> Optional[Callable]:
-    """
-    Get a sparse encoder function for the specified model.
-
-    Args:
-        model_name: Model name or alias (e.g., 'splade_pp', 'bert_base', or full model ID)
-        use_fastembed: If True, use FastEmbed (recommended for SPLADE models), else use Transformers
-        batch_size: Batch size for encoding
-        cache_dir: Directory to cache model files
-        threads: Number of threads to use
-
-    Returns:
-        Callable function that generates sparse vectors, or None if model_name is not provided
-    """
-    if model_name is None:
-        return None
-
-    if use_fastembed:
-        return _initialize_sparse_encoder_fastembed(
-            model_name=model_name,
-            batch_size=batch_size,
-            cache_dir=cache_dir,
-            threads=threads,
-        )
-    else:
-        return _initialize_sparse_encoder_transformers(model_name=model_name)
+# Import sparse encoder utilities from utils module
 
 
 import_err_msg = (
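The encoder factories removed here now sit behind the .utils import added earlier; get_sparse_encoder keeps the signature shown in the removed code. A short usage sketch, assuming endee_llamaindex.utils exposes it unchanged:

    from endee_llamaindex.utils import get_sparse_encoder

    # Returns a callable mapping a list of texts to parallel (indices, values)
    # batches, or None when model_name is None. 'splade_pp' is one of the
    # aliases in SUPPORTED_SPARSE_MODELS.
    encode = get_sparse_encoder(model_name="splade_pp", use_fastembed=True, batch_size=256)
    if encode is not None:
        indices, values = encode(["hybrid search with SPLADE"])
        print(len(indices[0]), len(values[0]))  # one sparse vector per input text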
@@ -282,22 +80,18 @@ class EndeeVectorStore(BasePydanticVectorStore):
 
     stores_text: bool = True
     flat_metadata: bool = False
-
     api_token: Optional[str]
     index_name: Optional[str]
     space_type: Optional[str]
     dimension: Optional[int]
-    insert_kwargs: Optional[Dict]
     add_sparse_vector: bool
     text_key: str
     batch_size: int
     remove_text_from_metadata: bool
     hybrid: bool
-
+    sparse_dim: Optional[int]
     model_name: Optional[str]
     precision: Optional[str]
-    key: Optional[str]
-
     _endee_index: Any = PrivateAttr()
     _sparse_encoder: Optional[Callable] = PrivateAttr(default=None)
 
@@ -308,63 +102,68 @@
         index_name: Optional[str] = None,
         space_type: Optional[str] = "cosine",
         dimension: Optional[int] = None,
-        insert_kwargs: Optional[Dict] = None,
         add_sparse_vector: bool = False,
         text_key: str = DEFAULT_TEXT_KEY,
         batch_size: int = DEFAULT_BATCH_SIZE,
         remove_text_from_metadata: bool = False,
         hybrid: bool = False,
+        sparse_dim: Optional[int] = None,
         model_name: Optional[str] = None,
-        precision: Optional[str] = "
+        precision: Optional[str] = "float16",
+        M: Optional[int] = None,
+        ef_con: Optional[int] = None,
         **kwargs: Any,
     ) -> None:
-            add_sparse_vector=add_sparse_vector,
-            text_key=text_key,
-            batch_size=batch_size,
-            remove_text_from_metadata=remove_text_from_metadata,
-            vocab_size=vocab_size,
-            hybrid=hybrid,
-            model_name=model_name,
-            precision=precision,
-            key=key,
-        )
-
-        # Initialize index based on hybrid flag
-        if endee_index is not None:
-            # Use provided index
-            self._endee_index = endee_index
-        elif hybrid:
-            # Initialize hybrid index
-            self._endee_index = self._initialize_hybrid_index(
-                api_token, index_name, dimension, space_type, vocab_size, precision, key
-            )
-        else:
-            # Initialize regular index
-            self._endee_index = self._initialize_endee_index(
-                api_token, index_name, dimension, space_type, precision, key
-            )
-
-        # Initialize sparse encoder if model name is provided and hybrid mode is enabled
-        if hybrid and model_name:
-            _logger.info(f"Initializing sparse encoder with model: {model_name}")
-            self._sparse_encoder = get_sparse_encoder(
-                model_name=model_name,
-                use_fastembed=True,  # Default to FastEmbed
+        try:
+            super().__init__(
+                index_name=index_name,
+                api_token=api_token,
+                space_type=space_type,
+                dimension=dimension,
+                add_sparse_vector=add_sparse_vector,
+                text_key=text_key,
                 batch_size=batch_size,
+                remove_text_from_metadata=remove_text_from_metadata,
+                sparse_dim=sparse_dim,
+                hybrid=hybrid,
+                model_name=model_name,
+                precision=precision,
             )
-
+
+            # Initialize index (handles both dense and hybrid)
+            if endee_index is not None:
+                self._endee_index = endee_index
+            else:
+                # sparse_dim=None creates dense index, sparse_dim>0 creates hybrid index
+                self._endee_index = self._initialize_endee_index(
+                    api_token,
+                    index_name,
+                    dimension,
+                    space_type,
+                    precision,
+                    sparse_dim=sparse_dim if hybrid else None,
+                    M=M,
+                    ef_con=ef_con,
+                )
+
+            # Initialize sparse encoder if hybrid mode is enabled
+            if hybrid:
+                # Use default model if none provided
+                if model_name is None:
+                    model_name = "splade_pp"  # Default sparse model
+                    _logger.info(f"Using default sparse model: {model_name}")
+
+                _logger.info(f"Initializing sparse encoder with model: {model_name}")
+                self._sparse_encoder = get_sparse_encoder(
+                    model_name=model_name,
+                    use_fastembed=True,  # Default to FastEmbed
+                    batch_size=batch_size,
+                )
+            else:
+                self._sparse_encoder = None
+        except Exception as e:
+            _logger.error(f"Error initializing EndeeVectorStore: {e}")
+            raise
 
     @classmethod
     def _initialize_endee_index(
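With the rewritten __init__, dense and hybrid stores share one code path: everything funnels through _initialize_endee_index, and the sparse encoder is set up only when hybrid=True. A usage sketch; the token and index names are placeholders, and the sparse_dim value is just an illustrative vocabulary size:

    from endee_llamaindex.base import EndeeVectorStore

    # Dense-only store: sparse_dim stays None, no sparse encoder is created.
    dense_store = EndeeVectorStore(
        api_token="<ENDEE_API_TOKEN>",
        index_name="articles_dense",
        dimension=384,
        space_type="cosine",
        precision="float16",
    )

    # Hybrid store: hybrid=True with sparse_dim > 0; model_name falls back to "splade_pp".
    hybrid_store = EndeeVectorStore(
        api_token="<ENDEE_API_TOKEN>",
        index_name="articles_hybrid",
        dimension=384,
        hybrid=True,
        sparse_dim=30522,  # illustrative vocab-sized sparse dimension (assumption)
        M=16,              # optional HNSW knobs, forwarded to create_index
        ef_con=200,
    )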
@@ -373,82 +172,139 @@
         index_name: Optional[str],
         dimension: Optional[int] = None,
         space_type: Optional[str] = "cosine",
-        precision: Optional[str] = "
+        precision: Optional[str] = "float16",
+        sparse_dim: Optional[int] = None,
+        M: Optional[int] = None,
+        ef_con: Optional[int] = None,
     ) -> Any:
-        """
-
-        from endee.endee import Endee
-
-        # Initialize Endee client
-        nd = Endee(token=api_token)
+        """
+        Initialize Endee index (dense or hybrid).
 
+        Args:
+            api_token: Endee API token
+            index_name: Name of the index
+            dimension: Dense vector dimension
+            space_type: Distance metric (cosine, l2, ip)
+            precision: Vector precision type
+            sparse_dim: Sparse vector dimension. If None or 0, creates dense-only index.
+                If > 0, creates hybrid index with both dense and sparse vectors.
+            M: HNSW graph connectivity parameter (optional)
+            ef_con: HNSW construction parameter (optional)
+
+        Returns:
+            Endee Index object
+        """
         try:
-            _logger.info(f"Retrieved existing index: {index_name}")
-            return index
-        except Exception as e:
-            if dimension is None:
-                raise ValueError(
-                    "Must provide dimension when creating a new index"
-                ) from e
-
-            # Create a new index if it doesn't exist
-            _logger.info(f"Creating new index: {index_name}")
-            nd.create_index(
-                name=index_name,
-                dimension=dimension,
-                space_type=space_type,
-                precision=precision,
-                key=key,
-            )
-            return nd.get_index(name=index_name, key=key)
-
-        dimension: Optional[int] = None,
-        space_type: Optional[str] = "cosine",
-        vocab_size: Optional[int] = None,
-        precision: Optional[str] = "medium",
-        key: Optional[str] = None,
-    ) -> Any:
-        """Initialize Endee hybrid index using the current API."""
-        endee = _import_endee()
-        from endee.endee import Endee
+            _logger.info("Connecting to Endee service...")
+            nd = Endee(token=api_token)
+            prec = precision if precision is not None else "float16"
+            is_hybrid = sparse_dim is not None and sparse_dim > 0
+            dim_sparse = sparse_dim if is_hybrid else 0
 
-        try:
-            # Try to get existing hybrid index
-            index = nd.get_hybrid_index(name=index_name, key=key)
-            _logger.info(f"Retrieved existing hybrid index: {index_name}")
-            return index
+            try:
+                _logger.info(f"Checking if index '{index_name}' exists...")
+                index = nd.get_index(name=index_name)
+                # Check if existing index matches expected type
+                existing_sparse_dim = getattr(index, "sparse_dim", 0)
+                if is_hybrid and existing_sparse_dim > 0:
+                    _logger.info(f"✓ Retrieved existing hybrid index: {index_name}")
+                elif not is_hybrid and existing_sparse_dim == 0:
+                    _logger.info(f"✓ Retrieved existing dense index: {index_name}")
+                elif is_hybrid and existing_sparse_dim == 0:
+                    _logger.warning(
+                        f"Index '{index_name}' exists as dense-only (sparse_dim=0) but hybrid was requested. "
+                        f"Using existing dense index."
+                    )
+                else:
+                    _logger.warning(
+                        f"Index '{index_name}' exists as hybrid (sparse_dim={existing_sparse_dim}) "
+                        f"but dense-only was requested. Using existing hybrid index."
+                    )
+                return index
+
+            except Exception as e:
+                # Index doesn't exist, create new one
+                if dimension is None:
+                    raise ValueError(
+                        f"Must provide dimension when creating a new {'hybrid' if is_hybrid else 'dense'} index"
+                    ) from e
+                if is_hybrid and sparse_dim is None:
+                    raise ValueError(
+                        "Must provide sparse_dim when creating a new hybrid index"
+                    ) from e
+
+                # Validate index name
+                try:
+                    from endee.utils import is_valid_index_name
+
+                    if not is_valid_index_name(index_name):
+                        raise ValueError(
+                            f"Invalid index name. Index name must be alphanumeric and can "
+                            f"contain underscores and should be less than "
+                            f"{MAX_INDEX_NAME_LENGTH_ALLOWED} characters"
+                        )
+                except ImportError:
+                    pass
+
+                # Validate dimension
+                if dimension > MAX_DIMENSION_ALLOWED:
+                    raise ValueError(
+                        f"Dimension cannot be greater than {MAX_DIMENSION_ALLOWED}"
+                    )
+
+                # Validate sparse_dim
+                if dim_sparse < 0:
+                    raise ValueError("sparse_dim cannot be negative")
+
+                # Validate and map space_type
+                space = SPACE_TYPE_MAP.get(
+                    (space_type or "cosine").lower(), (space_type or "cosine").lower()
+                )
+                if space not in SPACE_TYPES_VALID:
+                    raise ValueError(f"Invalid space type: {space}")
+
+                # Validate precision
+                if prec not in PRECISION_VALID:
+                    raise ValueError(
+                        f"Invalid precision: {prec}. Use one of {PRECISION_VALID}"
+                    )
+
+                # Build create_index kwargs
+                create_kwargs = {
+                    "name": index_name,
+                    "dimension": dimension,
+                    "space_type": space,
+                    "precision": prec,
+                    "sparse_dim": dim_sparse,
+                }
+                # Only add M and ef_con if provided
+                if M is not None:
+                    create_kwargs["M"] = M
+                if ef_con is not None:
+                    create_kwargs["ef_con"] = ef_con
+                # Build log message
+                index_type = "hybrid" if is_hybrid else "dense"
+                log_msg = f"Creating new {index_type} index '{index_name}' (dimension={dimension}"
+                if is_hybrid:
+                    log_msg += f", sparse_dim={dim_sparse}"
+                if M is not None:
+                    log_msg += f", M={M}"
+                if ef_con is not None:
+                    log_msg += f", ef_con={ef_con}"
+                log_msg += ")..."
+
+                _logger.info(log_msg)
+
+                nd.create_index(**create_kwargs)
+                _logger.info("✓ Index created successfully")
+                return nd.get_index(name=index_name)
+
         except Exception as e:
-            if dimension is None:
-                raise ValueError(
-                    "Must provide dimension when creating a new hybrid index"
-                ) from e
-            if vocab_size is None:
-                raise ValueError(
-                    "Must provide vocab_size when creating a new hybrid index"
-                ) from e
-
-            # Create a new hybrid index if it doesn't exist
-            _logger.info(f"Creating new hybrid index: {index_name}")
-            nd.create_hybrid_index(
-                name=index_name,
-                dimension=dimension,
-                space_type=space_type,
-                vocab_size=vocab_size,
-                precision=precision,
-                key=key,
-            )
-            return nd.get_hybrid_index(name=index_name, key=key)
+            _logger.error(f"Error initializing Endee index: {e}")
+            raise
 
     @classmethod
     def from_params(
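The rewritten initializer is get-or-create: it first tries nd.get_index and only falls back to create_index, warning instead of failing when an existing index's sparse_dim disagrees with what was requested. One detail worth isolating is the space-type normalization, which accepts the aliases named in the module docstring. A standalone sketch of that step, with the map values assumed from the docstring's 'euclidean'->'l2', 'inner_product'->'ip' rule:

    from typing import Optional

    SPACE_TYPE_MAP = {"euclidean": "l2", "inner_product": "ip"}
    SPACE_TYPES_VALID = {"cosine", "l2", "ip"}

    def normalize_space_type(space_type: Optional[str]) -> str:
        """Mirrors the normalization step in _initialize_endee_index."""
        raw = (space_type or "cosine").lower()
        space = SPACE_TYPE_MAP.get(raw, raw)
        if space not in SPACE_TYPES_VALID:
            raise ValueError(f"Invalid space type: {space}")
        return space

    assert normalize_space_type("Euclidean") == "l2"
    assert normalize_space_type(None) == "cosine"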
@@ -459,351 +315,446 @@
         space_type: str = "cosine",
         batch_size: int = DEFAULT_BATCH_SIZE,
         hybrid: bool = False,
-
+        sparse_dim: Optional[int] = None,
         model_name: Optional[str] = None,
-        precision: Optional[str] = "
+        precision: Optional[str] = "float16",
+        M: Optional[int] = None,
+        ef_con: Optional[int] = None,
     ) -> "EndeeVectorStore":
         """Create EndeeVectorStore from parameters.
 
         Args:
             api_token: API token for Endee service
             index_name: Name of the index
             dimension: Vector dimension
             space_type: Distance metric ("cosine", "l2", or "ip")
             batch_size: Batch size for operations
-            hybrid: If True, create/use a hybrid index
-
-            model_name: Model name or alias for sparse embeddings
-
-                - 'distilbert': distilbert-base-uncased (~256 MB)
-                - 'minilm': sentence-transformers/all-MiniLM-L6-v2 (~90 MB)
-                - 'mpnet': sentence-transformers/all-mpnet-base-v2 (~420 MB)
-                - 'roberta': roberta-base (~501 MB)
-                - 'xlm_roberta': xlm-roberta-base (~1.3 GB)
-            precision: Precision setting for index ("low", "medium", "high", or None)
-            key: Encryption key for encrypting metadata (256-bit hex key, 64 hex characters)
-                If provided, metadata will be encrypted using AES-256. Store this key securely.
+            hybrid: If True, create/use a hybrid index. Auto-set to True if sparse_dim > 0.
+            sparse_dim: Sparse dimension for hybrid index. If > 0, hybrid is automatically enabled.
+            model_name: Model name or alias for sparse embeddings. Defaults to 'splade_pp' if not provided.
+                Available: 'splade_pp', 'splade_cocondenser', 'bert_base', 'distilbert', etc.
+            precision: Precision for index. Use one of: "binary", "float16", "float32", "int16d", "int8d". Default "float16".
+            M: Optional HNSW M parameter (bi-directional links per node). If not provided, backend uses default.
+            ef_con: Optional HNSW ef_construction parameter. If not provided, backend uses default.
         """
-        if 
+        # Auto-enable hybrid if sparse_dim is provided and > 0
+        try:
+            if sparse_dim is not None and sparse_dim > 0:
+                hybrid = True
+                _logger.info(f"Auto-enabling hybrid mode (sparse_dim={sparse_dim} > 0)")
+
+            # Initialize index (unified method handles both dense and hybrid)
             endee_index = cls._initialize_endee_index(
-            api_token,
+                api_token,
+                index_name,
+                dimension,
+                space_type,
+                precision,
+                sparse_dim=sparse_dim if hybrid else None,
+                M=M,
+                ef_con=ef_con,
             )
 
+            # Get actual index configuration from the backend
+            try:
+                index_info = endee_index.describe()
+                actual_index_name = index_info.get("name", index_name)
+                actual_dimension = index_info.get("dimension", dimension)
+                actual_space_type = index_info.get("space_type", space_type)
+                actual_precision = index_info.get("precision", precision)
+                actual_sparse_dim = index_info.get("sparse_dim", sparse_dim)
+            except Exception as e:
+                _logger.warning(
+                    f"Could not get index info, using provided parameters: {e}"
+                )
+                # Fallback to provided parameters
+                actual_index_name = index_name
+                actual_dimension = dimension
+                actual_space_type = space_type
+                actual_precision = precision
+                actual_sparse_dim = sparse_dim
+
+            # Determine if index is hybrid based on sparse_dim
+            actual_hybrid = actual_sparse_dim is not None and actual_sparse_dim > 0
+
+            return cls(
+                endee_index=endee_index,
+                api_token=api_token,
+                index_name=actual_index_name,
+                dimension=actual_dimension,
+                space_type=actual_space_type,
+                batch_size=batch_size,
+                sparse_dim=actual_sparse_dim,
+                hybrid=actual_hybrid,
+                model_name=model_name,
+                precision=actual_precision,
+                M=M,
+                ef_con=ef_con,
+            )
+        except Exception as e:
+            _logger.error(f"Error creating EndeeVectorStore from params: {e}")
+            raise
 
     @classmethod
     def class_name(cls) -> str:
-
+        try:
+            return "EndeeVectorStore"
+        except Exception as e:
+            _logger.error(f"Error getting class name: {e}")
+            raise
 
     def _compute_sparse_vectors(self, texts: List[str]) -> tuple:
         """Compute sparse vectors for a list of texts."""
-
+        try:
+            if self._sparse_encoder is None:
+                raise ValueError(
+                    "Sparse encoder not initialized. "
+                    "Please provide model_name when creating the store with hybrid=True."
+                )
+            return self._sparse_encoder(texts)
+        except Exception as e:
+            _logger.error(f"Error computing sparse vectors: {e}")
+            raise
 
     def add(
         self,
         nodes: List[BaseNode],
-        hybrid: Optional[bool] = None,
         **add_kwargs: Any,
     ) -> List[str]:
         """
         Add nodes to index.
 
         Args:
-            nodes: List
-
+            nodes: List of nodes with embeddings to add to the index.
+                If index is configured for hybrid search (self.hybrid=True),
+                sparse vectors will be automatically computed from node text.
         """
-
-        for 
-
-        else:
-            sparse_indices = [[] for _ in texts]
-            sparse_values = [[] for _ in texts]
-
-        for i, node in enumerate(nodes):
-            node_id = node.node_id
-            metadata = node_to_metadata_dict(node)
-
-            # Filter values must be simple key-value pairs
-            filter_data = {}
-            if "file_name" in metadata:
-                filter_data["file_name"] = metadata["file_name"]
-            if "doc_id" in metadata:
-                filter_data["doc_id"] = metadata["doc_id"]
-            if "category" in metadata:
-                filter_data["category"] = metadata["category"]
-            if "difficulty" in metadata:
-                filter_data["difficulty"] = metadata["difficulty"]
-            if "language" in metadata:
-                filter_data["language"] = metadata["language"]
-            if "field" in metadata:
-                filter_data["field"] = metadata["field"]
-            if "type" in metadata:
-                filter_data["type"] = metadata["type"]
-            if "feature" in metadata:
-                filter_data["feature"] = metadata["feature"]
-
-            # Build entry based on hybrid mode
+        try:
+            # Use instance hybrid setting
+            use_hybrid = self.hybrid
+
+            # Endee Index.upsert rejects duplicate IDs in a batch; dedupe by node_id (keep last)
+            seen: Dict[str, int] = {}
+            for idx, node in enumerate(nodes):
+                seen[node.node_id] = idx
+            deduped_indices = sorted(seen.values())
+            nodes = [nodes[i] for i in deduped_indices]
+
+            ids = []
+            entries = []
+            texts = []
+
+            # Collect texts for sparse encoding if hybrid mode
             if use_hybrid:
-                    "sparse_vector": {
-                        "indices": sparse_indices[i],
-                        "values": sparse_values[i]
-                    },
-                    "meta": metadata,
-                }
-            else:
-                entry = {
-                    "id": node_id,
-                    "vector": node.get_embedding(),
-                    "meta": metadata,
-                    "filter": filter_data
-                }
+                for node in nodes:
+                    text = node.get_content()
+                    texts.append(text)
 
+            # Compute sparse vectors in batch
+            if self._sparse_encoder is not None and texts:
+                sparse_indices, sparse_values = self._compute_sparse_vectors(texts)
+            else:
+                sparse_indices = [[] for _ in texts]
+                sparse_values = [[] for _ in texts]
+
+            for i, node in enumerate(nodes):
+                node_id = node.node_id
+                metadata = node_to_metadata_dict(node)
+
+                # Filter values must be simple key-value pairs
+                filter_data = {}
+                ref_id = getattr(node, "ref_doc_id", None) or metadata.get("ref_doc_id")
+                if ref_id is not None:
+                    filter_data["ref_doc_id"] = ref_id
+                if "file_name" in metadata:
+                    filter_data["file_name"] = metadata["file_name"]
+                if "doc_id" in metadata:
+                    filter_data["doc_id"] = metadata["doc_id"]
+                if "category" in metadata:
+                    filter_data["category"] = metadata["category"]
+                if "difficulty" in metadata:
+                    filter_data["difficulty"] = metadata["difficulty"]
+                if "language" in metadata:
+                    filter_data["language"] = metadata["language"]
+                if "field" in metadata:
+                    filter_data["field"] = metadata["field"]
+                if "type" in metadata:
+                    filter_data["type"] = metadata["type"]
+                if "feature" in metadata:
+                    filter_data["feature"] = metadata["feature"]
+
+                # Build entry for endee Index.upsert
+                if use_hybrid:
+                    entry = {
+                        "id": node_id,
+                        "vector": node.get_embedding(),
+                        "sparse_indices": sparse_indices[i],
+                        "sparse_values": sparse_values[i],
+                        "meta": metadata,
+                        "filter": filter_data,
+                    }
+                else:
+                    entry = {
+                        "id": node_id,
+                        "vector": node.get_embedding(),
+                        "meta": metadata,
+                        "filter": filter_data,
+                    }
+
+                ids.append(node_id)
+                entries.append(entry)
+
+            # Batch insert; endee Index.upsert allows max MAX_VECTORS_PER_BATCH per batch
+            batch_size = min(self.batch_size, MAX_VECTORS_PER_BATCH)
+            for i in range(0, len(entries), batch_size):
+                batch = entries[i : i + batch_size]
+                self._endee_index.upsert(batch)
+
+            return ids
+        except Exception as e:
+            _logger.error(f"Error adding nodes to index: {e}")
+            raise
 
     def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
         """
-        Delete nodes using 
-
-        Args:
-            ref_doc_id (str): The id of the document to delete.
+        Delete nodes by ref_doc_id using endee Index.delete_with_filter.
+        Only deletes vectors that were stored with ref_doc_id in their filter (see add()).
         """
         try:
-
+            # Filter format consistent with query: list of {field: {$op: value}}
+            filter_dict = [{"ref_doc_id": {"$eq": ref_doc_id}}]
+            self._endee_index.delete_with_filter(filter_dict)
         except Exception as e:
-            _logger.error(f"Error deleting 
+            _logger.error(f"Error deleting by ref_doc_id {ref_doc_id!r}: {e}")
+            raise
 
     @property
     def client(self) -> Any:
         """Return Endee index client."""
-
+        try:
+            return self._endee_index
+        except Exception as e:
+            _logger.error(f"Error getting client: {e}")
+            raise
+
+    def describe(self) -> Dict[str, Any]:
+        """Get index metadata (endee Index.describe())."""
+        try:
+            return self._endee_index.describe()
+        except Exception as e:
+            _logger.error(f"Error describing index: {e}")
+            return {}
+
+    def fetch(self, ids: List[str]) -> List[Dict[str, Any]]:
+        """Fetch vectors by IDs (uses endee Index.get_vector per id)."""
+        out: List[Dict[str, Any]] = []
+        for id_ in ids:
+            try:
+                out.append(self._endee_index.get_vector(id_))
+            except Exception as e:
+                _logger.error(f"Error fetching vector id {id_}: {e}")
+        return out
 
     def query(
         self,
         query: VectorStoreQuery,
-
-        sparse_query_text: Optional[str] = None,
-        sparse_top_k: Optional[int] = None,
-        dense_top_k: Optional[int] = None,
-        rrf_k: int = 60,
+        ef: int = DEFAULT_EF_SEARCH,
         **kwargs: Any,
     ) -> VectorStoreQueryResult:
         """
         Query index for top k most similar nodes.
 
         Args:
-            query: VectorStoreQuery object containing query parameters
-
-                Defaults to query.similarity_top_k if not specified.
-            rrf_k: Reciprocal Rank Fusion parameter (default: 60).
+            query: VectorStoreQuery object containing query parameters:
+                - query_embedding: Dense vector for search
+                - query_str: Text query for sparse search (used if index is hybrid)
+                - similarity_top_k: Number of results to return
+                - filters: Optional metadata filters
+                - alpha: Optional weighting for hybrid search (0=sparse, 1=dense)
+            ef: HNSW ef_search parameter (default 128, max 1024).
+                Controls search quality vs speed tradeoff.
         """
-        # Use 
-
-        if not hasattr(self._endee_index, 'dimension'):
-            # Get dimension from index if available, otherwise try to infer from query
-            try:
-                dimension = self._endee_index.describe()["dimension"]
-            except:
-                if query.query_embedding is not None:
-                    dimension = len(query.query_embedding)
-                else:
-                    raise ValueError("Could not determine vector dimension")
-        else:
-            dimension = self._endee_index.dimension
-
-        query_embedding = [0.0] * dimension  # Default empty vector
-        filters = {}
-
-        # Apply any metadata filters if provided
-        if query.filters is not None:
-            for filter_item in query.filters.filters:
-                # Case 1: MetadataFilter object
-                if hasattr(filter_item, "key") and hasattr(filter_item, "value") and hasattr(filter_item, "operator"):
-                    op_symbol = reverse_operator_map.get(filter_item.operator)
-                    if not op_symbol:
-                        raise ValueError(f"Unsupported filter operator: {filter_item.operator}")
-
-                    if filter_item.key not in filters:
-                        filters[filter_item.key] = {}
-
-                    filters[filter_item.key][op_symbol] = filter_item.value
-
-                # Case 2: Raw dict, e.g. {"category": {"$eq": "programming"}}
-                elif isinstance(filter_item, dict):
-                    for key, op_dict in filter_item.items():
-                        if isinstance(op_dict, dict):
-                            for op, val in op_dict.items():
-                                if key not in filters:
-                                    filters[key] = {}
-                                filters[key][op] = val
-                        else:
-                            raise ValueError(f"Unsupported filter format: {filter_item}")
-
-        _logger.info(f"Final structured filters: {filters}")
-
-        # Use the query embedding if provided
-        if query.query_embedding is not None:
-            query_embedding = cast(List[float], query.query_embedding)
-            if query.alpha is not None and query.mode == VectorStoreQueryMode.HYBRID:
-                # Apply alpha scaling in hybrid mode
-                query_embedding = [v * query.alpha for v in query_embedding]
-
-        # Compute sparse query vector if hybrid mode
-        sparse_vector = {"indices": [], "values": []}
-
-        if use_hybrid:
-            query_text = sparse_query_text or getattr(query, 'query_str', None)
-            if query_text and self._sparse_encoder is not None:
-                sparse_indices_batch, sparse_values_batch = self._compute_sparse_vectors([query_text])
-                sparse_vector = {
-                    "indices": sparse_indices_batch[0],
-                    "values": sparse_values_batch[0]
-                }
-
-                rrf_k=rrf_k,
-            )
-        else:
-
-                metadata=metadata,
-                text_key=self.text_key,
-            )
-
-            # Create TextNode with the extracted metadata
-            # Step 1: Get the JSON string from "_node_content"
-            _node_content_str = metadata.get("_node_content", "{}")
-
+        # Use index configuration to determine hybrid mode
+        try:
+            use_hybrid = self.hybrid
+
+            # Log the mode being used
+            _logger.info(
+                f"Using {'hybrid' if use_hybrid else 'dense-only'} search (index configured with hybrid={self.hybrid})"
+            )
+
+            if not hasattr(self._endee_index, "dimension"):
+                # Get dimension from index if available, otherwise try to infer from query
+                try:
+                    dimension = self._endee_index.describe()["dimension"]
+                except Exception as e:
+                    _logger.warning(f"Could not get dimension from index: {e}")
+                    if query.query_embedding is not None:
+                        dimension = len(query.query_embedding)
+                    else:
+                        raise ValueError("Could not determine vector dimension")
+            else:
+                dimension = self._endee_index.dimension
+
+            query_embedding = [0.0] * dimension  # Default empty vector
+            filters = {}
+            # Apply any metadata filters if provided
+            if query.filters is not None:
+                for filter_item in query.filters.filters:
+                    # Case 1: MetadataFilter object
+                    if (
+                        hasattr(filter_item, "key")
+                        and hasattr(filter_item, "value")
+                        and hasattr(filter_item, "operator")
+                    ):
+                        if filter_item.operator not in SUPPORTED_FILTER_OPERATORS:
+                            raise ValueError(
+                                f"Unsupported filter operator: {filter_item.operator}. "
+                                "Supported filter operations: EQ ($eq), IN ($in)."
+                            )
+                        op_symbol = REVERSE_OPERATOR_MAP[filter_item.operator]
+                        if filter_item.key not in filters:
+                            filters[filter_item.key] = {}
+                        filters[filter_item.key][op_symbol] = filter_item.value
+
+                    # Case 2: Raw dict, e.g. {"category": {"$eq": "programming"}}
+                    elif isinstance(filter_item, dict):
+                        for key, op_dict in filter_item.items():
+                            if isinstance(op_dict, dict):
+                                for op, val in op_dict.items():
+                                    if key not in filters:
+                                        filters[key] = {}
+                                    filters[key][op] = val
+                            else:
+                                raise ValueError(f"Unsupported filter format: {filter_item}")
+
+            _logger.info(f"Final structured filters: {filters}")
+
+            # Endee API expects filter as array: [{"field": {"$op": value}}, ...]
+            filter_for_api: Optional[List[Dict[str, Any]]] = None
+            if filters:
+                filter_for_api = [{field: ops} for field, ops in filters.items()]
+                _logger.info(f"Filter sent to backend API: {filter_for_api}")
+
+            # Use the query embedding if provided
+            if query.query_embedding is not None:
+                query_embedding = cast(List[float], query.query_embedding)
+                if query.alpha is not None and use_hybrid:
+                    # Apply alpha scaling in hybrid mode
+                    query_embedding = [v * query.alpha for v in query_embedding]
+
+            # Sparse query components for hybrid (endee Index.query uses sparse_indices, sparse_values)
+            sparse_indices_q: Optional[List[int]] = None
+            sparse_values_q: Optional[List[float]] = None
+            if use_hybrid:
+                # Get query text from query.query_str
+                query_text = getattr(query, "query_str", None)
+                if query_text and self._sparse_encoder is not None:
+                    _logger.info(
+                        f"Processing sparse vectors for hybrid search with query_str: '{query_text[:100]}...'"
+                    )
+                    si, sv = self._compute_sparse_vectors([query_text])
+                    sparse_indices_q = si[0]
+                    sparse_values_q = [float(v) for v in sv[0]]
+                    _logger.info(f"Generated {len(sparse_indices_q)} sparse features")
+                elif query_text:
+                    _logger.warning(
+                        "Hybrid mode enabled but no sparse encoder available"
+                    )
+                else:
+                    _logger.warning(
+                        "Hybrid mode enabled but no query_str provided in VectorStoreQuery"
+                    )
+            else:
+                _logger.info("Using dense-only search (not hybrid mode)")
+
+            # Cap to endee limits (MAX_TOP_K_ALLOWED=512, MAX_EF_SEARCH_ALLOWED=1024)
+            requested_top_k = (
+                query.similarity_top_k if query.similarity_top_k is not None else 10
+            )
+            top_k = min(requested_top_k, MAX_TOP_K_ALLOWED)
+            ef_capped = min(ef, MAX_EF_SEARCH_ALLOWED)
+
+            # Build query kwargs - only include optional parameters if they have values
+            query_kwargs = {
+                "vector": query_embedding,
+                "top_k": top_k,
+                "ef": ef_capped,
+                "include_vectors": True,
+            }
+
+            # Only add filter if provided
+            if filter_for_api is not None:
+                query_kwargs["filter"] = filter_for_api
+
+            # Only add sparse vectors if provided (for hybrid search)
+            if sparse_indices_q is not None:
+                query_kwargs["sparse_indices"] = sparse_indices_q
+            if sparse_values_q is not None:
+                query_kwargs["sparse_values"] = sparse_values_q
+
+            # Use endee Index.query
+            try:
+                results = self._endee_index.query(**query_kwargs)
+            except Exception as e:
+                _logger.error(f"Error querying Endee: {e}")
+                return VectorStoreQueryResult(nodes=[], similarities=[], ids=[])
+
+            # Process results
+            nodes = []
+            similarities = []
+            ids = []
+
+            for result in results:
+                node_id = result["id"]
+                score = result.get("similarity", result.get("score", 0.0))
+                metadata = result.get("meta", {})
+
+                # Create node from metadata
+                if self.flat_metadata:
+                    node = metadata_dict_to_node(
+                        metadata=metadata,
+                        text=metadata.pop(self.text_key, None),
+                        id_=node_id,
+                    )
+                else:
+                    metadata_dict, node_info, relationships = (
+                        legacy_metadata_dict_to_node(
+                            metadata=metadata,
+                            text_key=self.text_key,
+                        )
+                    )
+
+                    # Create TextNode with the extracted metadata
+                    # Step 1: Get the JSON string from "_node_content"
+                    _node_content_str = metadata.get("_node_content", "{}")
+
+                    # Step 2: Convert JSON string to Python dict
+                    try:
+                        node_content = json.loads(_node_content_str)
+                    except json.JSONDecodeError:
+                        node_content = {}
+
+                    # Step 3: Get the text
+                    text = node_content.get(self.text_key, "")
+                    node = TextNode(
+                        text=text,
+                        metadata=metadata_dict,
+                        relationships=relationships,
+                        node_id=node_id,
+                    )
+
+                    # Add any node_info properties to the node
+                    for key, val in node_info.items():
+                        if hasattr(node, key):
+                            setattr(node, key, val)
+
+                # If embedding was returned in the results, add it to the node
+                if "vector" in result:
+                    node.embedding = result["vector"]
+
+                nodes.append(node)
+                similarities.append(score)
+                ids.append(node_id)
+
+            return VectorStoreQueryResult(
+                nodes=nodes, similarities=similarities, ids=ids
+            )
+        except Exception as e:
+            _logger.error(f"Error querying index: {e}")
+            raise