endee-llamaindex 0.1.2__py3-none-any.whl → 0.1.5a1__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package, as published to their respective public registries. It is provided for informational purposes only.
endee_llamaindex/base.py CHANGED
@@ -1,16 +1,32 @@
+ """
+ EndeeVectorStore: LlamaIndex vector store backed by the Endee API.
+
+ Aligned with the local endee package (./endee). API contract:
+
+ Endee (endee.endee):
+   - __init__(token, http_library)
+   - create_index(name, dimension, space_type, M, ef_con, precision, version, sparse_dim)
+     Validates: index name (alphanumeric + underscores, max length), dimension <= MAX_DIMENSION_ALLOWED,
+     space_type in SPACE_TYPES_SUPPORTED ('cosine','l2','ip'), precision in PRECISION_TYPES_SUPPORTED,
+     sparse_dim >= 0. Map 'euclidean'->'l2', 'inner_product'->'ip' before calling.
+   - get_index(name) -> Index
+
+ Index (endee.index):
+   - upsert(input_array): list of {id, vector, meta?, filter?, sparse_indices?, sparse_values?}; max MAX_VECTORS_PER_BATCH per batch; duplicate IDs in batch raise
+   - query(vector, top_k, filter, ef, include_vectors, sparse_indices, sparse_values)
+   - delete_vector(id), get_vector(id), describe()
+
+ No list_ids or batch fetch in endee; filter for query is JSON-serializable (e.g. [{"field": {"$op": value}}] or dict).
+ """
+
  import logging
- from collections import Counter
- from functools import partial
  import json
  from typing import Any, Callable, Dict, List, Optional, cast
-
  from llama_index.core.bridge.pydantic import PrivateAttr
- from llama_index.core.schema import BaseNode, MetadataMode, TextNode
+ from llama_index.core.schema import BaseNode, TextNode
  from llama_index.core.vector_stores.types import (
      BasePydanticVectorStore,
-     MetadataFilters,
      VectorStoreQuery,
-     VectorStoreQueryMode,
      VectorStoreQueryResult,
  )
  from llama_index.core.vector_stores.utils import (
@@ -19,83 +35,40 @@ from llama_index.core.vector_stores.utils import (
      metadata_dict_to_node,
      node_to_metadata_dict,
  )
-
- from datetime import datetime
-
- def _import_endee() -> Any:
-     """
-     Try to import endee module. If it's not already installed, instruct user how to install.
-     """
-     try:
-         import endee
-         from endee.endee_client import Endee
-     except ImportError as e:
-         raise ImportError(
-             "Could not import endee python package. "
-             "Please install it with `pip install endee`."
-         ) from e
-     return endee
-
- ID_KEY = "id"
- VECTOR_KEY = "values"
- SPARSE_VECTOR_KEY = "sparse_values"
- METADATA_KEY = "metadata"
-
- DEFAULT_BATCH_SIZE = 100
-
+ from .constants import (
+     DEFAULT_BATCH_SIZE,
+     DEFAULT_EF_SEARCH,
+     MAX_DIMENSION_ALLOWED,
+     MAX_EF_SEARCH_ALLOWED,
+     MAX_INDEX_NAME_LENGTH_ALLOWED,
+     MAX_TOP_K_ALLOWED,
+     MAX_VECTORS_PER_BATCH,
+     PRECISION_VALID,
+     REVERSE_OPERATOR_MAP,
+     SPACE_TYPE_MAP,
+     SPACE_TYPES_VALID,
+     SUPPORTED_FILTER_OPERATORS,
+ )
+ from .utils import get_sparse_encoder
+ from endee import Endee
  _logger = logging.getLogger(__name__)

- from llama_index.core.vector_stores.types import MetadataFilter, FilterOperator
-
- reverse_operator_map = {
-     FilterOperator.EQ: "$eq",
-     FilterOperator.NE: "$ne",
-     FilterOperator.GT: "$gt",
-     FilterOperator.GTE: "$gte",
-     FilterOperator.LT: "$lt",
-     FilterOperator.LTE: "$lte",
-     FilterOperator.IN: "$in",
-     FilterOperator.NIN: "$nin",
- }
-
-
-
- def build_dict(input_batch: List[List[int]]) -> List[Dict[str, Any]]:
-     """
-     Build a list of sparse dictionaries from a batch of input_ids.

-     NOTE: taken from https://www.pinecone.io/learn/hybrid-search-intro/.

-     """
-     # store a batch of sparse embeddings
-     sparse_emb = []
-     # iterate through input batch
-     for token_ids in input_batch:
-         indices = []
-         values = []
-         # convert the input_ids list to a dictionary of key to frequency values
-         d = dict(Counter(token_ids))
-         for idx in d:
-             indices.append(idx)
-             values.append(float(d[idx]))
-         sparse_emb.append({"indices": indices, "values": values})
-     # return sparse_emb list
-     return sparse_emb
-
-
- def generate_sparse_vectors(
-     context_batch: List[str], tokenizer: Callable
- ) -> List[Dict[str, Any]]:
-     """
-     Generate sparse vectors from a batch of contexts.
+ # Supported sparse embedding models
+ SUPPORTED_SPARSE_MODELS = {
+     "splade_pp": "prithivida/Splade_PP_en_v1",
+     "splade_cocondenser": "naver/splade-cocondenser-ensembledistil",
+     "bert_base": "bert-base-uncased",
+     "distilbert": "distilbert-base-uncased",
+     "minilm": "sentence-transformers/all-MiniLM-L6-v2",
+     "mpnet": "sentence-transformers/all-mpnet-base-v2",
+     "roberta": "roberta-base",
+     "xlm_roberta": "xlm-roberta-base",
+ }

-     NOTE: taken from https://www.pinecone.io/learn/hybrid-search-intro/.

-     """
-     # create batch of input_ids
-     inputs = tokenizer(context_batch)["input_ids"]
-     # create sparse dictionaries
-     return build_dict(inputs)
+ # Import sparse encoder utilities from utils module


  import_err_msg = (
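
[Editor's note] The new `.constants` module imported above is not included in this diff. For orientation only, here is a minimal sketch of what it plausibly exports, assembled from the names imported in this hunk and from limits quoted elsewhere in this file (the top_k cap of 512 and ef_search cap of 1024 noted in query(), the ef default of 128 in the query() docstring, the old module-level DEFAULT_BATCH_SIZE = 100, the precision list in the from_params() docstring, the 'cosine'/'l2'/'ip' space types and 'euclidean'->'l2' mapping in the module docstring, and the EQ/IN-only filter support in query()). Values marked as assumptions are not published anywhere in this diff.

# Hypothetical sketch of endee_llamaindex/constants.py -- not part of the released package diff.
from llama_index.core.vector_stores.types import FilterOperator

DEFAULT_BATCH_SIZE = 100              # carried over from the deleted 0.1.2 constant
DEFAULT_EF_SEARCH = 128               # "default 128" per the query() docstring
MAX_EF_SEARCH_ALLOWED = 1024          # "max 1024" per the query() docstring
MAX_TOP_K_ALLOWED = 512               # per the capping comment in query()
MAX_DIMENSION_ALLOWED = 4096          # assumption: real limit not shown in this diff
MAX_INDEX_NAME_LENGTH_ALLOWED = 64    # assumption: real limit not shown in this diff
MAX_VECTORS_PER_BATCH = 1000          # assumption: real limit not shown in this diff

SPACE_TYPES_VALID = {"cosine", "l2", "ip"}
SPACE_TYPE_MAP = {"euclidean": "l2", "inner_product": "ip"}  # mapping named in the module docstring
PRECISION_VALID = {"binary", "float16", "float32", "int16d", "int8d"}  # per from_params() docstring

# query() rejects everything except EQ and IN (see its error message)
SUPPORTED_FILTER_OPERATORS = {FilterOperator.EQ, FilterOperator.IN}
REVERSE_OPERATOR_MAP = {FilterOperator.EQ: "$eq", FilterOperator.IN: "$in"}
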
@@ -107,18 +80,20 @@ class EndeeVectorStore(BasePydanticVectorStore):

      stores_text: bool = True
      flat_metadata: bool = False
-
      api_token: Optional[str]
      index_name: Optional[str]
      space_type: Optional[str]
      dimension: Optional[int]
-     insert_kwargs: Optional[Dict]
      add_sparse_vector: bool
      text_key: str
      batch_size: int
      remove_text_from_metadata: bool
-
+     hybrid: bool
+     sparse_dim: Optional[int]
+     model_name: Optional[str]
+     precision: Optional[str]
      _endee_index: Any = PrivateAttr()
+     _sparse_encoder: Optional[Callable] = PrivateAttr(default=None)

      def __init__(
          self,
@@ -127,31 +102,68 @@ class EndeeVectorStore(BasePydanticVectorStore):
          index_name: Optional[str] = None,
          space_type: Optional[str] = "cosine",
          dimension: Optional[int] = None,
-         insert_kwargs: Optional[Dict] = None,
          add_sparse_vector: bool = False,
          text_key: str = DEFAULT_TEXT_KEY,
          batch_size: int = DEFAULT_BATCH_SIZE,
          remove_text_from_metadata: bool = False,
+         hybrid: bool = False,
+         sparse_dim: Optional[int] = None,
+         model_name: Optional[str] = None,
+         precision: Optional[str] = "float16",
+         M: Optional[int] = None,
+         ef_con: Optional[int] = None,
          **kwargs: Any,
      ) -> None:
-         insert_kwargs = insert_kwargs or {}
-
-         super().__init__(
-             index_name=index_name,
-             api_token=api_token,
-             space_type=space_type,
-             dimension=dimension,
-             insert_kwargs=insert_kwargs,
-             add_sparse_vector=add_sparse_vector,
-             text_key=text_key,
-             batch_size=batch_size,
-             remove_text_from_metadata=remove_text_from_metadata,
-         )
-
-         # Use existing endee_index or initialize a new one
-         self._endee_index = endee_index or self._initialize_endee_index(
-             api_token, index_name, dimension, space_type
-         )
+         try:
+             super().__init__(
+                 index_name=index_name,
+                 api_token=api_token,
+                 space_type=space_type,
+                 dimension=dimension,
+                 add_sparse_vector=add_sparse_vector,
+                 text_key=text_key,
+                 batch_size=batch_size,
+                 remove_text_from_metadata=remove_text_from_metadata,
+                 sparse_dim=sparse_dim,
+                 hybrid=hybrid,
+                 model_name=model_name,
+                 precision=precision,
+             )
+
+             # Initialize index (handles both dense and hybrid)
+             if endee_index is not None:
+                 self._endee_index = endee_index
+             else:
+                 # sparse_dim=None creates dense index, sparse_dim>0 creates hybrid index
+                 self._endee_index = self._initialize_endee_index(
+                     api_token,
+                     index_name,
+                     dimension,
+                     space_type,
+                     precision,
+                     sparse_dim=sparse_dim if hybrid else None,
+                     M=M,
+                     ef_con=ef_con,
+                 )
+
+             # Initialize sparse encoder if hybrid mode is enabled
+             if hybrid:
+                 # Use default model if none provided
+                 if model_name is None:
+                     model_name = "splade_pp"  # Default sparse model
+                     _logger.info(f"Using default sparse model: {model_name}")
+
+                 _logger.info(f"Initializing sparse encoder with model: {model_name}")
+                 self._sparse_encoder = get_sparse_encoder(
+                     model_name=model_name,
+                     use_fastembed=True,  # Default to FastEmbed
+                     batch_size=batch_size,
+                 )
+             else:
+                 self._sparse_encoder = None
+         except Exception as e:
+             _logger.error(f"Error initializing EndeeVectorStore: {e}")
+             raise

      @classmethod
      def _initialize_endee_index(
@@ -160,33 +172,139 @@
          index_name: Optional[str],
          dimension: Optional[int] = None,
          space_type: Optional[str] = "cosine",
+         precision: Optional[str] = "float16",
+         sparse_dim: Optional[int] = None,
+         M: Optional[int] = None,
+         ef_con: Optional[int] = None,
      ) -> Any:
-         """Initialize Endee index using the current API."""
-         endee = _import_endee()
-         from endee.endee_client import Endee
-
-         # Initialize Endee client
-         nd = Endee(token=api_token)
+         """
+         Initialize Endee index (dense or hybrid).

+         Args:
+             api_token: Endee API token
+             index_name: Name of the index
+             dimension: Dense vector dimension
+             space_type: Distance metric (cosine, l2, ip)
+             precision: Vector precision type
+             sparse_dim: Sparse vector dimension. If None or 0, creates dense-only index.
+                 If > 0, creates hybrid index with both dense and sparse vectors.
+             M: HNSW graph connectivity parameter (optional)
+             ef_con: HNSW construction parameter (optional)
+
+         Returns:
+             Endee Index object
+         """
          try:
-             # Try to get existing index
-             index = nd.get_index(name=index_name)
-             _logger.info(f"Retrieved existing index: {index_name}")
-             return index
+
+
+
+             _logger.info("Connecting to Endee service...")
+             nd = Endee(token=api_token)
+             prec = precision if precision is not None else "float16"
+             is_hybrid = sparse_dim is not None and sparse_dim > 0
+             dim_sparse = sparse_dim if is_hybrid else 0
+
+             try:
+                 _logger.info(f"Checking if index '{index_name}' exists...")
+                 index = nd.get_index(name=index_name)
+                 # Check if existing index matches expected type
+                 existing_sparse_dim = getattr(index, "sparse_dim", 0)
+                 if is_hybrid and existing_sparse_dim > 0:
+                     _logger.info(f"✓ Retrieved existing hybrid index: {index_name}")
+                 elif not is_hybrid and existing_sparse_dim == 0:
+                     _logger.info(f"✓ Retrieved existing dense index: {index_name}")
+                 elif is_hybrid and existing_sparse_dim == 0:
+                     _logger.warning(
+                         f"Index '{index_name}' exists as dense-only (sparse_dim=0) but hybrid was requested. "
+                         f"Using existing dense index."
+                     )
+                 else:
+                     _logger.warning(
+                         f"Index '{index_name}' exists as hybrid (sparse_dim={existing_sparse_dim}) "
+                         f"but dense-only was requested. Using existing hybrid index."
+                     )
+                 return index
+
+             except Exception as e:
+                 # Index doesn't exist, create new one
+                 if dimension is None:
+                     raise ValueError(
+                         f"Must provide dimension when creating a new {'hybrid' if is_hybrid else 'dense'} index"
+                     ) from e
+                 if is_hybrid and sparse_dim is None:
+                     raise ValueError(
+                         "Must provide sparse_dim when creating a new hybrid index"
+                     ) from e
+
+                 # Validate index name
+                 try:
+                     from endee.utils import is_valid_index_name
+
+                     if not is_valid_index_name(index_name):
+                         raise ValueError(
+                             f"Invalid index name. Index name must be alphanumeric and can "
+                             f"contain underscores and should be less than "
+                             f"{MAX_INDEX_NAME_LENGTH_ALLOWED} characters"
+                         )
+                 except ImportError:
+                     pass
+
+                 # Validate dimension
+                 if dimension > MAX_DIMENSION_ALLOWED:
+                     raise ValueError(
+                         f"Dimension cannot be greater than {MAX_DIMENSION_ALLOWED}"
+                     )
+
+                 # Validate sparse_dim
+                 if dim_sparse < 0:
+                     raise ValueError("sparse_dim cannot be negative")
+
+                 # Validate and map space_type
+                 space = SPACE_TYPE_MAP.get(
+                     (space_type or "cosine").lower(), (space_type or "cosine").lower()
+                 )
+                 if space not in SPACE_TYPES_VALID:
+                     raise ValueError(f"Invalid space type: {space}")
+
+                 # Validate precision
+                 if prec not in PRECISION_VALID:
+                     raise ValueError(
+                         f"Invalid precision: {prec}. Use one of {PRECISION_VALID}"
+                     )
+
+                 # Build create_index kwargs
+                 create_kwargs = {
+                     "name": index_name,
+                     "dimension": dimension,
+                     "space_type": space,
+                     "precision": prec,
+                     "sparse_dim": dim_sparse,
+                 }
+                 # Only add M and ef_con if provided
+                 if M is not None:
+                     create_kwargs["M"] = M
+                 if ef_con is not None:
+                     create_kwargs["ef_con"] = ef_con
+                 # Build log message
+                 index_type = "hybrid" if is_hybrid else "dense"
+                 log_msg = f"Creating new {index_type} index '{index_name}' (dimension={dimension}"
+                 if is_hybrid:
+                     log_msg += f", sparse_dim={dim_sparse}"
+                 if M is not None:
+                     log_msg += f", M={M}"
+                 if ef_con is not None:
+                     log_msg += f", ef_con={ef_con}"
+                 log_msg += ")..."
+
+                 _logger.info(log_msg)
+
+                 nd.create_index(**create_kwargs)
+                 _logger.info("✓ Index created successfully")
+                 return nd.get_index(name=index_name)
+
          except Exception as e:
-             if dimension is None:
-                 raise ValueError(
-                     "Must provide dimension when creating a new index"
-                 ) from e
-
-             # Create a new index if it doesn't exist
-             _logger.info(f"Creating new index: {index_name}")
-             nd.create_index(
-                 name=index_name,
-                 dimension=dimension,
-                 space_type=space_type,
-             )
-             return nd.get_index(name=index_name)
+             _logger.error(f"Error initializing Endee index: {e}")
+             raise

      @classmethod
      def from_params(
@@ -196,24 +314,107 @@
          dimension: Optional[int] = None,
          space_type: str = "cosine",
          batch_size: int = DEFAULT_BATCH_SIZE,
+         hybrid: bool = False,
+         sparse_dim: Optional[int] = None,
+         model_name: Optional[str] = None,
+         precision: Optional[str] = "float16",
+         M: Optional[int] = None,
+         ef_con: Optional[int] = None,
      ) -> "EndeeVectorStore":
-         """Create EndeeVectorStore from parameters."""
-         endee_index = cls._initialize_endee_index(
-             api_token, index_name, dimension, space_type
-         )
-
-         return cls(
-             endee_index=endee_index,
-             api_token=api_token,
-             index_name=index_name,
-             dimension=dimension,
-             space_type=space_type,
-             batch_size=batch_size,
-         )
+         """Create EndeeVectorStore from parameters.
+
+         Args:
+             api_token: API token for Endee service
+             index_name: Name of the index
+             dimension: Vector dimension
+             space_type: Distance metric ("cosine", "l2", or "ip")
+             batch_size: Batch size for operations
+             hybrid: If True, create/use a hybrid index. Auto-set to True if sparse_dim > 0.
+             sparse_dim: Sparse dimension for hybrid index. If > 0, hybrid is automatically enabled.
+             model_name: Model name or alias for sparse embeddings. Defaults to 'splade_pp' if not provided.
+                 Available: 'splade_pp', 'splade_cocondenser', 'bert_base', 'distilbert', etc.
+             precision: Precision for index. Use one of: "binary", "float16", "float32", "int16d", "int8d". Default "float16".
+             M: Optional HNSW M parameter (bi-directional links per node). If not provided, backend uses default.
+             ef_con: Optional HNSW ef_construction parameter. If not provided, backend uses default.
+         """
+         # Auto-enable hybrid if sparse_dim is provided and > 0
+         try:
+             if sparse_dim is not None and sparse_dim > 0:
+                 hybrid = True
+                 _logger.info(f"Auto-enabling hybrid mode (sparse_dim={sparse_dim} > 0)")
+
+             # Initialize index (unified method handles both dense and hybrid)
+             endee_index = cls._initialize_endee_index(
+                 api_token,
+                 index_name,
+                 dimension,
+                 space_type,
+                 precision,
+                 sparse_dim=sparse_dim if hybrid else None,
+                 M=M,
+                 ef_con=ef_con,
+             )
+
+             # Get actual index configuration from the backend
+             try:
+                 index_info = endee_index.describe()
+                 actual_index_name = index_info.get("name", index_name)
+                 actual_dimension = index_info.get("dimension", dimension)
+                 actual_space_type = index_info.get("space_type", space_type)
+                 actual_precision = index_info.get("precision", precision)
+                 actual_sparse_dim = index_info.get("sparse_dim", sparse_dim)
+             except Exception as e:
+                 _logger.warning(
+                     f"Could not get index info, using provided parameters: {e}"
+                 )
+                 # Fallback to provided parameters
+                 actual_index_name = index_name
+                 actual_dimension = dimension
+                 actual_space_type = space_type
+                 actual_precision = precision
+                 actual_sparse_dim = sparse_dim
+
+             # Determine if index is hybrid based on sparse_dim
+             actual_hybrid = actual_sparse_dim is not None and actual_sparse_dim > 0
+
+             return cls(
+                 endee_index=endee_index,
+                 api_token=api_token,
+                 index_name=actual_index_name,
+                 dimension=actual_dimension,
+                 space_type=actual_space_type,
+                 batch_size=batch_size,
+                 sparse_dim=actual_sparse_dim,
+                 hybrid=actual_hybrid,
+                 model_name=model_name,
+                 precision=actual_precision,
+                 M=M,
+                 ef_con=ef_con,
+             )
+         except Exception as e:
+             _logger.error(f"Error creating EndeeVectorStore from params: {e}")
+             raise

      @classmethod
      def class_name(cls) -> str:
-         return "EndeeVectorStore"
+         try:
+             return "EndeeVectorStore"
+         except Exception as e:
+             _logger.error(f"Error getting class name: {e}")
+             raise
+
+     def _compute_sparse_vectors(self, texts: List[str]) -> tuple:
+         """Compute sparse vectors for a list of texts."""
+         try:
+             if self._sparse_encoder is None:
+                 raise ValueError(
+                     "Sparse encoder not initialized. "
+                     "Please provide model_name when creating the store with hybrid=True."
+                 )
+             return self._sparse_encoder(texts)
+         except Exception as e:
+             _logger.error(f"Error computing sparse vectors: {e}")
+             raise

      def add(
          self,
@@ -224,193 +425,336 @@
          Add nodes to index.

          Args:
-             nodes: List[BaseNode]: list of nodes with embeddings
+             nodes: List of nodes with embeddings to add to the index.
+                 If index is configured for hybrid search (self.hybrid=True),
+                 sparse vectors will be automatically computed from node text.
          """
-         ids = []
-         entries = []
-
-         for node in nodes:
-             node_id = node.node_id
-             metadata = node_to_metadata_dict(node)
-
-             # Filter values must be simple key-value pairs
-             filter_data = {}
-             if "file_name" in metadata:
-                 filter_data["file_name"] = metadata["file_name"]
-             if "doc_id" in metadata:
-                 filter_data["doc_id"] = metadata["doc_id"]
-             if "category" in metadata:
-                 filter_data["category"] = metadata["category"]
-             if "difficulty" in metadata:
-                 filter_data["difficulty"] = metadata["difficulty"]
-             if "language" in metadata:
-                 filter_data["language"] = metadata["language"]
-             if "field" in metadata:
-                 filter_data["field"] = metadata["field"]
-             if "type" in metadata:
-                 filter_data["type"] = metadata["type"]
-             if "feature" in metadata:
-                 filter_data["feature"] = metadata["feature"]
-
-
-             entry = {
-                 "id": node_id,
-                 "vector": node.get_embedding(),
-                 "meta": metadata,
-                 "filter": filter_data
-             }
-
-             ids.append(node_id)
-             entries.append(entry)
-
-         # Batch insert to avoid hitting API limits
-         batch_size = self.batch_size
-         for i in range(0, len(entries), batch_size):
-             batch = entries[i : i + batch_size]
-             self._endee_index.upsert(batch)
-
-         return ids
+         try:
+             # Use instance hybrid setting
+             use_hybrid = self.hybrid
+
+             # Endee Index.upsert rejects duplicate IDs in a batch; dedupe by node_id (keep last)
+             seen: Dict[str, int] = {}
+             for idx, node in enumerate(nodes):
+                 seen[node.node_id] = idx
+             deduped_indices = sorted(seen.values())
+             nodes = [nodes[i] for i in deduped_indices]
+
+             ids = []
+             entries = []
+             texts = []
+
+             # Collect texts for sparse encoding if hybrid mode
+             if use_hybrid:
+                 for node in nodes:
+                     text = node.get_content()
+                     texts.append(text)
+
+                 # Compute sparse vectors in batch
+                 if self._sparse_encoder is not None and texts:
+                     sparse_indices, sparse_values = self._compute_sparse_vectors(texts)
+                 else:
+                     sparse_indices = [[] for _ in texts]
+                     sparse_values = [[] for _ in texts]
+
+             for i, node in enumerate(nodes):
+                 node_id = node.node_id
+                 metadata = node_to_metadata_dict(node)
+
+                 # Filter values must be simple key-value pairs
+                 filter_data = {}
+                 ref_id = getattr(node, "ref_doc_id", None) or metadata.get("ref_doc_id")
+                 if ref_id is not None:
+                     filter_data["ref_doc_id"] = ref_id
+                 if "file_name" in metadata:
+                     filter_data["file_name"] = metadata["file_name"]
+                 if "doc_id" in metadata:
+                     filter_data["doc_id"] = metadata["doc_id"]
+                 if "category" in metadata:
+                     filter_data["category"] = metadata["category"]
+                 if "difficulty" in metadata:
+                     filter_data["difficulty"] = metadata["difficulty"]
+                 if "language" in metadata:
+                     filter_data["language"] = metadata["language"]
+                 if "field" in metadata:
+                     filter_data["field"] = metadata["field"]
+                 if "type" in metadata:
+                     filter_data["type"] = metadata["type"]
+                 if "feature" in metadata:
+                     filter_data["feature"] = metadata["feature"]
+
+                 # Build entry for endee Index.upsert
+                 if use_hybrid:
+                     entry = {
+                         "id": node_id,
+                         "vector": node.get_embedding(),
+                         "sparse_indices": sparse_indices[i],
+                         "sparse_values": sparse_values[i],
+                         "meta": metadata,
+                         "filter": filter_data,
+                     }
+                 else:
+                     entry = {
+                         "id": node_id,
+                         "vector": node.get_embedding(),
+                         "meta": metadata,
+                         "filter": filter_data,
+                     }
+
+                 ids.append(node_id)
+                 entries.append(entry)
+
+             # Batch insert; endee Index.upsert allows max MAX_VECTORS_PER_BATCH per batch
+             batch_size = min(self.batch_size, MAX_VECTORS_PER_BATCH)
+             for i in range(0, len(entries), batch_size):
+                 batch = entries[i : i + batch_size]
+                 self._endee_index.upsert(batch)
+
+             return ids
+         except Exception as e:
+             _logger.error(f"Error adding nodes to index: {e}")
+             raise

      def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
          """
-         Delete nodes using with ref_doc_id.
-
-         Args:
-             ref_doc_id (str): The id of the document to delete.
+         Delete nodes by ref_doc_id using endee Index.delete_with_filter.
+         Only deletes vectors that were stored with ref_doc_id in their filter (see add()).
          """
          try:
-             self._endee_index.delete_with_filter({"doc_id": ref_doc_id})
+             # Filter format consistent with query: list of {field: {$op: value}}
+             filter_dict = [{"ref_doc_id": {"$eq": ref_doc_id}}]
+             self._endee_index.delete_with_filter(filter_dict)
          except Exception as e:
-             _logger.error(f"Error deleting vectors for doc_id {ref_doc_id}: {e}")
+             _logger.error(f"Error deleting by ref_doc_id {ref_doc_id!r}: {e}")
+             raise

      @property
      def client(self) -> Any:
          """Return Endee index client."""
-         return self._endee_index
+         try:
+             return self._endee_index
+         except Exception as e:
+             _logger.error(f"Error getting client: {e}")
+             raise
+
+     def describe(self) -> Dict[str, Any]:
+         """Get index metadata (endee Index.describe())."""
+         try:
+             return self._endee_index.describe()
+         except Exception as e:
+             _logger.error(f"Error describing index: {e}")
+             return {}

-     def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
+     def fetch(self, ids: List[str]) -> List[Dict[str, Any]]:
+         """Fetch vectors by IDs (uses endee Index.get_vector per id)."""
+         out: List[Dict[str, Any]] = []
+         for id_ in ids:
+             try:
+                 out.append(self._endee_index.get_vector(id_))
+             except Exception as e:
+                 _logger.error(f"Error fetching vector id {id_}: {e}")
+         return out
+
+     def query(
+         self,
+         query: VectorStoreQuery,
+         ef: int = DEFAULT_EF_SEARCH,
+         **kwargs: Any,
+     ) -> VectorStoreQueryResult:
          """
          Query index for top k most similar nodes.

          Args:
-             query: VectorStoreQuery object containing query parameters
+             query: VectorStoreQuery object containing query parameters:
+                 - query_embedding: Dense vector for search
+                 - query_str: Text query for sparse search (used if index is hybrid)
+                 - similarity_top_k: Number of results to return
+                 - filters: Optional metadata filters
+                 - alpha: Optional weighting for hybrid search (0=sparse, 1=dense)
+             ef: HNSW ef_search parameter (default 128, max 1024).
+                 Controls search quality vs speed tradeoff.
          """
-         if not hasattr(self._endee_index, 'dimension'):
-             # Get dimension from index if available, otherwise try to infer from query
-             try:
-                 dimension = self._endee_index.describe()["dimension"]
-             except:
-                 if query.query_embedding is not None:
-                     dimension = len(query.query_embedding)
-                 else:
-                     raise ValueError("Could not determine vector dimension")
-         else:
-             dimension = self._endee_index.dimension
-
-         query_embedding = [0.0] * dimension  # Default empty vector
-         filters = {}
-
-         # Apply any metadata filters if provided
-         if query.filters is not None:
-             for filter_item in query.filters.filters:
-                 # Case 1: MetadataFilter object
-                 if hasattr(filter_item, "key") and hasattr(filter_item, "value") and hasattr(filter_item, "operator"):
-                     op_symbol = reverse_operator_map.get(filter_item.operator)
-                     if not op_symbol:
-                         raise ValueError(f"Unsupported filter operator: {filter_item.operator}")
-
-                     if filter_item.key not in filters:
-                         filters[filter_item.key] = {}
-
-                     filters[filter_item.key][op_symbol] = filter_item.value
-
-                 # Case 2: Raw dict, e.g. {"category": {"$eq": "programming"}}
-                 elif isinstance(filter_item, dict):
-                     for key, op_dict in filter_item.items():
-                         if isinstance(op_dict, dict):
-                             for op, val in op_dict.items():
-                                 if key not in filters:
-                                     filters[key] = {}
-                                 filters[key][op] = val
-                 else:
-                     raise ValueError(f"Unsupported filter format: {filter_item}")
+         # Use index configuration to determine hybrid mode
+         try:
+             use_hybrid = self.hybrid
+
+             # Log the mode being used
+             _logger.info(
+                 f"Using {'hybrid' if use_hybrid else 'dense-only'} search (index configured with hybrid={self.hybrid})"
+             )

-         _logger.info(f"Final structured filters: {filters}")
+             if not hasattr(self._endee_index, "dimension"):
+                 # Get dimension from index if available, otherwise try to infer from query
+                 try:
+                     dimension = self._endee_index.describe()["dimension"]
+                 except Exception as e:
+                     _logger.warning(f"Could not get dimension from index: {e}")
+                     if query.query_embedding is not None:
+                         dimension = len(query.query_embedding)
+                     else:
+                         raise ValueError("Could not determine vector dimension")
+             else:
+                 dimension = self._endee_index.dimension
+
+             query_embedding = [0.0] * dimension  # Default empty vector
+             filters = {}
+             # Apply any metadata filters if provided
+             if query.filters is not None:
+                 for filter_item in query.filters.filters:
+                     # Case 1: MetadataFilter object
+                     if (
+                         hasattr(filter_item, "key")
+                         and hasattr(filter_item, "value")
+                         and hasattr(filter_item, "operator")
+                     ):
+                         if filter_item.operator not in SUPPORTED_FILTER_OPERATORS:
+                             raise ValueError(
+                                 f"Unsupported filter operator: {filter_item.operator}. "
+                                 "Supported filter operations: EQ ($eq), IN ($in)."
+                             )
+                         op_symbol = REVERSE_OPERATOR_MAP[filter_item.operator]
+                         if filter_item.key not in filters:
+                             filters[filter_item.key] = {}
+                         filters[filter_item.key][op_symbol] = filter_item.value
+
+                     # Case 2: Raw dict, e.g. {"category": {"$eq": "programming"}}
+                     elif isinstance(filter_item, dict):
+                         for key, op_dict in filter_item.items():
+                             if isinstance(op_dict, dict):
+                                 for op, val in op_dict.items():
+                                     if key not in filters:
+                                         filters[key] = {}
+                                     filters[key][op] = val
+                     else:
+                         raise ValueError(f"Unsupported filter format: {filter_item}")
+
+             _logger.info(f"Final structured filters: {filters}")
+
+             # Endee API expects filter as array: [{"field": {"$op": value}}, ...]
+             filter_for_api: Optional[List[Dict[str, Any]]] = None
+             if filters:
+                 filter_for_api = [{field: ops} for field, ops in filters.items()]
+                 _logger.info(f"Filter sent to backend API: {filter_for_api}")
+
+             # Use the query embedding if provided
+             if query.query_embedding is not None:
+                 query_embedding = cast(List[float], query.query_embedding)
+                 if query.alpha is not None and use_hybrid:
+                     # Apply alpha scaling in hybrid mode
+                     query_embedding = [v * query.alpha for v in query_embedding]
+
+             # Sparse query components for hybrid (endee Index.query uses sparse_indices, sparse_values)
+             sparse_indices_q: Optional[List[int]] = None
+             sparse_values_q: Optional[List[float]] = None
+             if use_hybrid:
+                 # Get query text from query.query_str
+                 query_text = getattr(query, "query_str", None)
+                 if query_text and self._sparse_encoder is not None:
+                     _logger.info(
+                         f"Processing sparse vectors for hybrid search with query_str: '{query_text[:100]}...'"
+                     )
+                     si, sv = self._compute_sparse_vectors([query_text])
+                     sparse_indices_q = si[0]
+                     sparse_values_q = [float(v) for v in sv[0]]
+                     _logger.info(f"Generated {len(sparse_indices_q)} sparse features")
+                 elif query_text:
+                     _logger.warning(
+                         "Hybrid mode enabled but no sparse encoder available"
+                     )
+                 else:
+                     _logger.warning(
+                         "Hybrid mode enabled but no query_str provided in VectorStoreQuery"
+                     )
+             else:
+                 _logger.info("Using dense-only search (not hybrid mode)")

-         # Use the query embedding if provided
-         if query.query_embedding is not None:
-             query_embedding = cast(List[float], query.query_embedding)
-             if query.alpha is not None and query.mode == VectorStoreQueryMode.HYBRID:
-                 # Apply alpha scaling in hybrid mode
-                 query_embedding = [v * query.alpha for v in query_embedding]
+             # Cap to endee limits (MAX_TOP_K_ALLOWED=512, MAX_EF_SEARCH_ALLOWED=1024)
+             requested_top_k = (
+                 query.similarity_top_k if query.similarity_top_k is not None else 10
+             )
+             top_k = min(requested_top_k, MAX_TOP_K_ALLOWED)
+             ef_capped = min(ef, MAX_EF_SEARCH_ALLOWED)
+
+             # Build query kwargs - only include optional parameters if they have values
+             query_kwargs = {
+                 "vector": query_embedding,
+                 "top_k": top_k,
+                 "ef": ef_capped,
+                 "include_vectors": True,
+             }

-         # Execute query
-         try:
-             results = self._endee_index.query(
-                 vector=query_embedding,
-                 top_k=query.similarity_top_k,
-                 filter=filters if filters else None,
-                 include_vectors=True
+             # Only add filter if provided
+             if filter_for_api is not None:
+                 query_kwargs["filter"] = filter_for_api
+
+             # Only add sparse vectors if provided (for hybrid search)
+             if sparse_indices_q is not None:
+                 query_kwargs["sparse_indices"] = sparse_indices_q
+             if sparse_values_q is not None:
+                 query_kwargs["sparse_values"] = sparse_values_q
+             # Use endee Index.query
+             try:
+                 results = self._endee_index.query(**query_kwargs)
+             except Exception as e:
+                 _logger.error(f"Error querying Endee: {e}")
+                 return VectorStoreQueryResult(nodes=[], similarities=[], ids=[])
+
+             # Process results
+             nodes = []
+             similarities = []
+             ids = []
+
+             for result in results:
+                 node_id = result["id"]
+                 score = result.get("similarity", result.get("score", 0.0))
+                 metadata = result.get("meta", {})
+
+                 # Create node from metadata
+                 if self.flat_metadata:
+                     node = metadata_dict_to_node(
+                         metadata=metadata,
+                         text=metadata.pop(self.text_key, None),
+                         id_=node_id,
+                     )
+                 else:
+                     metadata_dict, node_info, relationships = (
+                         legacy_metadata_dict_to_node(
+                             metadata=metadata,
+                             text_key=self.text_key,
+                         )
+                     )
+
+                     # Create TextNode with the extracted metadata
+                     # Step 1: Get the JSON string from "_node_content"
+                     _node_content_str = metadata.get("_node_content", "{}")
+
+                     # Step 2: Convert JSON string to Python dict
+                     try:
+                         node_content = json.loads(_node_content_str)
+                     except json.JSONDecodeError:
+                         node_content = {}
+
+                     # Step 3: Get the text
+                     text = node_content.get(self.text_key, "")
+                     node = TextNode(
+                         text=text,
+                         metadata=metadata_dict,
+                         relationships=relationships,
+                         node_id=node_id,
+                     )
+
+                     # Add any node_info properties to the node
+                     for key, val in node_info.items():
+                         if hasattr(node, key):
+                             setattr(node, key, val)
+
+                 # If embedding was returned in the results, add it to the node
+                 if "vector" in result:
+                     node.embedding = result["vector"]
+
+                 nodes.append(node)
+                 similarities.append(score)
+                 ids.append(node_id)
+
+             return VectorStoreQueryResult(
+                 nodes=nodes, similarities=similarities, ids=ids
              )
          except Exception as e:
-             _logger.error(f"Error querying Endee: {e}")
-             return VectorStoreQueryResult(nodes=[], similarities=[], ids=[])
-
-         # Process results
-         nodes = []
-         similarities = []
-         ids = []
-
-         for result in results:
-             node_id = result["id"]
-             score = result["similarity"]
-
-             # Get metadata from result
-             metadata = result.get("meta", {})
-
-             # Create node from metadata
-             if self.flat_metadata:
-                 node = metadata_dict_to_node(
-                     metadata=metadata,
-                     text=metadata.pop(self.text_key, None),
-                     id_=node_id,
-                 )
-             else:
-                 metadata_dict, node_info, relationships = legacy_metadata_dict_to_node(
-                     metadata=metadata,
-                     text_key=self.text_key,
-                 )
-
-                 # Create TextNode with the extracted metadata
-                 # Step 1: Get the JSON string from "_node_content"
-                 _node_content_str = metadata.get("_node_content", "{}")
-
-                 # Step 2: Convert JSON string to Python dict
-                 try:
-                     node_content = json.loads(_node_content_str)
-                 except json.JSONDecodeError:
-                     node_content = {}
-
-                 # Step 3: Get the text
-                 text = node_content.get(self.text_key, "")
-                 node = TextNode(
-                     text=text,
-                     metadata=metadata_dict,
-                     relationships=relationships,
-                     node_id=node_id,
-                 )
-
-                 # Add any node_info properties to the node
-                 for key, val in node_info.items():
-                     if hasattr(node, key):
-                         setattr(node, key, val)
-
-                 # If embedding was returned in the results, add it to the node
-                 if "vector" in result:
-                     node.embedding = result["vector"]
-
-                 nodes.append(node)
-                 similarities.append(score)
-                 ids.append(node_id)
-
-         return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
+             _logger.error(f"Error querying index: {e}")
+             raise
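
[Editor's note] For orientation, a minimal usage sketch of the new hybrid path in 0.1.5a1, assembled only from the signatures and docstrings in this diff. The token, index name, dimension, and query text are placeholders, and the sparse_dim value is an assumption (a BERT-style vocabulary size); this is not an official example from the package.

from llama_index.core.vector_stores.types import VectorStoreQuery
from endee_llamaindex.base import EndeeVectorStore

# sparse_dim > 0 auto-enables hybrid mode; model_name defaults to "splade_pp".
store = EndeeVectorStore.from_params(
    api_token="YOUR_ENDEE_TOKEN",   # placeholder
    index_name="my_docs",           # alphanumeric + underscores
    dimension=1536,                 # dense embedding size (placeholder)
    space_type="cosine",            # or "l2" / "ip"
    precision="float16",
    sparse_dim=30522,               # assumption: vocab-sized sparse dimension
)

# add() dedupes node IDs, derives each vector's "filter" dict from selected
# metadata keys (including ref_doc_id), and, in hybrid mode, encodes node
# text into sparse_indices/sparse_values before batched upserts.
# store.add(nodes)  # nodes must already carry dense embeddings

# query() sends the dense vector plus, in hybrid mode, a sparse encoding of
# query_str; filters are translated into [{"field": {"$op": value}}] and only
# EQ/IN operators are accepted by this store.
result = store.query(
    VectorStoreQuery(
        query_embedding=[0.0] * 1536,        # placeholder dense query vector
        query_str="what is hybrid search?",  # used for the sparse side
        similarity_top_k=5,                  # capped at 512
    ),
    ef=128,  # HNSW ef_search, capped at 1024
)
for node, score in zip(result.nodes, result.similarities):
    print(score, node.node_id)

One behavioral consequence of this release worth noting: delete() now matches on the ref_doc_id key that the new add() writes into each vector's filter, so vectors upserted by 0.1.2 (which stored no ref_doc_id filter) will not be matched by delete() after upgrading.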