agno 2.2.6__py3-none-any.whl → 2.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,687 @@
+ import asyncio
+ from typing import Any, Dict, List, Optional
+
+ try:
+     from redis import Redis
+     from redis.asyncio import Redis as AsyncRedis
+     from redisvl.index import AsyncSearchIndex, SearchIndex
+     from redisvl.query import FilterQuery, HybridQuery, TextQuery, VectorQuery
+     from redisvl.query.filter import Tag
+     from redisvl.redis.utils import array_to_buffer, convert_bytes
+     from redisvl.schema import IndexSchema
+ except ImportError:
+     raise ImportError("`redis` and `redisvl` not installed. Please install using `pip install redis redisvl`")
+
+ from agno.knowledge.document import Document
+ from agno.knowledge.embedder import Embedder
+ from agno.utils.log import log_debug, log_info, logger
+ from agno.utils.string import hash_string_sha256
+ from agno.vectordb.base import VectorDb
+ from agno.vectordb.distance import Distance
+ from agno.vectordb.search import SearchType
+
+
+ class RedisDB(VectorDb):
+     """
+     RedisDB class for managing vector operations with Redis and RedisVL.
+
+     This class provides methods for creating, inserting, searching, and managing
+     vector data in a Redis database using the RedisVL library.
+     """
+
+     def __init__(
+         self,
+         index_name: str,
+         redis_url: Optional[str] = None,
+         redis_client: Optional[Redis] = None,
+         embedder: Optional[Embedder] = None,
+         search_type: SearchType = SearchType.vector,
+         distance: Distance = Distance.cosine,
+         vector_score_weight: float = 0.7,
+         **redis_kwargs,
+     ):
+         """
+         Initialize the RedisDB instance.
+
+         Args:
+             index_name (str): Name of the Redis index to store vector data.
+             redis_url (Optional[str]): Redis connection URL.
+             redis_client (Optional[redis.Redis]): Redis client instance.
+             embedder (Optional[Embedder]): Embedder instance for creating embeddings.
+             search_type (SearchType): Type of search to perform.
+             distance (Distance): Distance metric for vector comparisons.
+             vector_score_weight (float): Weight for vector similarity in hybrid search.
+             **redis_kwargs: Additional Redis connection parameters.
+         """
+         if not index_name:
+             raise ValueError("Index name must be provided.")
+
+         if redis_client is None and redis_url is None:
+             raise ValueError("Either 'redis_url' or 'redis_client' must be provided.")
+
+         self.redis_url = redis_url
+
+         # Initialize Redis client
+         if redis_client is None:
+             assert redis_url is not None
+             self.redis_client = Redis.from_url(redis_url, **redis_kwargs)
+         else:
+             self.redis_client = redis_client
+
+         # Index settings
+         self.index_name: str = index_name
+
+         # Embedder for embedding the document contents
+         if embedder is None:
+             from agno.knowledge.embedder.openai import OpenAIEmbedder
+
+             embedder = OpenAIEmbedder()
+             log_info("Embedder not provided, using OpenAIEmbedder as default.")
+
+         self.embedder: Embedder = embedder
+         self.dimensions: Optional[int] = self.embedder.dimensions
+
+         if self.dimensions is None:
+             raise ValueError("Embedder.dimensions must be set.")
+
+         # Search type and distance metric
+         self.search_type: SearchType = search_type
+         self.distance: Distance = distance
+         self.vector_score_weight: float = vector_score_weight
+
+         # Create index schema
+         self.schema = self._get_schema()
+         self.index = self._create_index()
+         self.meta_data_fields: set[str] = set()
+
+         # Async components - created lazily when needed
+         self._async_redis_client: Optional[AsyncRedis] = None
+         self._async_index: Optional[AsyncSearchIndex] = None
+
+         log_debug(f"Initialized Redis with index '{self.index_name}'")
+
+     async def _get_async_index(self) -> AsyncSearchIndex:
+         """Get or create the async index and client."""
+         if self._async_index is None:
+             if self.redis_url is None:
+                 raise ValueError("redis_url must be provided for async operations")
+             url: str = self.redis_url
+             self._async_redis_client = AsyncRedis.from_url(url)
+             self._async_index = AsyncSearchIndex(schema=self.schema, redis_client=self._async_redis_client)
+         return self._async_index
+
+     def _get_schema(self):
+         """Get the default Redis index schema."""
+         distance_mapping = {
+             Distance.cosine: "cosine",
+             Distance.l2: "l2",
+             Distance.max_inner_product: "ip",
+         }
+
+         return IndexSchema.from_dict(
+             {
+                 "index": {
+                     "name": self.index_name,
+                     "prefix": f"{self.index_name}:",
+                     "storage_type": "hash",
+                 },
+                 "fields": [
+                     {"name": "id", "type": "tag"},
+                     {"name": "name", "type": "tag"},
+                     {"name": "content", "type": "text"},
+                     {"name": "content_hash", "type": "tag"},
+                     {"name": "content_id", "type": "tag"},
+                     # Common metadata fields used in operations/tests
+                     {"name": "status", "type": "tag"},
+                     {"name": "category", "type": "tag"},
+                     {"name": "tag", "type": "tag"},
+                     {"name": "source", "type": "tag"},
+                     {"name": "mode", "type": "tag"},
+                     {
+                         "name": "embedding",
+                         "type": "vector",
+                         "attrs": {
+                             "dims": self.dimensions,
+                             "distance_metric": distance_mapping[self.distance],
+                             "algorithm": "flat",
+                         },
+                     },
+                 ],
+             }
+         )
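+
+     # Storage sketch: with the schema above, each document becomes a Redis HASH
+     # stored under a key derived from the index prefix and the document id.
+     # For index_name="docs", roughly:
+     #
+     #     HGETALL docs:<doc-id>
+     #     "id"           -> "<doc-id>"
+     #     "name"         -> "my-doc"
+     #     "content"      -> "hello world"
+     #     "content_hash" -> "abc123"
+     #     "embedding"    -> <binary float32 buffer, dims * 4 bytes>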
+
+     def _create_index(self) -> SearchIndex:
+         """Create the RedisVL index object for this schema."""
+         return SearchIndex(self.schema, redis_url=self.redis_url)
+
+     def create(self) -> None:
+         """Create the Redis index if it does not exist."""
+         try:
+             if not self.exists():
+                 self.index.create()
+                 log_debug(f"Created Redis index: {self.index_name}")
+             else:
+                 log_debug(f"Redis index already exists: {self.index_name}")
+         except Exception as e:
+             logger.error(f"Error creating Redis index: {e}")
+             raise
+
+     async def async_create(self) -> None:
+         """Async version of create method."""
+         try:
+             async_index = await self._get_async_index()
+             await async_index.create(overwrite=False, drop=False)
+             log_debug(f"Created Redis index: {self.index_name}")
+         except Exception as e:
+             if "already exists" in str(e).lower():
+                 log_debug(f"Redis index already exists: {self.index_name}")
+             else:
+                 logger.error(f"Error creating Redis index: {e}")
+                 raise
+
+     def doc_exists(self, document: Document) -> bool:
+         """Check if a document exists in the index."""
+         try:
+             doc_id = document.id or hash_string_sha256(document.content)
+             return self.id_exists(doc_id)
+         except Exception as e:
+             logger.error(f"Error checking if document exists: {e}")
+             return False
+
+     async def async_doc_exists(self, document: Document) -> bool:
+         """Async version of doc_exists method."""
+         try:
+             doc_id = document.id or hash_string_sha256(document.content)
+             async_index = await self._get_async_index()
+             id_filter = Tag("id") == doc_id
+             query = FilterQuery(
+                 filter_expression=id_filter,
+                 return_fields=["id"],
+                 num_results=1,
+             )
+             results = await async_index.query(query)
+             return len(results) > 0
+         except Exception as e:
+             logger.error(f"Error checking if document exists: {e}")
+             return False
+
+     def name_exists(self, name: str) -> bool:
+         """Check if a document with the given name exists."""
+         try:
+             name_filter = Tag("name") == name
+             query = FilterQuery(
+                 filter_expression=name_filter,
+                 return_fields=["id"],
+                 num_results=1,
+             )
+             results = self.index.query(query)
+             return len(results) > 0
+         except Exception as e:
+             logger.error(f"Error checking if name exists: {e}")
+             return False
+
+     async def async_name_exists(self, name: str) -> bool:  # type: ignore[override]
+         """Async version of name_exists method."""
+         try:
+             async_index = await self._get_async_index()
+             name_filter = Tag("name") == name
+             query = FilterQuery(
+                 filter_expression=name_filter,
+                 return_fields=["id"],
+                 num_results=1,
+             )
+             results = await async_index.query(query)
+             return len(results) > 0
+         except Exception as e:
+             logger.error(f"Error checking if name exists: {e}")
+             return False
+
+     def id_exists(self, id: str) -> bool:
+         """Check if a document with the given ID exists."""
+         try:
+             id_filter = Tag("id") == id
+             query = FilterQuery(
+                 filter_expression=id_filter,
+                 return_fields=["id"],
+                 num_results=1,
+             )
+             results = self.index.query(query)
+             return len(results) > 0
+         except Exception as e:
+             logger.error(f"Error checking if ID exists: {e}")
+             return False
+
+     def content_hash_exists(self, content_hash: str) -> bool:
+         """Check if a document with the given content hash exists."""
+         try:
+             content_hash_filter = Tag("content_hash") == content_hash
+             query = FilterQuery(
+                 filter_expression=content_hash_filter,
+                 return_fields=["id"],
+                 num_results=1,
+             )
+             results = self.index.query(query)
+             return len(results) > 0
+         except Exception as e:
+             logger.error(f"Error checking if content hash exists: {e}")
+             return False
+
+     def _parse_redis_hash(self, doc: Document) -> Dict[str, Any]:
+         """
+         Create an object serializable into a Redis HASH structure.
+         """
+         doc_dict = doc.to_dict()
+         # Ensure an ID is present; derive a deterministic one from content when missing
+         doc_id = doc.id or hash_string_sha256(doc.content)
+         doc_dict["id"] = doc_id
+         if not doc.embedding:
+             doc.embed(self.embedder)
+
+         # TODO: determine how we want to handle dtypes
+         doc_dict["embedding"] = array_to_buffer(doc.embedding, "float32")
+
+         # Add content_id if available
+         if hasattr(doc, "content_id") and doc.content_id:
+             doc_dict["content_id"] = doc.content_id
+
+         # Flatten metadata into top-level hash fields and track the field names
+         if "meta_data" in doc_dict:
+             meta_data = doc_dict.pop("meta_data", {})
+             for md in meta_data:
+                 self.meta_data_fields.add(md)
+             doc_dict.update(meta_data)
+
+         return doc_dict
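+
+     # Shape sketch (illustrative values): Document(name="a", content="hi",
+     # meta_data={"category": "x"}) is flattened to roughly
+     #     {"id": "<sha256 of content>", "name": "a", "content": "hi",
+     #      "category": "x", "embedding": b"<float32 buffer>"}
+     # so metadata keys become top-level, filterable hash fields.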
+
+     def insert(
+         self,
+         content_hash: str,
+         documents: List[Document],
+         filters: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         """Insert documents into the Redis index."""
+         try:
+             # Store content hash for tracking
+             parsed_documents = []
+             for doc in documents:
+                 parsed_doc = self._parse_redis_hash(doc)
+                 parsed_doc["content_hash"] = content_hash
+                 parsed_documents.append(parsed_doc)
+
+             self.index.load(parsed_documents, id_field="id")
+             log_debug(f"Inserted {len(documents)} documents with content_hash: {content_hash}")
+         except Exception as e:
+             logger.error(f"Error inserting documents: {e}")
+             raise
+
+     async def async_insert(
+         self,
+         content_hash: str,
+         documents: List[Document],
+         filters: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         """Async version of insert method."""
+         try:
+             async_index = await self._get_async_index()
+             parsed_documents = []
+             for doc in documents:
+                 parsed_doc = self._parse_redis_hash(doc)
+                 parsed_doc["content_hash"] = content_hash
+                 parsed_documents.append(parsed_doc)
+             await async_index.load(parsed_documents, id_field="id")
+             log_debug(f"Inserted {len(documents)} documents with content_hash: {content_hash}")
+         except Exception as e:
+             logger.error(f"Error inserting documents: {e}")
+             raise
+
+     def upsert_available(self) -> bool:
+         """Check if upsert is available (always True for Redis)."""
+         return True
+
+     def upsert(
+         self,
+         content_hash: str,
+         documents: List[Document],
+         filters: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         """Upsert documents into the Redis index.
+         Strategy: delete existing docs with the same content_hash, then insert new docs.
+         """
+         try:
+             # Find existing docs for this content_hash and delete them
+             ch_filter = Tag("content_hash") == content_hash
+             query = FilterQuery(
+                 filter_expression=ch_filter,
+                 return_fields=["id"],
+                 num_results=1000,
+             )
+             existing = self.index.query(query)
+             parsed = convert_bytes(existing)
+             for r in parsed:
+                 key = r.get("id")
+                 if key:
+                     self.index.drop_keys(key)
+
+             # Insert new docs
+             self.insert(content_hash, documents, filters)
+         except Exception as e:
+             logger.error(f"Error upserting documents: {e}")
+             raise
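+
+     # Upsert sketch (illustrative; db and chunks_* are placeholder names):
+     # re-ingesting the same logical content replaces the previous chunks for that
+     # content_hash instead of accumulating duplicates:
+     #
+     #     db.upsert(content_hash="abc123", documents=chunks_v1)
+     #     db.upsert(content_hash="abc123", documents=chunks_v2)  # v1 rows dropped first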
+
+     async def async_upsert(
+         self,
+         content_hash: str,
+         documents: List[Document],
+         filters: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         """Async version of upsert method.
+         Strategy: delete existing docs with the same content_hash, then insert new docs.
+         """
+         try:
+             async_index = await self._get_async_index()
+
+             # Find existing docs for this content_hash and delete them
+             ch_filter = Tag("content_hash") == content_hash
+             query = FilterQuery(
+                 filter_expression=ch_filter,
+                 return_fields=["id"],
+                 num_results=1000,
+             )
+             existing = await async_index.query(query)
+             parsed = convert_bytes(existing)
+             for r in parsed:
+                 key = r.get("id")
+                 if key:
+                     await async_index.drop_keys(key)
+
+             # Insert new docs
+             await self.async_insert(content_hash, documents, filters)
+         except Exception as e:
+             logger.error(f"Error upserting documents: {e}")
+             raise
+
+     def search(self, query: str, limit: int = 5, filters: Optional[Dict[str, Any]] = None) -> List[Document]:
+         """Search for documents using the specified search type."""
+         try:
+             if self.search_type == SearchType.vector:
+                 return self.vector_search(query, limit)
+             elif self.search_type == SearchType.keyword:
+                 return self.keyword_search(query, limit)
+             elif self.search_type == SearchType.hybrid:
+                 return self.hybrid_search(query, limit)
+             else:
+                 raise ValueError(f"Unsupported search type: {self.search_type}")
+         except Exception as e:
+             logger.error(f"Error in search: {e}")
+             return []
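+
+     # Dispatch sketch: the search mode is fixed at construction time, e.g.
+     #     db = RedisDB(index_name="docs", redis_url=..., search_type=SearchType.hybrid)
+     #     db.search("query")  # routed through hybrid_search()
+     # Note that `filters` is accepted here but not applied by the underlying
+     # vector/keyword/hybrid search methods.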
+
+     async def async_search(
+         self, query: str, limit: int = 5, filters: Optional[Dict[str, Any]] = None
+     ) -> List[Document]:
+         """Async version of search method."""
+         return await asyncio.to_thread(self.search, query, limit, filters)
+
+     def vector_search(self, query: str, limit: int = 5) -> List[Document]:
+         """Perform vector similarity search."""
+         try:
+             # Get query embedding
+             query_embedding = array_to_buffer(self.embedder.get_embedding(query), "float32")
+
+             # TODO: do we want to pass back the embedding?
+             # Create vector query
+             vector_query = VectorQuery(
+                 vector=query_embedding,
+                 vector_field_name="embedding",
+                 return_fields=["id", "name", "content"],
+                 return_score=False,
+                 num_results=limit,
+             )
+
+             # Execute search
+             results = self.index.query(vector_query)
+
+             # Decode any bytes fields, then convert results to documents
+             parsed = convert_bytes(results)
+             documents = [Document.from_dict(p) for p in parsed]
+
+             return documents
+         except Exception as e:
+             logger.error(f"Error in vector search: {e}")
+             return []
+
+     def keyword_search(self, query: str, limit: int = 5) -> List[Document]:
+         """Perform keyword search using Redis text search."""
+         try:
+             # Create text query (honor the requested result limit)
+             text_query = TextQuery(
+                 text=query,
+                 text_field_name="content",
+                 num_results=limit,
+             )
+
+             # Execute search
+             results = self.index.query(text_query)
+
+             # Decode any bytes fields
+             parsed = convert_bytes(results)
+
+             # Convert results to documents
+             documents = [Document.from_dict(p) for p in parsed]
+
+             return documents
+         except Exception as e:
+             logger.error(f"Error in keyword search: {e}")
+             return []
+
+     def hybrid_search(self, query: str, limit: int = 5) -> List[Document]:
+         """Perform hybrid search combining vector and keyword search."""
+         try:
+             # Get query embedding
+             query_embedding = array_to_buffer(self.embedder.get_embedding(query), "float32")
+
+             # Create hybrid query
+             hybrid_query = HybridQuery(
+                 vector=query_embedding,
+                 vector_field_name="embedding",
+                 text=query,
+                 text_field_name="content",
+                 alpha=self.vector_score_weight,
+                 return_fields=["id", "name", "content"],
+                 num_results=limit,
+             )
+
+             # Execute search
+             results = self.index.query(hybrid_query)
+             parsed = convert_bytes(results)
+
+             # Convert results to documents
+             documents = [Document.from_dict(p) for p in parsed]
+
+             return documents
+         except Exception as e:
+             logger.error(f"Error in hybrid search: {e}")
+             return []
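+
+     # Scoring sketch (assumption about RedisVL's HybridQuery aggregation): results
+     # are ranked by a weighted blend of the two signals, roughly
+     #     score = alpha * vector_similarity + (1 - alpha) * text_score
+     # so the default vector_score_weight=0.7 favors semantic similarity over
+     # keyword (BM25) relevance.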
+
+     def drop(self) -> bool:  # type: ignore[override]
+         """Drop the Redis index."""
+         try:
+             self.index.delete(drop=True)
+             log_debug(f"Deleted Redis index: {self.index_name}")
+             return True
+         except Exception as e:
+             logger.error(f"Error dropping Redis index: {e}")
+             return False
+
+     async def async_drop(self) -> None:
+         """Async version of drop method."""
+         try:
+             async_index = await self._get_async_index()
+             await async_index.delete(drop=True)
+             log_debug(f"Deleted Redis index: {self.index_name}")
+         except Exception as e:
+             logger.error(f"Error dropping Redis index: {e}")
+             raise
+
+     def exists(self) -> bool:
+         """Check if the Redis index exists."""
+         try:
+             return self.index.exists()
+         except Exception as e:
+             logger.error(f"Error checking if index exists: {e}")
+             return False
+
+     async def async_exists(self) -> bool:
+         """Async version of exists method."""
+         try:
+             async_index = await self._get_async_index()
+             return await async_index.exists()
+         except Exception as e:
+             logger.error(f"Error checking if index exists: {e}")
+             return False
+
+     def optimize(self) -> None:
+         """Optimize the Redis index (no-op for Redis)."""
+         log_debug("Redis optimization not required")
+
+     def delete(self) -> bool:
+         """Delete all documents from the index (the index definition itself is kept)."""
+         try:
+             self.index.clear()
+             return True
+         except Exception as e:
+             logger.error(f"Error clearing Redis index: {e}")
+             return False
+
+     def delete_by_id(self, id: str) -> bool:
+         """Delete documents by ID."""
+         try:
+             # Use RedisVL to drop documents by document ID
+             result = self.index.drop_documents(id)
+             log_debug(f"Deleted document with id '{id}' from Redis index")
+             return result > 0
+         except Exception as e:
+             logger.error(f"Error deleting document by ID: {e}")
+             return False
+
+     def delete_by_name(self, name: str) -> bool:
+         """Delete documents by name."""
+         try:
+             # First find documents with the given name
+             name_filter = Tag("name") == name
+             query = FilterQuery(
+                 filter_expression=name_filter,
+                 return_fields=["id"],
+                 num_results=1000,  # Get all matching documents
+             )
+             results = self.index.query(query)
+             parsed = convert_bytes(results)
+
+             # Delete each found document by key (result['id'] is the Redis key)
+             deleted_count = 0
+             for result in parsed:
+                 key = result.get("id")
+                 if key:
+                     deleted_count += self.index.drop_keys(key)
+
+             log_debug(f"Deleted {deleted_count} documents with name '{name}'")
+             return deleted_count > 0
+         except Exception as e:
+             logger.error(f"Error deleting documents by name: {e}")
+             return False
+
+     def delete_by_metadata(self, metadata: Dict[str, Any]) -> bool:
+         """Delete documents by metadata."""
+         try:
+             # Build filter expression for metadata using Tag filters
+             filters = []
+             for key, value in metadata.items():
+                 filters.append(Tag(key) == str(value))
+             if not filters:
+                 return False
+
+             # Combine filters with AND logic
+             combined_filter = filters[0]
+             for f in filters[1:]:
+                 combined_filter = combined_filter & f
+
+             # Find documents with the given metadata
+             query = FilterQuery(
+                 filter_expression=combined_filter,
+                 return_fields=["id"],
+                 num_results=1000,  # Get all matching documents
+             )
+             results = self.index.query(query)
+             parsed = convert_bytes(results)
+
+             # Delete each found document by key (result['id'] is the Redis key)
+             deleted_count = 0
+             for result in parsed:
+                 key = result.get("id")
+                 if key:
+                     deleted_count += self.index.drop_keys(key)
+
+             log_debug(f"Deleted {deleted_count} documents with metadata {metadata}")
+             return deleted_count > 0
+         except Exception as e:
+             logger.error(f"Error deleting documents by metadata: {e}")
+             return False
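+
+     # Filter sketch: delete_by_metadata({"category": "x", "source": "web"}) builds
+     #     (Tag("category") == "x") & (Tag("source") == "web")
+     # so only documents matching ALL of the given metadata fields are deleted.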
+
+     def delete_by_content_id(self, content_id: str) -> bool:
+         """Delete documents by content ID."""
+         try:
+             # Find documents with the given content_id
+             content_id_filter = Tag("content_id") == content_id
+             query = FilterQuery(
+                 filter_expression=content_id_filter,
+                 return_fields=["id"],
+                 num_results=1000,  # Get all matching documents
+             )
+             results = self.index.query(query)
+             parsed = convert_bytes(results)
+
+             # Delete each found document by key (result['id'] is the Redis key)
+             deleted_count = 0
+             for result in parsed:
+                 key = result.get("id")
+                 if key:
+                     deleted_count += self.index.drop_keys(key)
+
+             log_debug(f"Deleted {deleted_count} documents with content_id '{content_id}'")
+             return deleted_count > 0
+         except Exception as e:
+             logger.error(f"Error deleting documents by content_id: {e}")
+             return False
+
+     def update_metadata(self, content_id: str, metadata: Dict[str, Any]) -> None:
+         """Update metadata for documents with the given content ID."""
+         try:
+             # Find documents with the given content_id
+             content_id_filter = Tag("content_id") == content_id
+             query = FilterQuery(
+                 filter_expression=content_id_filter,
+                 return_fields=["id"],
+                 num_results=1000,  # Get all matching documents
+             )
+             results = self.index.query(query)
+
+             # Update metadata for each found document (result['id'] is the Redis key)
+             for result in convert_bytes(results):
+                 key = result.get("id")
+                 if key:
+                     # Update the hash with the new metadata
+                     self.redis_client.hset(key, mapping=metadata)
+
+             log_debug(f"Updated metadata for documents with content_id '{content_id}'")
+         except Exception as e:
+             logger.error(f"Error updating metadata: {e}")
+             raise
+
+     def get_supported_search_types(self) -> List[str]:
+         """Get list of supported search types."""
+         return ["vector", "keyword", "hybrid"]