agno 2.1.3__py3-none-any.whl → 2.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. agno/agent/agent.py +1779 -577
  2. agno/db/async_postgres/__init__.py +3 -0
  3. agno/db/async_postgres/async_postgres.py +1668 -0
  4. agno/db/async_postgres/schemas.py +124 -0
  5. agno/db/async_postgres/utils.py +289 -0
  6. agno/db/base.py +237 -2
  7. agno/db/dynamo/dynamo.py +10 -8
  8. agno/db/dynamo/schemas.py +1 -10
  9. agno/db/dynamo/utils.py +2 -2
  10. agno/db/firestore/firestore.py +2 -2
  11. agno/db/firestore/utils.py +4 -2
  12. agno/db/gcs_json/gcs_json_db.py +2 -2
  13. agno/db/in_memory/in_memory_db.py +2 -2
  14. agno/db/json/json_db.py +2 -2
  15. agno/db/migrations/v1_to_v2.py +30 -13
  16. agno/db/mongo/mongo.py +18 -6
  17. agno/db/mysql/mysql.py +35 -13
  18. agno/db/postgres/postgres.py +29 -6
  19. agno/db/redis/redis.py +2 -2
  20. agno/db/singlestore/singlestore.py +2 -2
  21. agno/db/sqlite/sqlite.py +34 -12
  22. agno/db/sqlite/utils.py +8 -3
  23. agno/eval/accuracy.py +50 -43
  24. agno/eval/performance.py +6 -3
  25. agno/eval/reliability.py +6 -3
  26. agno/eval/utils.py +33 -16
  27. agno/exceptions.py +8 -2
  28. agno/knowledge/embedder/fastembed.py +1 -1
  29. agno/knowledge/knowledge.py +260 -46
  30. agno/knowledge/reader/pdf_reader.py +4 -6
  31. agno/knowledge/reader/reader_factory.py +2 -3
  32. agno/memory/manager.py +241 -33
  33. agno/models/anthropic/claude.py +37 -0
  34. agno/os/app.py +15 -10
  35. agno/os/interfaces/a2a/router.py +3 -5
  36. agno/os/interfaces/agui/router.py +4 -1
  37. agno/os/interfaces/agui/utils.py +33 -6
  38. agno/os/interfaces/slack/router.py +2 -4
  39. agno/os/mcp.py +98 -41
  40. agno/os/router.py +23 -0
  41. agno/os/routers/evals/evals.py +52 -20
  42. agno/os/routers/evals/utils.py +14 -14
  43. agno/os/routers/knowledge/knowledge.py +130 -9
  44. agno/os/routers/knowledge/schemas.py +57 -0
  45. agno/os/routers/memory/memory.py +116 -44
  46. agno/os/routers/metrics/metrics.py +16 -6
  47. agno/os/routers/session/session.py +65 -22
  48. agno/os/schema.py +38 -0
  49. agno/os/utils.py +69 -13
  50. agno/reasoning/anthropic.py +80 -0
  51. agno/reasoning/gemini.py +73 -0
  52. agno/reasoning/openai.py +5 -0
  53. agno/reasoning/vertexai.py +76 -0
  54. agno/session/workflow.py +69 -1
  55. agno/team/team.py +934 -241
  56. agno/tools/function.py +36 -18
  57. agno/tools/google_drive.py +270 -0
  58. agno/tools/googlesheets.py +20 -5
  59. agno/tools/mcp_toolbox.py +3 -3
  60. agno/tools/scrapegraph.py +1 -1
  61. agno/utils/models/claude.py +3 -1
  62. agno/utils/print_response/workflow.py +112 -12
  63. agno/utils/streamlit.py +1 -1
  64. agno/vectordb/base.py +22 -1
  65. agno/vectordb/cassandra/cassandra.py +9 -0
  66. agno/vectordb/chroma/chromadb.py +26 -6
  67. agno/vectordb/clickhouse/clickhousedb.py +9 -1
  68. agno/vectordb/couchbase/couchbase.py +11 -0
  69. agno/vectordb/lancedb/lance_db.py +20 -0
  70. agno/vectordb/langchaindb/langchaindb.py +11 -0
  71. agno/vectordb/lightrag/lightrag.py +9 -0
  72. agno/vectordb/llamaindex/llamaindexdb.py +15 -1
  73. agno/vectordb/milvus/milvus.py +23 -0
  74. agno/vectordb/mongodb/mongodb.py +22 -0
  75. agno/vectordb/pgvector/pgvector.py +19 -0
  76. agno/vectordb/pineconedb/pineconedb.py +35 -4
  77. agno/vectordb/qdrant/qdrant.py +24 -0
  78. agno/vectordb/singlestore/singlestore.py +25 -17
  79. agno/vectordb/surrealdb/surrealdb.py +18 -1
  80. agno/vectordb/upstashdb/upstashdb.py +26 -1
  81. agno/vectordb/weaviate/weaviate.py +18 -0
  82. agno/workflow/condition.py +29 -0
  83. agno/workflow/loop.py +29 -0
  84. agno/workflow/parallel.py +141 -113
  85. agno/workflow/router.py +29 -0
  86. agno/workflow/step.py +146 -25
  87. agno/workflow/steps.py +29 -0
  88. agno/workflow/types.py +26 -1
  89. agno/workflow/workflow.py +507 -22
  90. {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/METADATA +100 -41
  91. {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/RECORD +94 -86
  92. {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/WHEEL +0 -0
  93. {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/licenses/LICENSE +0 -0
  94. {agno-2.1.3.dist-info → agno-2.1.5.dist-info}/top_level.txt +0 -0
agno/utils/print_response/workflow.py CHANGED
@@ -257,6 +257,11 @@ def print_response_stream(
  current_primitive_context = None # Current primitive being executed (parallel, loop, etc.)
  step_display_cache = {} # type: ignore

+ # Parallel-aware tracking for simultaneous steps
+ parallel_step_states: Dict[
+ Any, Dict[str, Any]
+ ] = {} # track state of each parallel step: {step_index: {"name": str, "content": str, "started": bool, "completed": bool}}
+
  def get_step_display_number(step_index: Union[int, tuple], step_name: str = "") -> str:
  """Generate clean two-level step numbering: x.y format only"""

@@ -321,8 +326,11 @@ def print_response_stream(
  live_log.update(status)

  elif isinstance(response, StepStartedEvent):
- current_step_name = response.step_name or "Unknown"
- current_step_index = response.step_index or 0 # type: ignore
+ step_name = response.step_name or "Unknown"
+ step_index = response.step_index or 0 # type: ignore
+
+ current_step_name = step_name
+ current_step_index = step_index # type: ignore
  current_step_content = ""
  step_started_printed = False

@@ -335,6 +343,14 @@ def print_response_stream(
  step_name = response.step_name or "Unknown"
  step_index = response.step_index or 0

+ # Skip parallel sub-step completed events - they're handled in ParallelExecutionCompletedEvent (avoid duplication)
+ if (
+ current_primitive_context
+ and current_primitive_context["type"] == "parallel"
+ and isinstance(step_index, tuple)
+ ):
+ continue
+
  # Generate smart step number for completion (will use cached value)
  step_display = get_step_display_number(step_index, step_name)
  status.update(f"Completed {step_display}: {step_name}")
@@ -376,7 +392,8 @@ def print_response_stream(
  "max_iterations": response.max_iterations,
  }

- # Clear cache for this primitive's sub-steps
+ # Initialize parallel step tracking - clear previous states
+ parallel_step_states.clear()
  step_display_cache.clear()

  status.update(f"Starting loop: {current_step_name} (max {response.max_iterations} iterations)...")
@@ -442,7 +459,8 @@ def print_response_stream(
  "total_steps": response.parallel_step_count,
  }

- # Clear cache for this primitive's sub-steps
+ # Initialize parallel step tracking - clear previous states
+ parallel_step_states.clear()
  step_display_cache.clear()

  # Print parallel execution summary panel
@@ -468,8 +486,30 @@ def print_response_stream(

  status.update(f"Completed parallel execution: {step_name}")

+ # Display individual parallel step results immediately
+ if show_step_details and response.step_results:
+ live_log.update(status, refresh=True)
+
+ # Get the parallel container's display number for consistent numbering
+ parallel_step_display = get_step_display_number(step_index, step_name)
+
+ # Show each parallel step with the same number (1.1, 1.1)
+ for step_result in response.step_results:
+ if step_result.content:
+ step_result_name = step_result.step_name or "Parallel Step"
+ formatted_content = format_step_content_for_display(step_result.content) # type: ignore
+
+ # All parallel sub-steps get the same number
+ parallel_step_panel = create_panel(
+ content=Markdown(formatted_content) if markdown else formatted_content,
+ title=f"{parallel_step_display}: {step_result_name} (Completed)",
+ border_style="orange3",
+ )
+ console.print(parallel_step_panel) # type: ignore
+
  # Reset context
  current_primitive_context = None
+ parallel_step_states.clear()
  step_display_cache.clear()

  elif isinstance(response, ConditionExecutionStartedEvent):
@@ -486,7 +526,8 @@ def print_response_stream(
  "condition_result": response.condition_result,
  }

- # Clear cache for this primitive's sub-steps
+ # Initialize parallel step tracking - clear previous states
+ parallel_step_states.clear()
  step_display_cache.clear()

  condition_text = "met" if response.condition_result else "not met"
@@ -517,7 +558,8 @@ def print_response_stream(
  "selected_steps": response.selected_steps,
  }

- # Clear cache for this primitive's sub-steps
+ # Initialize parallel step tracking - clear previous states
+ parallel_step_states.clear()
  step_display_cache.clear()

  selected_steps_text = ", ".join(response.selected_steps) if response.selected_steps else "none"
@@ -667,6 +709,14 @@ def print_response_stream(
  # Use the unified formatting function for consistency
  response_str = format_step_content_for_display(response_str) # type: ignore

+ # Skip streaming content from parallel sub-steps - they're handled in ParallelExecutionCompletedEvent
+ if (
+ current_primitive_context
+ and current_primitive_context["type"] == "parallel"
+ and isinstance(current_step_index, tuple)
+ ):
+ continue
+
  # Filter out empty responses and add to current step content
  if response_str and response_str.strip():
  # If it's a structured output from a team, replace the content instead of appending
@@ -990,6 +1040,11 @@ async def aprint_response_stream(
  current_primitive_context = None # Current primitive being executed (parallel, loop, etc.)
  step_display_cache = {} # type: ignore

+ # Parallel-aware tracking for simultaneous steps
+ parallel_step_states: Dict[
+ Any, Dict[str, Any]
+ ] = {} # track state of each parallel step: {step_index: {"name": str, "content": str, "started": bool, "completed": bool}}
+
  def get_step_display_number(step_index: Union[int, tuple], step_name: str = "") -> str:
  """Generate clean two-level step numbering: x.y format only"""

@@ -1054,8 +1109,11 @@ async def aprint_response_stream(
  live_log.update(status)

  elif isinstance(response, StepStartedEvent):
- current_step_name = response.step_name or "Unknown"
- current_step_index = response.step_index or 0 # type: ignore
+ step_name = response.step_name or "Unknown"
+ step_index = response.step_index or 0 # type: ignore
+
+ current_step_name = step_name
+ current_step_index = step_index # type: ignore
  current_step_content = ""
  step_started_printed = False

@@ -1068,6 +1126,14 @@ async def aprint_response_stream(
  step_name = response.step_name or "Unknown"
  step_index = response.step_index or 0

+ # Skip parallel sub-step completed events - they're handled in ParallelExecutionCompletedEvent (avoid duplication)
+ if (
+ current_primitive_context
+ and current_primitive_context["type"] == "parallel"
+ and isinstance(step_index, tuple)
+ ):
+ continue
+
  # Generate smart step number for completion (will use cached value)
  step_display = get_step_display_number(step_index, step_name)
  status.update(f"Completed {step_display}: {step_name}")
@@ -1109,7 +1175,8 @@ async def aprint_response_stream(
  "max_iterations": response.max_iterations,
  }

- # Clear cache for this primitive's sub-steps
+ # Initialize parallel step tracking - clear previous states
+ parallel_step_states.clear()
  step_display_cache.clear()

  status.update(f"Starting loop: {current_step_name} (max {response.max_iterations} iterations)...")
@@ -1175,7 +1242,8 @@ async def aprint_response_stream(
  "total_steps": response.parallel_step_count,
  }

- # Clear cache for this primitive's sub-steps
+ # Initialize parallel step tracking - clear previous states
+ parallel_step_states.clear()
  step_display_cache.clear()

  # Print parallel execution summary panel
@@ -1201,8 +1269,30 @@ async def aprint_response_stream(

  status.update(f"Completed parallel execution: {step_name}")

+ # Display individual parallel step results immediately
+ if show_step_details and response.step_results:
+ live_log.update(status, refresh=True)
+
+ # Get the parallel container's display number for consistent numbering
+ parallel_step_display = get_step_display_number(step_index, step_name)
+
+ # Show each parallel step with the same number (1.1, 1.1)
+ for step_result in response.step_results:
+ if step_result.content:
+ step_result_name = step_result.step_name or "Parallel Step"
+ formatted_content = format_step_content_for_display(step_result.content) # type: ignore
+
+ # All parallel sub-steps get the same number
+ parallel_step_panel = create_panel(
+ content=Markdown(formatted_content) if markdown else formatted_content,
+ title=f"{parallel_step_display}: {step_result_name} (Completed)",
+ border_style="orange3",
+ )
+ console.print(parallel_step_panel) # type: ignore
+
  # Reset context
  current_primitive_context = None
+ parallel_step_states.clear()
  step_display_cache.clear()

  elif isinstance(response, ConditionExecutionStartedEvent):
@@ -1219,7 +1309,8 @@ async def aprint_response_stream(
  "condition_result": response.condition_result,
  }

- # Clear cache for this primitive's sub-steps
+ # Initialize parallel step tracking - clear previous states
+ parallel_step_states.clear()
  step_display_cache.clear()

  condition_text = "met" if response.condition_result else "not met"
@@ -1250,7 +1341,8 @@ async def aprint_response_stream(
  "selected_steps": response.selected_steps,
  }

- # Clear cache for this primitive's sub-steps
+ # Initialize parallel step tracking - clear previous states
+ parallel_step_states.clear()
  step_display_cache.clear()

  selected_steps_text = ", ".join(response.selected_steps) if response.selected_steps else "none"
@@ -1404,6 +1496,14 @@ async def aprint_response_stream(
  # Use the unified formatting function for consistency
  response_str = format_step_content_for_display(response_str) # type: ignore

+ # Skip streaming content from parallel sub-steps - they're handled in ParallelExecutionCompletedEvent
+ if (
+ current_primitive_context
+ and current_primitive_context["type"] == "parallel"
+ and isinstance(current_step_index, tuple)
+ ):
+ continue
+
  # Filter out empty responses and add to current step content
  if response_str and response_str.strip():
  # If it's a structured output from a team, replace the content instead of appending
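Both new guards above key off the shape of step_index: a step running inside a Parallel block reports a tuple, while a top-level step reports a plain int, so live streaming and per-step completion panels are suppressed for parallel sub-steps and their output is rendered once from the ParallelExecutionCompletedEvent's step_results. A minimal standalone sketch of that filtering decision (the helper name and simplified context dict are illustrative, not agno code):

from typing import Any, Dict, Optional, Tuple, Union

StepIndex = Union[int, Tuple[int, ...]]

def should_defer_sub_step_output(primitive_context: Optional[Dict[str, Any]], step_index: StepIndex) -> bool:
    """True when output belongs to a sub-step of a running Parallel block and should wait for the summary panel."""
    return (
        primitive_context is not None
        and primitive_context.get("type") == "parallel"
        and isinstance(step_index, tuple)
    )

# Top-level steps stream immediately; sub-steps inside a Parallel block are held back.
assert should_defer_sub_step_output({"type": "parallel"}, (1, 0)) is True
assert should_defer_sub_step_output({"type": "parallel"}, 2) is False
assert should_defer_sub_step_output(None, (1, 0)) is False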
agno/utils/streamlit.py CHANGED
@@ -80,7 +80,7 @@ def session_selector_widget(agent: Agent, model_id: str, agent_creation_callback
  session_options = []
  session_dict = {}

- for session in sessions:
+ for session in sessions: # type: ignore
  if not hasattr(session, "session_id") or not session.session_id:
  continue

agno/vectordb/base.py CHANGED
@@ -1,11 +1,28 @@
  from abc import ABC, abstractmethod
  from typing import Any, Dict, List, Optional

+ from agno.knowledge.document import Document
+ from agno.utils.string import generate_id
+

  class VectorDb(ABC):
  """Base class for Vector Databases"""

- from agno.knowledge.document import Document
+ def __init__(self, *, id: Optional[str] = None, name: Optional[str] = None, description: Optional[str] = None):
+ """Initialize base VectorDb.
+
+ Args:
+ id: Optional custom ID. If not provided, an id will be generated.
+ name: Optional name for the vector database.
+ description: Optional description for the vector database.
+ """
+ if name is None:
+ name = self.__class__.__name__
+
+ self.name = name
+ self.description = description
+ # Last resort fallback to generate id from name if ID not specified
+ self.id = id if id else generate_id(name)

  @abstractmethod
  def create(self) -> None:
@@ -106,3 +123,7 @@ class VectorDb(ABC):
  @abstractmethod
  def delete_by_content_id(self, content_id: str) -> bool:
  raise NotImplementedError
+
+ @abstractmethod
+ def get_supported_search_types(self) -> List[str]:
+ raise NotImplementedError
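Two parts of this base-class change ripple through every backend below: the keyword-only __init__ gives each vector db a name (defaulting to the class name), an optional description, and an id generated from the name when none is passed, and get_supported_search_types becomes a required override. A self-contained sketch that mirrors the contract (the md5-based generate_id stand-in and the toy subclass are assumptions for illustration, not agno code):

from abc import ABC, abstractmethod
from hashlib import md5
from typing import List, Optional

def _generate_id(seed: str) -> str:
    # Stand-in for agno.utils.string.generate_id: a deterministic id derived from a seed string.
    return md5(seed.encode("utf-8")).hexdigest()

class _VectorDbBase(ABC):
    def __init__(self, *, id: Optional[str] = None, name: Optional[str] = None, description: Optional[str] = None):
        # Default the name to the subclass name, then fall back to a generated id if none was given.
        self.name = name if name is not None else self.__class__.__name__
        self.description = description
        self.id = id if id else _generate_id(self.name)

    @abstractmethod
    def get_supported_search_types(self) -> List[str]:
        raise NotImplementedError

class ToyVectorDb(_VectorDbBase):
    def get_supported_search_types(self) -> List[str]:
        return []  # mirrors backends that do not model search types with the SearchType enum

db = ToyVectorDb(description="example backend")
print(db.name, db.id, db.get_supported_search_types())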
agno/vectordb/cassandra/cassandra.py CHANGED
@@ -15,6 +15,8 @@ class Cassandra(VectorDb):
  keyspace: str,
  embedder: Optional[Embedder] = None,
  session=None,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
  ) -> None:
  if not table_name:
  raise ValueError("Table name must be provided.")
@@ -30,6 +32,9 @@ class Cassandra(VectorDb):

  embedder = OpenAIEmbedder()
  log_info("Embedder not provided, using OpenAIEmbedder as default.")
+ # Initialize base class with name and description
+ super().__init__(name=name, description=description)
+
  self.table_name: str = table_name
  self.embedder: Embedder = embedder
  self.session = session
@@ -483,3 +488,7 @@ class Cassandra(VectorDb):
  except Exception as e:
  log_error(f"Error updating metadata for content_id {content_id}: {e}")
  raise
+
+ def get_supported_search_types(self) -> List[str]:
+ """Get the supported search types for this vector database."""
+ return [] # Cassandra doesn't use SearchType enum
agno/vectordb/chroma/chromadb.py CHANGED
@@ -25,6 +25,9 @@ class ChromaDb(VectorDb):
  def __init__(
  self,
  collection: str,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ id: Optional[str] = None,
  embedder: Optional[Embedder] = None,
  distance: Distance = Distance.cosine,
  path: str = "tmp/chromadb",
@@ -32,9 +35,22 @@ class ChromaDb(VectorDb):
  reranker: Optional[Reranker] = None,
  **kwargs,
  ):
+ # Validate required parameters
+ if not collection:
+ raise ValueError("Collection name must be provided.")
+
+ # Dynamic ID generation based on unique identifiers
+ if id is None:
+ from agno.utils.string import generate_id
+
+ seed = f"{path}#{collection}"
+ id = generate_id(seed)
+
+ # Initialize base class with name, description, and generated ID
+ super().__init__(id=id, name=name, description=description)
+
  # Collection attributes
  self.collection_name: str = collection
-
  # Embedder for embedding the document contents
  if embedder is None:
  from agno.knowledge.embedder.openai import OpenAIEmbedder
@@ -497,11 +513,11 @@ class ChromaDb(VectorDb):
  # Build search results
  search_results: List[Document] = []

- ids_list = result.get("ids", [[]])
- metadata_list = result.get("metadatas", [[{}]])
- documents_list = result.get("documents", [[]])
- embeddings_list = result.get("embeddings")
- distances_list = result.get("distances", [[]])
+ ids_list = result.get("ids", [[]]) # type: ignore
+ metadata_list = result.get("metadatas", [[{}]]) # type: ignore
+ documents_list = result.get("documents", [[]]) # type: ignore
+ embeddings_list = result.get("embeddings") # type: ignore
+ distances_list = result.get("distances", [[]]) # type: ignore

  if not ids_list or not metadata_list or not documents_list or embeddings_list is None or not distances_list:
  return search_results
@@ -901,3 +917,7 @@ class ChromaDb(VectorDb):
  except Exception as e:
  logger.error(f"Error updating metadata for content_id '{content_id}': {e}")
  raise
+
+ def get_supported_search_types(self) -> List[str]:
+ """Get the supported search types for this vector database."""
+ return [] # ChromaDb doesn't use SearchType enum
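Since the generated ChromaDb id is seeded with f"{path}#{collection}", two instances pointed at the same path and collection should end up with the same id, while an explicit id bypasses generation. A usage sketch, assuming agno.utils.string.generate_id is deterministic for a given seed and that the chromadb and default-embedder dependencies are installed:

from agno.vectordb.chroma import ChromaDb

a = ChromaDb(collection="docs", path="tmp/chromadb")
b = ChromaDb(collection="docs", path="tmp/chromadb")
c = ChromaDb(collection="docs", path="tmp/chromadb", id="my-fixed-id")

assert a.id == b.id  # same "tmp/chromadb#docs" seed -> same generated id (assumed deterministic)
assert c.id == "my-fixed-id"  # an explicit id is used as-is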
agno/vectordb/clickhouse/clickhousedb.py CHANGED
@@ -23,6 +23,8 @@ class Clickhouse(VectorDb):
  self,
  table_name: str,
  host: str,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
  username: Optional[str] = None,
  password: str = "",
  port: int = 0,
@@ -41,9 +43,11 @@ class Clickhouse(VectorDb):
  self.password = password
  self.port = port
  self.dsn = dsn
+ # Initialize base class with name and description
+ super().__init__(name=name, description=description)
+
  self.compress = compress
  self.database_name = database_name
-
  if not client:
  client = clickhouse_connect.get_client(
  host=self.host,
@@ -817,3 +821,7 @@ class Clickhouse(VectorDb):
  except Exception as e:
  logger.error(f"Error updating metadata for content_id '{content_id}': {e}")
  raise
+
+ def get_supported_search_types(self) -> List[str]:
+ """Get the supported search types for this vector database."""
+ return [] # Clickhouse doesn't use SearchType enum
agno/vectordb/couchbase/couchbase.py CHANGED
@@ -66,6 +66,8 @@ class CouchbaseSearch(VectorDb):
  is_global_level_index: bool = False,
  wait_until_index_ready: float = 0,
  batch_limit: int = 500,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
  **kwargs,
  ):
  """
@@ -75,6 +77,8 @@ class CouchbaseSearch(VectorDb):
  bucket_name (str): Name of the Couchbase bucket.
  scope_name (str): Name of the scope within the bucket.
  collection_name (str): Name of the collection within the scope.
+ name (Optional[str]): Name of the vector database.
+ description (Optional[str]): Description of the vector database.
  couchbase_connection_string (str): Couchbase connection string.
  cluster_options (ClusterOptions): Options for configuring the Couchbase cluster connection.
  search_index (Union[str, SearchIndex], optional): Search index configuration, either as index name or SearchIndex definition.
@@ -96,6 +100,9 @@ class CouchbaseSearch(VectorDb):
  self.overwrite = overwrite
  self.is_global_level_index = is_global_level_index
  self.wait_until_index_ready = wait_until_index_ready
+ # Initialize base class with name and description
+ super().__init__(name=name, description=description)
+
  self.kwargs = kwargs
  self.batch_limit = batch_limit
  if isinstance(search_index, str):
@@ -1420,3 +1427,7 @@ class CouchbaseSearch(VectorDb):
  except Exception as e:
  logger.error(f"Error updating metadata for content_id '{content_id}': {e}")
  raise
+
+ def get_supported_search_types(self) -> List[str]:
+ """Get the supported search types for this vector database."""
+ return [] # CouchbaseSearch doesn't use SearchType enum
agno/vectordb/lancedb/lance_db.py CHANGED
@@ -25,6 +25,8 @@ class LanceDb(VectorDb):

  Args:
  uri: The URI of the LanceDB database.
+ name: Name of the vector database.
+ description: Description of the vector database.
  connection: The LanceDB connection to use.
  table: The LanceDB table instance to use.
  async_connection: The LanceDB async connection to use.
@@ -44,6 +46,9 @@ class LanceDb(VectorDb):
  def __init__(
  self,
  uri: lancedb.URI = "/tmp/lancedb",
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ id: Optional[str] = None,
  connection: Optional[lancedb.LanceDBConnection] = None,
  table: Optional[lancedb.db.LanceTable] = None,
  async_connection: Optional[lancedb.AsyncConnection] = None,
@@ -59,6 +64,17 @@ class LanceDb(VectorDb):
  on_bad_vectors: Optional[str] = None, # One of "error", "drop", "fill", "null".
  fill_value: Optional[float] = None, # Only used if on_bad_vectors is "fill"
  ):
+ # Dynamic ID generation based on unique identifiers
+ if id is None:
+ from agno.utils.string import generate_id
+
+ table_identifier = table_name or "default_table"
+ seed = f"{uri}#{table_identifier}"
+ id = generate_id(seed)
+
+ # Initialize base class with name, description, and generated ID
+ super().__init__(id=id, name=name, description=description)
+
  # Embedder for embedding the document contents
  if embedder is None:
  from agno.knowledge.embedder.openai import OpenAIEmbedder
@@ -1048,3 +1064,7 @@ class LanceDb(VectorDb):
  except Exception as e:
  logger.error(f"Error updating metadata for content_id '{content_id}': {e}")
  raise
+
+ def get_supported_search_types(self) -> List[str]:
+ """Get the supported search types for this vector database."""
+ return [SearchType.vector, SearchType.keyword, SearchType.hybrid]
agno/vectordb/langchaindb/langchaindb.py CHANGED
@@ -11,16 +11,23 @@ class LangChainVectorDb(VectorDb):
  vectorstore: Optional[Any] = None,
  search_kwargs: Optional[dict] = None,
  knowledge_retriever: Optional[Any] = None,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
  ):
  """
  Initialize LangChainVectorDb.

  Args:
  vectorstore: The LangChain vectorstore instance
+ name (Optional[str]): Name of the vector database.
+ description (Optional[str]): Description of the vector database.
  search_kwargs: Additional search parameters for the retriever
  knowledge_retriever: An optional LangChain retriever instance
  """
  self.vectorstore = vectorstore
+ # Initialize base class with name and description
+ super().__init__(name=name, description=description)
+
  self.search_kwargs = search_kwargs
  self.knowledge_retriever = knowledge_retriever

@@ -141,3 +148,7 @@ class LangChainVectorDb(VectorDb):
  metadata (Dict[str, Any]): The metadata to update
  """
  raise NotImplementedError("update_metadata not supported for LangChain vectorstores")
+
+ def get_supported_search_types(self) -> List[str]:
+ """Get the supported search types for this vector database."""
+ return [] # LangChainVectorDb doesn't use SearchType enum
agno/vectordb/lightrag/lightrag.py CHANGED
@@ -21,9 +21,14 @@ class LightRag(VectorDb):
  api_key: Optional[str] = None,
  auth_header_name: str = "X-API-KEY",
  auth_header_format: str = "{api_key}",
+ name: Optional[str] = None,
+ description: Optional[str] = None,
  ):
  self.server_url = server_url
  self.api_key = api_key
+ # Initialize base class with name and description
+ super().__init__(name=name, description=description)
+
  self.auth_header_name = auth_header_name
  self.auth_header_format = auth_header_format

@@ -372,3 +377,7 @@ class LightRag(VectorDb):
  metadata (Dict[str, Any]): The metadata to update
  """
  raise NotImplementedError("update_metadata not supported for LightRag - use LightRag's native methods")
+
+ def get_supported_search_types(self) -> List[str]:
+ """Get the supported search types for this vector database."""
+ return [] # LightRag doesn't use SearchType enum
agno/vectordb/llamaindex/llamaindexdb.py CHANGED
@@ -17,8 +17,18 @@ class LlamaIndexVectorDb(VectorDb):
  knowledge_retriever: BaseRetriever
  loader: Optional[Callable] = None

- def __init__(self, knowledge_retriever: BaseRetriever, loader: Optional[Callable] = None, **kwargs):
+ def __init__(
+ self,
+ knowledge_retriever: BaseRetriever,
+ loader: Optional[Callable] = None,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ **kwargs,
+ ):
  super().__init__(**kwargs)
+ # Initialize base class with name and description
+ super().__init__(name=name, description=description)
+
  self.knowledge_retriever = knowledge_retriever
  self.loader = loader

@@ -144,3 +154,7 @@ class LlamaIndexVectorDb(VectorDb):
  "LlamaIndexVectorDb.delete_by_content_id() not supported - please check the vectorstore manually."
  )
  return False
+
+ def get_supported_search_types(self) -> List[str]:
+ """Get the supported search types for this vector database."""
+ return [] # LlamaIndexVectorDb doesn't use SearchType enum
agno/vectordb/milvus/milvus.py CHANGED
@@ -28,6 +28,9 @@ class Milvus(VectorDb):
  def __init__(
  self,
  collection: str,
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ id: Optional[str] = None,
  embedder: Optional[Embedder] = None,
  distance: Distance = Distance.cosine,
  uri: str = "http://localhost:19530",
@@ -42,6 +45,8 @@ class Milvus(VectorDb):

  Args:
  collection (str): Name of the Milvus collection.
+ name (Optional[str]): Name of the vector database.
+ description (Optional[str]): Description of the vector database.
  embedder (Embedder): Embedder to use for embedding documents.
  distance (Distance): Distance metric to use for vector similarity.
  uri (Optional[str]): URI of the Milvus server.
@@ -63,6 +68,20 @@ class Milvus(VectorDb):
  reranker (Optional[Reranker]): Reranker to use for hybrid search results
  **kwargs: Additional keyword arguments to pass to the MilvusClient.
  """
+ # Validate required parameters
+ if not collection:
+ raise ValueError("Collection name must be provided.")
+
+ # Dynamic ID generation based on unique identifiers
+ if id is None:
+ from agno.utils.string import generate_id
+
+ seed = f"{uri or 'milvus'}#{collection}"
+ id = generate_id(seed)
+
+ # Initialize base class with name, description, and generated ID
+ super().__init__(id=id, name=name, description=description)
+
  self.collection: str = collection

  if embedder is None:
@@ -1141,3 +1160,7 @@ class Milvus(VectorDb):
  except Exception as e:
  log_error(f"Error updating metadata for content_id '{content_id}': {e}")
  raise
+
+ def get_supported_search_types(self) -> List[str]:
+ """Get the supported search types for this vector database."""
+ return [SearchType.vector, SearchType.hybrid]
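Together with the base-class change, get_supported_search_types lets callers pick a search strategy per backend rather than hard-coding one. An illustrative helper, assuming SearchType is importable from agno.vectordb.search as used by the LanceDb and Milvus implementations above (the helper itself is not part of the package):

from agno.vectordb.base import VectorDb
from agno.vectordb.search import SearchType

def pick_search_type(db: VectorDb) -> SearchType:
    """Prefer hybrid search when a backend advertises it, otherwise fall back to plain vector search."""
    supported = db.get_supported_search_types()
    if SearchType.hybrid in supported:
        return SearchType.hybrid
    # Backends that return [] don't model search types with the enum; vector search is the safe default.
    return SearchType.vector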