agno 2.0.6__py3-none-any.whl → 2.0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. agno/agent/agent.py +94 -48
  2. agno/db/migrations/v1_to_v2.py +140 -11
  3. agno/knowledge/chunking/semantic.py +33 -6
  4. agno/knowledge/embedder/sentence_transformer.py +3 -3
  5. agno/knowledge/knowledge.py +152 -31
  6. agno/knowledge/types.py +8 -0
  7. agno/media.py +2 -0
  8. agno/models/base.py +38 -9
  9. agno/models/cometapi/__init__.py +5 -0
  10. agno/models/cometapi/cometapi.py +57 -0
  11. agno/models/google/gemini.py +4 -8
  12. agno/models/llama_cpp/__init__.py +5 -0
  13. agno/models/llama_cpp/llama_cpp.py +22 -0
  14. agno/models/nexus/__init__.py +1 -1
  15. agno/models/nexus/nexus.py +2 -5
  16. agno/models/ollama/chat.py +24 -1
  17. agno/models/openai/chat.py +2 -7
  18. agno/models/openai/responses.py +21 -17
  19. agno/os/app.py +4 -10
  20. agno/os/interfaces/agui/agui.py +2 -2
  21. agno/os/interfaces/agui/utils.py +81 -18
  22. agno/os/interfaces/slack/slack.py +2 -2
  23. agno/os/interfaces/whatsapp/whatsapp.py +2 -2
  24. agno/os/router.py +3 -4
  25. agno/os/routers/evals/evals.py +1 -1
  26. agno/os/routers/memory/memory.py +1 -1
  27. agno/os/schema.py +3 -4
  28. agno/os/utils.py +55 -12
  29. agno/reasoning/default.py +3 -1
  30. agno/run/agent.py +4 -0
  31. agno/run/team.py +3 -1
  32. agno/session/agent.py +8 -5
  33. agno/session/team.py +14 -10
  34. agno/team/team.py +239 -115
  35. agno/tools/decorator.py +4 -2
  36. agno/tools/function.py +43 -4
  37. agno/tools/mcp.py +61 -38
  38. agno/tools/memori.py +1 -53
  39. agno/utils/events.py +7 -1
  40. agno/utils/gemini.py +147 -19
  41. agno/utils/models/claude.py +9 -0
  42. agno/utils/print_response/agent.py +16 -0
  43. agno/utils/print_response/team.py +16 -0
  44. agno/vectordb/base.py +2 -2
  45. agno/vectordb/langchaindb/langchaindb.py +5 -7
  46. agno/vectordb/llamaindex/llamaindexdb.py +25 -6
  47. agno/workflow/workflow.py +59 -15
  48. {agno-2.0.6.dist-info → agno-2.0.8.dist-info}/METADATA +1 -1
  49. {agno-2.0.6.dist-info → agno-2.0.8.dist-info}/RECORD +52 -48
  50. {agno-2.0.6.dist-info → agno-2.0.8.dist-info}/WHEEL +0 -0
  51. {agno-2.0.6.dist-info → agno-2.0.8.dist-info}/licenses/LICENSE +0 -0
  52. {agno-2.0.6.dist-info → agno-2.0.8.dist-info}/top_level.txt +0 -0
agno/utils/gemini.py CHANGED
@@ -1,5 +1,7 @@
 from pathlib import Path
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel
 
 from agno.media import Image
 from agno.utils.log import log_error, log_warning
@@ -9,12 +11,119 @@ try:
         FunctionDeclaration,
         Schema,
         Tool,
-        Type,
+    )
+    from google.genai.types import (
+        Type as GeminiType,
     )
 except ImportError:
     raise ImportError("`google-genai` not installed. Please install it using `pip install google-genai`")
 
 
+def prepare_response_schema(pydantic_model: Type[BaseModel]) -> Union[Type[BaseModel], Schema]:
+    """
+    Prepare a Pydantic model for use as Gemini response schema.
+
+    Returns the model directly if Gemini can handle it natively,
+    otherwise converts to Gemini's Schema format.
+
+    Args:
+        pydantic_model: A Pydantic model class
+
+    Returns:
+        Either the original Pydantic model or a converted Schema object
+    """
+    schema_dict = pydantic_model.model_json_schema()
+
+    # Convert to Gemini Schema if the model has problematic patterns
+    if needs_conversion(schema_dict):
+        try:
+            converted = convert_schema(schema_dict)
+        except Exception as e:
+            log_warning(f"Failed to convert schema for {pydantic_model}: {e}")
+            converted = None
+
+        if converted is None:
+            # If conversion fails, let Gemini handle it directly
+            return pydantic_model
+        return converted
+
+    # Gemini can handle this model directly
+    return pydantic_model
+
+
+def needs_conversion(schema_dict: Dict[str, Any]) -> bool:
+    """
+    Check if a schema needs conversion for Gemini.
+
+    Returns True if the schema has:
+    - Self-references or circular references
+    - Dict fields (additionalProperties) that Gemini doesn't handle well
+    - Empty object definitions that Gemini rejects
+    """
+    # Check for dict fields (additionalProperties) anywhere in the schema
+    if has_additional_properties(schema_dict):
+        return True
+
+    # Check if schema has $defs with circular references
+    if "$defs" in schema_dict:
+        defs = schema_dict["$defs"]
+        for def_name, def_schema in defs.items():
+            ref_path = f"#/$defs/{def_name}"
+            if has_self_reference(def_schema, ref_path):
+                return True
+
+    return False
+
+
+def has_additional_properties(schema: Any) -> bool:
+    """Check if schema has additionalProperties (Dict fields)"""
+    if isinstance(schema, dict):
+        # Direct check
+        if "additionalProperties" in schema:
+            return True
+
+        # Check properties recursively
+        if "properties" in schema:
+            for prop_schema in schema["properties"].values():
+                if has_additional_properties(prop_schema):
+                    return True
+
+        # Check array items
+        if "items" in schema:
+            if has_additional_properties(schema["items"]):
+                return True
+
+    return False
+
+
+def has_self_reference(schema: Dict, target_ref: str) -> bool:
+    """Check if a schema references itself (directly or indirectly)"""
+    if isinstance(schema, dict):
+        # Direct self-reference
+        if schema.get("$ref") == target_ref:
+            return True
+
+        # Check properties
+        if "properties" in schema:
+            for prop_schema in schema["properties"].values():
+                if has_self_reference(prop_schema, target_ref):
+                    return True
+
+        # Check array items
+        if "items" in schema:
+            if has_self_reference(schema["items"], target_ref):
+                return True
+
+        # Check anyOf/oneOf/allOf
+        for key in ["anyOf", "oneOf", "allOf"]:
+            if key in schema:
+                for sub_schema in schema[key]:
+                    if has_self_reference(sub_schema, target_ref):
+                        return True
+
+    return False
+
+
 def format_image_for_message(image: Image) -> Optional[Dict[str, Any]]:
     # Case 1: Image is a URL
     # Download the image from the URL and add it as base64 encoded data
@@ -66,7 +175,9 @@ def format_image_for_message(image: Image) -> Optional[Dict[str, Any]]:
     return None
 
 
-def convert_schema(schema_dict: Dict[str, Any], root_schema: Optional[Dict[str, Any]] = None) -> Optional[Schema]:
+def convert_schema(
+    schema_dict: Dict[str, Any], root_schema: Optional[Dict[str, Any]] = None, visited_refs: Optional[set] = None
+) -> Optional[Schema]:
     """
     Recursively convert a JSON-like schema dictionary to a types.Schema object.
 
@@ -74,23 +185,39 @@ def convert_schema(schema_dict: Dict[str, Any], root_schema: Optional[Dict[str,
         schema_dict (dict): The JSON schema dictionary with keys like "type", "description",
             "properties", and "required".
         root_schema (dict, optional): The root schema containing $defs for resolving $ref
+        visited_refs (set, optional): Set of visited $ref paths to detect circular references
 
     Returns:
         types.Schema: The converted schema.
     """
 
-    # If this is the initial call, set root_schema to self
+    # If this is the initial call, set root_schema to self and initialize visited_refs
     if root_schema is None:
         root_schema = schema_dict
+    if visited_refs is None:
+        visited_refs = set()
 
-    # Handle $ref references
+    # Handle $ref references with cycle detection
     if "$ref" in schema_dict:
         ref_path = schema_dict["$ref"]
+
+        # Check for circular reference
+        if ref_path in visited_refs:
+            # Return a basic object schema to break the cycle
+            return Schema(
+                type=GeminiType.OBJECT,
+                description=f"Circular reference to {ref_path}",
+            )
+
         if ref_path.startswith("#/$defs/"):
             def_name = ref_path.split("/")[-1]
             if "$defs" in root_schema and def_name in root_schema["$defs"]:
+                # Add to visited set before recursing
+                new_visited = visited_refs.copy()
+                new_visited.add(ref_path)
+
                 referenced_schema = root_schema["$defs"][def_name]
-                return convert_schema(referenced_schema, root_schema)
+                return convert_schema(referenced_schema, root_schema, new_visited)
         # If we can't resolve the reference, return None
         return None
 
@@ -103,7 +230,7 @@ def convert_schema(schema_dict: Dict[str, Any], root_schema: Optional[Dict[str,
     # Handle enum types
     if "enum" in schema_dict:
         enum_values = schema_dict["enum"]
-        return Schema(type=Type.STRING, enum=enum_values, description=description, default=default)
+        return Schema(type=GeminiType.STRING, enum=enum_values, description=description, default=default)
 
     if schema_type == "object":
         # Handle regular objects with properties
@@ -117,8 +244,8 @@ def convert_schema(schema_dict: Dict[str, Any], root_schema: Optional[Dict[str,
                     prop_def["type"] = prop_type[0]
                     is_nullable = True
 
-                # Process property schema (pass root_schema for $ref resolution)
-                converted_schema = convert_schema(prop_def, root_schema)
+                # Process property schema (pass root_schema and visited_refs for $ref resolution)
+                converted_schema = convert_schema(prop_def, root_schema, visited_refs)
                 if converted_schema is not None:
                     if is_nullable:
                         converted_schema.nullable = True
@@ -128,14 +255,14 @@ def convert_schema(schema_dict: Dict[str, Any], root_schema: Optional[Dict[str,
 
             if properties:
                 return Schema(
-                    type=Type.OBJECT,
+                    type=GeminiType.OBJECT,
                     properties=properties,
                     required=required,
                     description=description,
                     default=default,
                 )
             else:
-                return Schema(type=Type.OBJECT, description=description, default=default)
+                return Schema(type=GeminiType.OBJECT, description=description, default=default)
 
         # Handle Dict types (objects with additionalProperties but no properties)
         elif "additionalProperties" in schema_dict:
@@ -170,7 +297,7 @@ def convert_schema(schema_dict: Dict[str, Any], root_schema: Optional[Dict[str,
                     placeholder_properties["example_key"].items = {}  # type: ignore
 
                 return Schema(
-                    type=Type.OBJECT,
+                    type=GeminiType.OBJECT,
                     properties=placeholder_properties,
                     description=description
                     or f"Dictionary with {value_type.lower()} values{type_description_suffix}. Can contain any number of key-value pairs.",
@@ -178,21 +305,22 @@ def convert_schema(schema_dict: Dict[str, Any], root_schema: Optional[Dict[str,
                 )
             else:
                 # additionalProperties is false or true
-                return Schema(type=Type.OBJECT, description=description, default=default)
+                return Schema(type=GeminiType.OBJECT, description=description, default=default)
 
         # Handle empty objects
         else:
-            return Schema(type=Type.OBJECT, description=description, default=default)
+            return Schema(type=GeminiType.OBJECT, description=description, default=default)
 
     elif schema_type == "array" and "items" in schema_dict:
         if not schema_dict["items"]:  # Handle empty {}
-            items = Schema(type=Type.STRING)
+            items = Schema(type=GeminiType.STRING)
         else:
-            items = convert_schema(schema_dict["items"], root_schema)
+            converted_items = convert_schema(schema_dict["items"], root_schema, visited_refs)
+            items = converted_items if converted_items is not None else Schema(type=GeminiType.STRING)
         min_items = schema_dict.get("minItems")
         max_items = schema_dict.get("maxItems")
         return Schema(
-            type=Type.ARRAY,
+            type=GeminiType.ARRAY,
             description=description,
             items=items,
            min_items=min_items,
@@ -201,7 +329,7 @@ def convert_schema(schema_dict: Dict[str, Any], root_schema: Optional[Dict[str,
 
     elif schema_type == "string":
         schema_kwargs = {
-            "type": Type.STRING,
+            "type": GeminiType.STRING,
             "description": description,
             "default": default,
         }
@@ -224,7 +352,7 @@ def convert_schema(schema_dict: Dict[str, Any], root_schema: Optional[Dict[str,
     elif schema_type == "" and "anyOf" in schema_dict:
         any_of = []
         for sub_schema in schema_dict["anyOf"]:
-            sub_schema_converted = convert_schema(sub_schema, root_schema)
+            sub_schema_converted = convert_schema(sub_schema, root_schema, visited_refs)
            any_of.append(sub_schema_converted)
 
         is_nullable = False
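For orientation, here is a small hedged sketch (not taken from the package) of the Pydantic schema shapes the new helpers above target: a Dict field shows up in the JSON schema as additionalProperties, and a self-referencing model produces a $defs entry whose $ref points back at itself.

# Illustrative sketch only -- not agno code. Shows the JSON-schema patterns
# that needs_conversion() above is written to flag.
from typing import Dict, List, Optional

from pydantic import BaseModel


class Node(BaseModel):
    name: str
    children: List["Node"] = []  # self-reference -> "$ref": "#/$defs/Node"


class Report(BaseModel):
    title: str
    scores: Dict[str, float]  # Dict field -> "additionalProperties"
    tree: Optional[Node] = None


schema = Report.model_json_schema()
print("additionalProperties" in str(schema))  # True: routed through convert_schema()
print("#/$defs/Node" in str(schema))          # True: circular $ref, also triggers conversion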
agno/utils/models/claude.py CHANGED
@@ -279,6 +279,15 @@ def format_messages(messages: List[Message]) -> Tuple[List[Dict[str, str]], str]
                     type="tool_use",
                 )
             )
+        elif message.role == "tool":
+            content = []
+            content.append(
+                {
+                    "type": "tool_result",
+                    "tool_use_id": message.tool_call_id,
+                    "content": str(message.content),
+                }
+            )
 
         # Skip empty assistant responses
         if message.role == "assistant" and not content:
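For context, a hedged illustration (values invented, not agno code) of where such a block lands in an Anthropic-style request: tool results are sent back as content blocks that reference the assistant's earlier tool_use id, which is what the appended dict above encodes.

# Illustrative only: an Anthropic-style message list after one tool round-trip.
# The final "tool_result" block mirrors the dict built above for role="tool"
# (tool_use_id <- message.tool_call_id, content <- str(message.content)).
messages = [
    {"role": "user", "content": "What is 6 x 7?"},
    {
        "role": "assistant",
        "content": [
            {"type": "tool_use", "id": "toolu_01", "name": "multiply", "input": {"a": 6, "b": 7}},
        ],
    },
    {
        "role": "user",
        "content": [
            {"type": "tool_result", "tool_use_id": "toolu_01", "content": "42"},
        ],
    },
]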
agno/utils/print_response/agent.py CHANGED
@@ -44,6 +44,8 @@ def print_response_stream(
     console: Optional[Any] = None,
     add_history_to_context: Optional[bool] = None,
     dependencies: Optional[Dict[str, Any]] = None,
+    add_dependencies_to_context: Optional[bool] = None,
+    add_session_state_to_context: Optional[bool] = None,
     metadata: Optional[Dict[str, Any]] = None,
     **kwargs: Any,
 ):
@@ -90,6 +92,8 @@ def print_response_stream(
         knowledge_filters=knowledge_filters,
         debug_mode=debug_mode,
         add_history_to_context=add_history_to_context,
+        add_dependencies_to_context=add_dependencies_to_context,
+        add_session_state_to_context=add_session_state_to_context,
         dependencies=dependencies,
         metadata=metadata,
         **kwargs,
@@ -223,6 +227,8 @@ async def aprint_response_stream(
     console: Optional[Any] = None,
     add_history_to_context: Optional[bool] = None,
     dependencies: Optional[Dict[str, Any]] = None,
+    add_dependencies_to_context: Optional[bool] = None,
+    add_session_state_to_context: Optional[bool] = None,
     metadata: Optional[Dict[str, Any]] = None,
     **kwargs: Any,
 ):
@@ -269,6 +275,8 @@ async def aprint_response_stream(
         knowledge_filters=knowledge_filters,
         debug_mode=debug_mode,
         add_history_to_context=add_history_to_context,
+        add_dependencies_to_context=add_dependencies_to_context,
+        add_session_state_to_context=add_session_state_to_context,
         dependencies=dependencies,
         metadata=metadata,
         **kwargs,
@@ -490,6 +498,8 @@ def print_response(
     console: Optional[Any] = None,
     add_history_to_context: Optional[bool] = None,
     dependencies: Optional[Dict[str, Any]] = None,
+    add_dependencies_to_context: Optional[bool] = None,
+    add_session_state_to_context: Optional[bool] = None,
     metadata: Optional[Dict[str, Any]] = None,
     **kwargs: Any,
 ):
@@ -527,6 +537,8 @@ def print_response(
         knowledge_filters=knowledge_filters,
         debug_mode=debug_mode,
         add_history_to_context=add_history_to_context,
+        add_dependencies_to_context=add_dependencies_to_context,
+        add_session_state_to_context=add_session_state_to_context,
         dependencies=dependencies,
         metadata=metadata,
         **kwargs,
@@ -590,6 +602,8 @@ async def aprint_response(
     console: Optional[Any] = None,
     add_history_to_context: Optional[bool] = None,
     dependencies: Optional[Dict[str, Any]] = None,
+    add_dependencies_to_context: Optional[bool] = None,
+    add_session_state_to_context: Optional[bool] = None,
     metadata: Optional[Dict[str, Any]] = None,
     **kwargs: Any,
 ):
@@ -627,6 +641,8 @@ async def aprint_response(
         knowledge_filters=knowledge_filters,
         debug_mode=debug_mode,
         add_history_to_context=add_history_to_context,
+        add_dependencies_to_context=add_dependencies_to_context,
+        add_session_state_to_context=add_session_state_to_context,
         dependencies=dependencies,
         metadata=metadata,
         **kwargs,
agno/utils/print_response/team.py CHANGED
@@ -36,6 +36,8 @@ def print_response(
     knowledge_filters: Optional[Dict[str, Any]] = None,
     add_history_to_context: Optional[bool] = None,
     dependencies: Optional[Dict[str, Any]] = None,
+    add_dependencies_to_context: Optional[bool] = None,
+    add_session_state_to_context: Optional[bool] = None,
     metadata: Optional[Dict[str, Any]] = None,
     debug_mode: Optional[bool] = None,
     **kwargs: Any,
@@ -88,6 +90,8 @@ def print_response(
         knowledge_filters=knowledge_filters,
         add_history_to_context=add_history_to_context,
         dependencies=dependencies,
+        add_dependencies_to_context=add_dependencies_to_context,
+        add_session_state_to_context=add_session_state_to_context,
         metadata=metadata,
         debug_mode=debug_mode,
         **kwargs,
@@ -317,6 +321,8 @@ def print_response_stream(
     knowledge_filters: Optional[Dict[str, Any]] = None,
     add_history_to_context: Optional[bool] = None,
     dependencies: Optional[Dict[str, Any]] = None,
+    add_dependencies_to_context: Optional[bool] = None,
+    add_session_state_to_context: Optional[bool] = None,
     metadata: Optional[Dict[str, Any]] = None,
     debug_mode: Optional[bool] = None,
     **kwargs: Any,
@@ -386,6 +392,8 @@ def print_response_stream(
         knowledge_filters=knowledge_filters,
         add_history_to_context=add_history_to_context,
         dependencies=dependencies,
+        add_dependencies_to_context=add_dependencies_to_context,
+        add_session_state_to_context=add_session_state_to_context,
         metadata=metadata,
         debug_mode=debug_mode,
         yield_run_response=True,
@@ -841,6 +849,8 @@ async def aprint_response(
     knowledge_filters: Optional[Dict[str, Any]] = None,
     add_history_to_context: Optional[bool] = None,
     dependencies: Optional[Dict[str, Any]] = None,
+    add_dependencies_to_context: Optional[bool] = None,
+    add_session_state_to_context: Optional[bool] = None,
     metadata: Optional[Dict[str, Any]] = None,
     debug_mode: Optional[bool] = None,
     **kwargs: Any,
@@ -893,6 +903,8 @@ async def aprint_response(
         knowledge_filters=knowledge_filters,
         add_history_to_context=add_history_to_context,
         dependencies=dependencies,
+        add_dependencies_to_context=add_dependencies_to_context,
+        add_session_state_to_context=add_session_state_to_context,
         metadata=metadata,
         debug_mode=debug_mode,
         **kwargs,
@@ -1120,6 +1132,8 @@ async def aprint_response_stream(
     knowledge_filters: Optional[Dict[str, Any]] = None,
     add_history_to_context: Optional[bool] = None,
     dependencies: Optional[Dict[str, Any]] = None,
+    add_dependencies_to_context: Optional[bool] = None,
+    add_session_state_to_context: Optional[bool] = None,
     metadata: Optional[Dict[str, Any]] = None,
     debug_mode: Optional[bool] = None,
     **kwargs: Any,
@@ -1196,6 +1210,8 @@ async def aprint_response_stream(
         user_id=user_id,
         knowledge_filters=knowledge_filters,
         add_history_to_context=add_history_to_context,
+        add_dependencies_to_context=add_dependencies_to_context,
+        add_session_state_to_context=add_session_state_to_context,
         dependencies=dependencies,
         metadata=metadata,
         debug_mode=debug_mode,
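A minimal usage sketch of the two new passthrough flags. The Agent and model construction below is assumed from agno's documented API, and it assumes Agent.print_response forwards the same keyword arguments as the helpers changed above.

# Hedged sketch -- constructor arguments are assumptions, not taken from this diff.
from agno.agent import Agent
from agno.models.openai import OpenAIChat

agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    dependencies={"app_name": "agno"},
    session_state={"locale": "en"},
)

agent.print_response(
    "Greet the user of {app_name} in their locale.",
    add_dependencies_to_context=True,   # new passthrough in 2.0.8
    add_session_state_to_context=True,  # new passthrough in 2.0.8
)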
agno/vectordb/base.py CHANGED
@@ -1,12 +1,12 @@
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Optional
 
-from agno.knowledge.document import Document
-
 
 class VectorDb(ABC):
     """Base class for Vector Databases"""
 
+    from agno.knowledge.document import Document
+
     @abstractmethod
     def create(self) -> None:
         raise NotImplementedError
agno/vectordb/langchaindb/langchaindb.py CHANGED
@@ -63,9 +63,7 @@ class LangChainVectorDb(VectorDb):
         logger.warning("LangChainKnowledgeBase.async_upsert() not supported - please check the vectorstore manually.")
         raise NotImplementedError
 
-    def search(
-        self, query: str, num_documents: Optional[int] = None, filters: Optional[Dict[str, Any]] = None
-    ) -> List[Document]:
+    def search(self, query: str, limit: int = 5, filters: Optional[Dict[str, Any]] = None) -> List[Document]:
         """Returns relevant documents matching the query"""
 
         try:
@@ -79,7 +77,7 @@ class LangChainVectorDb(VectorDb):
         if self.vectorstore is not None and self.knowledge_retriever is None:
             log_debug("Creating knowledge retriever")
             if self.search_kwargs is None:
-                self.search_kwargs = {"k": num_documents}
+                self.search_kwargs = {"k": limit}
             if filters is not None:
                 self.search_kwargs.update(filters)
             self.knowledge_retriever = self.vectorstore.as_retriever(search_kwargs=self.search_kwargs)
@@ -91,7 +89,7 @@ class LangChainVectorDb(VectorDb):
         if not isinstance(self.knowledge_retriever, BaseRetriever):
             raise ValueError(f"Knowledge retriever is not of type BaseRetriever: {self.knowledge_retriever}")
 
-        log_debug(f"Getting {num_documents} relevant documents for query: {query}")
+        log_debug(f"Getting {limit} relevant documents for query: {query}")
         lc_documents: List[LangChainDocument] = self.knowledge_retriever.invoke(input=query)
         documents = []
         for lc_doc in lc_documents:
@@ -104,9 +102,9 @@ class LangChainVectorDb(VectorDb):
         return documents
 
     async def async_search(
-        self, query: str, num_documents: Optional[int] = None, filters: Optional[Dict[str, Any]] = None
+        self, query: str, limit: int = 5, filters: Optional[Dict[str, Any]] = None
     ) -> List[Document]:
-        return self.search(query, num_documents, filters)
+        return self.search(query, limit, filters)
 
     def drop(self) -> None:
         raise NotImplementedError
agno/vectordb/llamaindex/llamaindexdb.py CHANGED
@@ -17,6 +17,11 @@ class LlamaIndexVectorDb(VectorDb):
     knowledge_retriever: BaseRetriever
     loader: Optional[Callable] = None
 
+    def __init__(self, knowledge_retriever: BaseRetriever, loader: Optional[Callable] = None, **kwargs):
+        super().__init__(**kwargs)
+        self.knowledge_retriever = knowledge_retriever
+        self.loader = loader
+
     def create(self) -> None:
         raise NotImplementedError
 
@@ -53,15 +58,13 @@ class LlamaIndexVectorDb(VectorDb):
         logger.warning("LlamaIndexVectorDb.async_upsert() not supported - please check the vectorstore manually.")
         raise NotImplementedError
 
-    def search(
-        self, query: str, num_documents: Optional[int] = None, filters: Optional[Dict[str, Any]] = None
-    ) -> List[Document]:
+    def search(self, query: str, limit: int = 5, filters: Optional[Dict[str, Any]] = None) -> List[Document]:
         """
         Returns relevant documents matching the query.
 
         Args:
             query (str): The query string to search for.
-            num_documents (Optional[int]): The maximum number of documents to return. Defaults to None.
+            limit (int): The maximum number of documents to return. Defaults to 5.
             filters (Optional[Dict[str, Any]]): Filters to apply to the search. Defaults to None.
 
         Returns:
@@ -73,8 +76,8 @@ class LlamaIndexVectorDb(VectorDb):
             raise ValueError(f"Knowledge retriever is not of type BaseRetriever: {self.knowledge_retriever}")
 
         lc_documents: List[NodeWithScore] = self.knowledge_retriever.retrieve(query)
-        if num_documents is not None:
-            lc_documents = lc_documents[:num_documents]
+        if limit is not None:
+            lc_documents = lc_documents[:limit]
         documents = []
         for lc_doc in lc_documents:
             documents.append(
@@ -125,3 +128,19 @@ class LlamaIndexVectorDb(VectorDb):
             metadata (Dict[str, Any]): The metadata to update
         """
         raise NotImplementedError("update_metadata not supported for LlamaIndex vectorstores")
+
+    def delete_by_content_id(self, content_id: str) -> bool:
+        """
+        Delete documents by content ID.
+        Not implemented for LlamaIndex wrapper.
+
+        Args:
+            content_id (str): The content ID to delete
+
+        Returns:
+            bool: False as this operation is not supported
+        """
+        logger.warning(
+            "LlamaIndexVectorDb.delete_by_content_id() not supported - please check the vectorstore manually."
+        )
+        return False
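Finally, a hedged sketch of the reworked LlamaIndex wrapper: the retriever is now passed through the new __init__, and search() takes limit where it previously took num_documents. The llama_index index setup is an assumption, not from this diff, and needs an embedding backend configured.

# Hedged sketch -- llama_index setup assumed; requires an embedding model/API key.
from llama_index.core import Document as LlamaDocument, VectorStoreIndex

from agno.vectordb.llamaindex.llamaindexdb import LlamaIndexVectorDb

index = VectorStoreIndex.from_documents([LlamaDocument(text="Agno is a framework for building agents.")])
vector_db = LlamaIndexVectorDb(knowledge_retriever=index.as_retriever())

docs = vector_db.search("What is Agno?", limit=3)  # was num_documents before 2.0.8
print([d.content for d in docs])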