agno 2.0.7__py3-none-any.whl → 2.0.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64):
  1. agno/agent/agent.py +83 -51
  2. agno/db/base.py +14 -0
  3. agno/db/dynamo/dynamo.py +107 -27
  4. agno/db/firestore/firestore.py +109 -33
  5. agno/db/gcs_json/gcs_json_db.py +100 -20
  6. agno/db/in_memory/in_memory_db.py +95 -20
  7. agno/db/json/json_db.py +101 -21
  8. agno/db/migrations/v1_to_v2.py +322 -47
  9. agno/db/mongo/mongo.py +251 -26
  10. agno/db/mysql/mysql.py +307 -6
  11. agno/db/postgres/postgres.py +279 -33
  12. agno/db/redis/redis.py +99 -22
  13. agno/db/singlestore/singlestore.py +319 -38
  14. agno/db/sqlite/sqlite.py +339 -23
  15. agno/knowledge/embedder/sentence_transformer.py +3 -3
  16. agno/knowledge/knowledge.py +152 -31
  17. agno/knowledge/types.py +8 -0
  18. agno/models/anthropic/claude.py +0 -20
  19. agno/models/cometapi/__init__.py +5 -0
  20. agno/models/cometapi/cometapi.py +57 -0
  21. agno/models/google/gemini.py +4 -8
  22. agno/models/huggingface/huggingface.py +2 -1
  23. agno/models/ollama/chat.py +52 -3
  24. agno/models/openai/chat.py +9 -7
  25. agno/models/openai/responses.py +21 -17
  26. agno/os/interfaces/agui/agui.py +2 -2
  27. agno/os/interfaces/agui/utils.py +81 -18
  28. agno/os/interfaces/base.py +2 -0
  29. agno/os/interfaces/slack/router.py +50 -10
  30. agno/os/interfaces/slack/slack.py +6 -4
  31. agno/os/interfaces/whatsapp/router.py +7 -4
  32. agno/os/interfaces/whatsapp/whatsapp.py +2 -2
  33. agno/os/router.py +18 -0
  34. agno/os/utils.py +10 -2
  35. agno/reasoning/azure_ai_foundry.py +2 -2
  36. agno/reasoning/deepseek.py +2 -2
  37. agno/reasoning/default.py +3 -1
  38. agno/reasoning/groq.py +2 -2
  39. agno/reasoning/ollama.py +2 -2
  40. agno/reasoning/openai.py +2 -2
  41. agno/run/base.py +15 -2
  42. agno/session/agent.py +8 -5
  43. agno/session/team.py +14 -10
  44. agno/team/team.py +218 -111
  45. agno/tools/function.py +43 -4
  46. agno/tools/mcp.py +60 -37
  47. agno/tools/mcp_toolbox.py +284 -0
  48. agno/tools/scrapegraph.py +58 -31
  49. agno/tools/whatsapp.py +1 -1
  50. agno/utils/gemini.py +147 -19
  51. agno/utils/models/claude.py +9 -0
  52. agno/utils/print_response/agent.py +18 -2
  53. agno/utils/print_response/team.py +22 -6
  54. agno/utils/reasoning.py +22 -1
  55. agno/utils/string.py +9 -0
  56. agno/vectordb/base.py +2 -2
  57. agno/vectordb/langchaindb/langchaindb.py +5 -7
  58. agno/vectordb/llamaindex/llamaindexdb.py +25 -6
  59. agno/workflow/workflow.py +30 -15
  60. {agno-2.0.7.dist-info → agno-2.0.9.dist-info}/METADATA +4 -1
  61. {agno-2.0.7.dist-info → agno-2.0.9.dist-info}/RECORD +64 -61
  62. {agno-2.0.7.dist-info → agno-2.0.9.dist-info}/WHEEL +0 -0
  63. {agno-2.0.7.dist-info → agno-2.0.9.dist-info}/licenses/LICENSE +0 -0
  64. {agno-2.0.7.dist-info → agno-2.0.9.dist-info}/top_level.txt +0 -0
agno/agent/agent.py CHANGED
@@ -30,6 +30,7 @@ from pydantic import BaseModel
30
30
  from agno.db.base import BaseDb, SessionType, UserMemory
31
31
  from agno.exceptions import ModelProviderError, RunCancelledException, StopAgentRun
32
32
  from agno.knowledge.knowledge import Knowledge
33
+ from agno.knowledge.types import KnowledgeFilter
33
34
  from agno.media import Audio, File, Image, Video
34
35
  from agno.memory import MemoryManager
35
36
  from agno.models.base import Model
@@ -455,6 +456,11 @@ class Agent:
455
456
  self.add_history_to_context = add_history_to_context
456
457
  self.num_history_runs = num_history_runs
457
458
 
459
+ if add_history_to_context and not db:
460
+ log_warning(
461
+ "add_history_to_context is True, but no database has been assigned to the agent. History will not be added to the context."
462
+ )
463
+
458
464
  self.store_media = store_media
459
465
 
460
466
  self.knowledge = knowledge
@@ -711,6 +717,14 @@ class Agent:
711
717
  # Determine the session_state
712
718
  if session_state is None:
713
719
  session_state = self.session_state or {}
720
+ else:
721
+ # If run session_state is provided, merge agent defaults under it
722
+ # This ensures run state takes precedence over agent defaults
723
+ if self.session_state:
724
+ base_state = self.session_state.copy()
725
+ merge_dictionaries(base_state, session_state)
726
+ session_state.clear()
727
+ session_state.update(base_state)
714
728
 
715
729
  if user_id is not None:
716
730
  session_state["current_user_id"] = user_id
@@ -1177,6 +1191,7 @@ class Agent:
1177
1191
  run_response=run_response,
1178
1192
  session=agent_session,
1179
1193
  session_state=session_state,
1194
+ dependencies=run_dependencies,
1180
1195
  user_id=user_id,
1181
1196
  async_mode=False,
1182
1197
  knowledge_filters=effective_filters,
@@ -1809,6 +1824,7 @@ class Agent:
1809
1824
  run_response=run_response,
1810
1825
  session=agent_session,
1811
1826
  session_state=session_state,
1827
+ dependencies=run_dependencies,
1812
1828
  user_id=user_id,
1813
1829
  async_mode=True,
1814
1830
  knowledge_filters=effective_filters,
@@ -3345,6 +3361,7 @@ class Agent:
3345
3361
  run_response.content = model_response.content
3346
3362
  run_response.content_type = "str"
3347
3363
 
3364
+ # Process reasoning content
3348
3365
  if model_response_event.reasoning_content is not None:
3349
3366
  model_response.reasoning_content = (
3350
3367
  model_response.reasoning_content or ""
@@ -3358,12 +3375,12 @@ class Agent:
3358
3375
  model_response.reasoning_content += model_response_event.redacted_reasoning_content
3359
3376
  run_response.reasoning_content = model_response.reasoning_content
3360
3377
 
3378
+ # Handle provider data (one chunk)
3361
3379
  if model_response_event.provider_data is not None:
3362
- # We get citations in one chunk
3363
- run_response.model_provider_data = model_response.provider_data
3380
+ run_response.model_provider_data = model_response_event.provider_data
3364
3381
 
3382
+ # Handle citations (one chunk)
3365
3383
  if model_response_event.citations is not None:
3366
- # We get citations in one chunk
3367
3384
  run_response.citations = model_response_event.citations
3368
3385
 
3369
3386
  # Only yield if we have content to show
@@ -3790,29 +3807,21 @@ class Agent:
3790
3807
 
3791
3808
  # If any of the tools has "agent" as parameter, set _rebuild_tools to True
3792
3809
  for tool in agent_tools:
3810
+ param_names = {"agent", "session_state", "team", "images", "videos", "audios", "files"}
3811
+
3793
3812
  if isinstance(tool, Function):
3794
- if "agent" in tool.parameters:
3795
- self._rebuild_tools = True
3796
- break
3797
- if "team" in tool.parameters:
3813
+ if param_names & set(tool.parameters):
3798
3814
  self._rebuild_tools = True
3799
3815
  break
3800
- if isinstance(tool, Toolkit):
3816
+ elif isinstance(tool, Toolkit):
3801
3817
  for func in tool.functions.values():
3802
- if "agent" in func.parameters:
3818
+ if param_names & set(func.parameters):
3803
3819
  self._rebuild_tools = True
3804
3820
  break
3805
- if "team" in func.parameters:
3806
- self._rebuild_tools = True
3807
- break
3808
- if callable(tool):
3821
+ elif callable(tool):
3809
3822
  from inspect import signature
3810
3823
 
3811
- sig = signature(tool)
3812
- if "agent" in sig.parameters:
3813
- self._rebuild_tools = True
3814
- break
3815
- if "team" in sig.parameters:
3824
+ if param_names & set(signature(tool).parameters):
3816
3825
  self._rebuild_tools = True
3817
3826
  break
3818
3827
 
@@ -4007,6 +4016,7 @@ class Agent:
4007
4016
  run_response: RunOutput,
4008
4017
  session: AgentSession,
4009
4018
  session_state: Optional[Dict[str, Any]] = None,
4019
+ dependencies: Optional[Dict[str, Any]] = None,
4010
4020
  user_id: Optional[str] = None,
4011
4021
  async_mode: bool = False,
4012
4022
  knowledge_filters: Optional[Dict[str, Any]] = None,
@@ -4116,6 +4126,7 @@ class Agent:
4116
4126
 
4117
4127
  for func in self._functions_for_model.values():
4118
4128
  func._session_state = session_state
4129
+ func._dependencies = dependencies
4119
4130
  func._images = joint_images
4120
4131
  func._files = joint_files
4121
4132
  func._audios = joint_audios
@@ -4244,7 +4255,8 @@ class Agent:
4244
4255
  def _update_session_state(self, session: AgentSession, session_state: Dict[str, Any]):
4245
4256
  """Load the existing Agent from an AgentSession (from the database)"""
4246
4257
 
4247
- # Get the session_state from the database and update the current session_state
4258
+ # Get the session_state from the database and merge with proper precedence
4259
+ # At this point session_state contains: agent_defaults + run_params
4248
4260
  if session.session_data is not None and "session_state" in session.session_data:
4249
4261
  session_state_from_db = session.session_data.get("session_state")
4250
4262
 
@@ -4253,10 +4265,11 @@ class Agent:
4253
4265
  and isinstance(session_state_from_db, dict)
4254
4266
  and len(session_state_from_db) > 0
4255
4267
  ):
4256
- # This updates session_state_from_db
4257
- # If there are conflicting keys, values from provided session_state will take precedence
4258
- merge_dictionaries(session_state_from_db, session_state)
4259
- session_state = session_state_from_db
4268
+ # This preserves precedence: run_params > db_state > agent_defaults
4269
+ merged_state = session_state_from_db.copy()
4270
+ merge_dictionaries(merged_state, session_state)
4271
+ session_state.clear()
4272
+ session_state.update(merged_state)
4260
4273
 
4261
4274
  # Update the session_state in the session
4262
4275
  if session.session_data is not None:
@@ -4933,7 +4946,7 @@ class Agent:
4933
4946
  system_message_content += f"{get_response_model_format_prompt(self.output_schema)}"
4934
4947
 
4935
4948
  # 3.3.15 Add the session state to the system message
4936
- if self.add_session_state_to_context and session_state is not None:
4949
+ if add_session_state_to_context and session_state is not None:
4937
4950
  system_message_content += self._get_formatted_session_state_for_system_message(session_state)
4938
4951
 
4939
4952
  # Return the system message
@@ -5204,9 +5217,16 @@ class Agent:
5204
5217
  if add_history_to_context:
5205
5218
  from copy import deepcopy
5206
5219
 
5220
+ # Only skip messages from history when system_message_role is NOT a standard conversation role.
5221
+ # Standard conversation roles ("user", "assistant", "tool") should never be filtered
5222
+ # to preserve conversation continuity.
5223
+ skip_role = (
5224
+ self.system_message_role if self.system_message_role not in ["user", "assistant", "tool"] else None
5225
+ )
5226
+
5207
5227
  history: List[Message] = session.get_messages_from_last_n_runs(
5208
5228
  last_n=self.num_history_runs,
5209
- skip_role=self.system_message_role,
5229
+ skip_role=skip_role,
5210
5230
  agent_id=self.id if self.team_id is not None else None,
5211
5231
  )
5212
5232
 
@@ -5924,6 +5944,7 @@ class Agent:
5924
5944
  min_steps=self.reasoning_min_steps,
5925
5945
  max_steps=self.reasoning_max_steps,
5926
5946
  tools=self.tools,
5947
+ tool_call_limit=self.tool_call_limit,
5927
5948
  use_json_mode=self.use_json_mode,
5928
5949
  telemetry=self.telemetry,
5929
5950
  debug_mode=self.debug_mode,
@@ -6149,6 +6170,7 @@ class Agent:
6149
6170
  min_steps=self.reasoning_min_steps,
6150
6171
  max_steps=self.reasoning_max_steps,
6151
6172
  tools=self.tools,
6173
+ tool_call_limit=self.tool_call_limit,
6152
6174
  use_json_mode=self.use_json_mode,
6153
6175
  telemetry=self.telemetry,
6154
6176
  debug_mode=self.debug_mode,
@@ -6720,17 +6742,18 @@ class Agent:
6720
6742
  ) -> Function:
6721
6743
  """Factory function to create a search_knowledge_base function with filters."""
6722
6744
 
6723
- def search_knowledge_base(query: str, filters: Optional[Dict[str, Any]] = None) -> str:
6745
+ def search_knowledge_base(query: str, filters: Optional[List[KnowledgeFilter]] = None) -> str:
6724
6746
  """Use this function to search the knowledge base for information about a query.
6725
6747
 
6726
6748
  Args:
6727
6749
  query: The query to search for.
6728
- filters: The filters to apply to the search. This is a dictionary of key-value pairs.
6750
+ filters (optional): The filters to apply to the search. This is a list of KnowledgeFilter objects.
6729
6751
 
6730
6752
  Returns:
6731
6753
  str: A string containing the response from the knowledge base.
6732
6754
  """
6733
- search_filters = get_agentic_or_user_search_filters(filters, knowledge_filters)
6755
+ filters_dict = {filt.key: filt.value for filt in filters} if filters else None
6756
+ search_filters = get_agentic_or_user_search_filters(filters_dict, knowledge_filters)
6734
6757
 
6735
6758
  # Get the relevant documents from the knowledge base, passing filters
6736
6759
  retrieval_timer = Timer()
@@ -6753,17 +6776,18 @@ class Agent:
6753
6776
  return "No documents found"
6754
6777
  return self._convert_documents_to_string(docs_from_knowledge)
6755
6778
 
6756
- async def asearch_knowledge_base(query: str, filters: Optional[Dict[str, Any]] = None) -> str:
6779
+ async def asearch_knowledge_base(query: str, filters: Optional[List[KnowledgeFilter]] = None) -> str:
6757
6780
  """Use this function to search the knowledge base for information about a query asynchronously.
6758
6781
 
6759
6782
  Args:
6760
6783
  query: The query to search for.
6761
- filters: The filters to apply to the search. This is a dictionary of key-value pairs.
6784
+ filters (optional): The filters to apply to the search. This is a list of KnowledgeFilter objects.
6762
6785
 
6763
6786
  Returns:
6764
6787
  str: A string containing the response from the knowledge base.
6765
6788
  """
6766
- search_filters = get_agentic_or_user_search_filters(filters, knowledge_filters)
6789
+ filters_dict = {filt.key: filt.value for filt in filters} if filters else None
6790
+ search_filters = get_agentic_or_user_search_filters(filters_dict, knowledge_filters)
6767
6791
 
6768
6792
  retrieval_timer = Timer()
6769
6793
  retrieval_timer.start()
@@ -6894,21 +6918,21 @@ class Agent:
6894
6918
  stream: Optional[bool] = None,
6895
6919
  stream_intermediate_steps: Optional[bool] = None,
6896
6920
  markdown: Optional[bool] = None,
6921
+ knowledge_filters: Optional[Dict[str, Any]] = None,
6922
+ add_history_to_context: Optional[bool] = None,
6923
+ add_dependencies_to_context: Optional[bool] = None,
6924
+ dependencies: Optional[Dict[str, Any]] = None,
6925
+ add_session_state_to_context: Optional[bool] = None,
6926
+ metadata: Optional[Dict[str, Any]] = None,
6927
+ debug_mode: Optional[bool] = None,
6897
6928
  show_message: bool = True,
6898
6929
  show_reasoning: bool = True,
6899
6930
  show_full_reasoning: bool = False,
6900
6931
  console: Optional[Any] = None,
6901
6932
  # Add tags to include in markdown content
6902
6933
  tags_to_include_in_markdown: Optional[Set[str]] = None,
6903
- knowledge_filters: Optional[Dict[str, Any]] = None,
6904
- add_history_to_context: Optional[bool] = None,
6905
- dependencies: Optional[Dict[str, Any]] = None,
6906
- metadata: Optional[Dict[str, Any]] = None,
6907
- debug_mode: Optional[bool] = None,
6908
6934
  **kwargs: Any,
6909
6935
  ) -> None:
6910
- add_history = add_history_to_context if add_history_to_context is not None else self.add_history_to_context
6911
-
6912
6936
  if not tags_to_include_in_markdown:
6913
6937
  tags_to_include_in_markdown = {"think", "thinking"}
6914
6938
 
@@ -6945,8 +6969,10 @@ class Agent:
6945
6969
  show_full_reasoning=show_full_reasoning,
6946
6970
  tags_to_include_in_markdown=tags_to_include_in_markdown,
6947
6971
  console=console,
6948
- add_history_to_context=add_history,
6972
+ add_history_to_context=add_history_to_context,
6949
6973
  dependencies=dependencies,
6974
+ add_dependencies_to_context=add_dependencies_to_context,
6975
+ add_session_state_to_context=add_session_state_to_context,
6950
6976
  metadata=metadata,
6951
6977
  **kwargs,
6952
6978
  )
@@ -6971,8 +6997,10 @@ class Agent:
6971
6997
  show_full_reasoning=show_full_reasoning,
6972
6998
  tags_to_include_in_markdown=tags_to_include_in_markdown,
6973
6999
  console=console,
6974
- add_history_to_context=add_history,
7000
+ add_history_to_context=add_history_to_context,
6975
7001
  dependencies=dependencies,
7002
+ add_dependencies_to_context=add_dependencies_to_context,
7003
+ add_session_state_to_context=add_session_state_to_context,
6976
7004
  metadata=metadata,
6977
7005
  **kwargs,
6978
7006
  )
@@ -6991,21 +7019,21 @@ class Agent:
6991
7019
  stream: Optional[bool] = None,
6992
7020
  stream_intermediate_steps: Optional[bool] = None,
6993
7021
  markdown: Optional[bool] = None,
7022
+ knowledge_filters: Optional[Dict[str, Any]] = None,
7023
+ add_history_to_context: Optional[bool] = None,
7024
+ dependencies: Optional[Dict[str, Any]] = None,
7025
+ add_dependencies_to_context: Optional[bool] = None,
7026
+ add_session_state_to_context: Optional[bool] = None,
7027
+ metadata: Optional[Dict[str, Any]] = None,
7028
+ debug_mode: Optional[bool] = None,
6994
7029
  show_message: bool = True,
6995
7030
  show_reasoning: bool = True,
6996
7031
  show_full_reasoning: bool = False,
6997
7032
  console: Optional[Any] = None,
6998
7033
  # Add tags to include in markdown content
6999
7034
  tags_to_include_in_markdown: Optional[Set[str]] = None,
7000
- knowledge_filters: Optional[Dict[str, Any]] = None,
7001
- add_history_to_context: Optional[bool] = None,
7002
- dependencies: Optional[Dict[str, Any]] = None,
7003
- metadata: Optional[Dict[str, Any]] = None,
7004
- debug_mode: Optional[bool] = None,
7005
7035
  **kwargs: Any,
7006
7036
  ) -> None:
7007
- add_history = add_history_to_context if add_history_to_context is not None else self.add_history_to_context
7008
-
7009
7037
  if not tags_to_include_in_markdown:
7010
7038
  tags_to_include_in_markdown = {"think", "thinking"}
7011
7039
 
@@ -7041,8 +7069,10 @@ class Agent:
7041
7069
  show_full_reasoning=show_full_reasoning,
7042
7070
  tags_to_include_in_markdown=tags_to_include_in_markdown,
7043
7071
  console=console,
7044
- add_history_to_context=add_history,
7072
+ add_history_to_context=add_history_to_context,
7045
7073
  dependencies=dependencies,
7074
+ add_dependencies_to_context=add_dependencies_to_context,
7075
+ add_session_state_to_context=add_session_state_to_context,
7046
7076
  metadata=metadata,
7047
7077
  **kwargs,
7048
7078
  )
@@ -7066,8 +7096,10 @@ class Agent:
7066
7096
  show_full_reasoning=show_full_reasoning,
7067
7097
  tags_to_include_in_markdown=tags_to_include_in_markdown,
7068
7098
  console=console,
7069
- add_history_to_context=add_history,
7099
+ add_history_to_context=add_history_to_context,
7070
7100
  dependencies=dependencies,
7101
+ add_dependencies_to_context=add_dependencies_to_context,
7102
+ add_session_state_to_context=add_session_state_to_context,
7071
7103
  metadata=metadata,
7072
7104
  **kwargs,
7073
7105
  )
@@ -7374,8 +7406,8 @@ class Agent:
7374
7406
  "parser_model": self.parser_model.to_dict() if self.parser_model else None,
7375
7407
  "output_model": self.output_model.to_dict() if self.output_model else None,
7376
7408
  "has_tools": self.tools is not None,
7377
- "has_memory": self.enable_user_memories is not None,
7378
- "has_reasoning": self.reasoning is not None,
7409
+ "has_memory": self.enable_user_memories is True,
7410
+ "has_reasoning": self.reasoning is True,
7379
7411
  "has_knowledge": self.knowledge is not None,
7380
7412
  "has_input_schema": self.input_schema is not None,
7381
7413
  "has_output_schema": self.output_schema is not None,
agno/db/base.py CHANGED
@@ -81,6 +81,13 @@ class BaseDb(ABC):
81
81
  ) -> Optional[Union[Session, Dict[str, Any]]]:
82
82
  raise NotImplementedError
83
83
 
84
+ @abstractmethod
85
+ def upsert_sessions(
86
+ self, sessions: List[Session], deserialize: Optional[bool] = True
87
+ ) -> List[Union[Session, Dict[str, Any]]]:
88
+ """Bulk upsert multiple sessions for improved performance on large datasets."""
89
+ raise NotImplementedError
90
+
84
91
  # --- Memory ---
85
92
 
86
93
  @abstractmethod
@@ -135,6 +142,13 @@ class BaseDb(ABC):
135
142
  ) -> Optional[Union[UserMemory, Dict[str, Any]]]:
136
143
  raise NotImplementedError
137
144
 
145
+ @abstractmethod
146
+ def upsert_memories(
147
+ self, memories: List[UserMemory], deserialize: Optional[bool] = True
148
+ ) -> List[Union[UserMemory, Dict[str, Any]]]:
149
+ """Bulk upsert multiple memories for improved performance on large datasets."""
150
+ raise NotImplementedError
151
+
138
152
  # --- Metrics ---
139
153
  @abstractmethod
140
154
  def get_metrics(