agno 2.3.11__py3-none-any.whl → 2.3.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. agno/compression/manager.py +87 -16
  2. agno/db/mongo/async_mongo.py +1 -1
  3. agno/db/mongo/mongo.py +1 -1
  4. agno/exceptions.py +1 -0
  5. agno/knowledge/knowledge.py +83 -20
  6. agno/knowledge/reader/csv_reader.py +2 -2
  7. agno/knowledge/reader/text_reader.py +15 -3
  8. agno/knowledge/reader/wikipedia_reader.py +33 -1
  9. agno/memory/strategies/base.py +3 -4
  10. agno/models/anthropic/claude.py +44 -0
  11. agno/models/aws/bedrock.py +60 -0
  12. agno/models/base.py +124 -30
  13. agno/models/google/gemini.py +141 -23
  14. agno/models/litellm/chat.py +25 -0
  15. agno/models/openai/responses.py +44 -0
  16. agno/os/routers/knowledge/knowledge.py +0 -1
  17. agno/run/agent.py +17 -0
  18. agno/run/requirement.py +89 -6
  19. agno/utils/print_response/agent.py +4 -4
  20. agno/utils/print_response/team.py +12 -12
  21. agno/utils/tokens.py +643 -27
  22. agno/vectordb/chroma/chromadb.py +6 -2
  23. agno/vectordb/lancedb/lance_db.py +3 -37
  24. agno/vectordb/milvus/milvus.py +6 -32
  25. agno/vectordb/mongodb/mongodb.py +0 -27
  26. agno/vectordb/pgvector/pgvector.py +15 -5
  27. agno/vectordb/pineconedb/pineconedb.py +0 -17
  28. agno/vectordb/qdrant/qdrant.py +6 -29
  29. agno/vectordb/redis/redisdb.py +0 -26
  30. agno/vectordb/singlestore/singlestore.py +16 -8
  31. agno/vectordb/surrealdb/surrealdb.py +0 -36
  32. agno/vectordb/weaviate/weaviate.py +6 -2
  33. {agno-2.3.11.dist-info → agno-2.3.12.dist-info}/METADATA +4 -1
  34. {agno-2.3.11.dist-info → agno-2.3.12.dist-info}/RECORD +37 -37
  35. {agno-2.3.11.dist-info → agno-2.3.12.dist-info}/WHEEL +0 -0
  36. {agno-2.3.11.dist-info → agno-2.3.12.dist-info}/licenses/LICENSE +0 -0
  37. {agno-2.3.11.dist-info → agno-2.3.12.dist-info}/top_level.txt +0 -0
agno/models/openai/responses.py CHANGED
@@ -17,6 +17,7 @@ from agno.utils.http import get_default_async_client, get_default_sync_client
 from agno.utils.log import log_debug, log_error, log_warning
 from agno.utils.models.openai_responses import images_to_message
 from agno.utils.models.schema_utils import get_response_schema_for_provider
+from agno.utils.tokens import count_schema_tokens
 
 try:
     from openai import APIConnectionError, APIStatusError, AsyncOpenAI, OpenAI, RateLimitError
@@ -520,6 +521,49 @@ class OpenAIResponses(Model):
             formatted_messages.append(reasoning_output)
         return formatted_messages
 
+    def count_tokens(
+        self,
+        messages: List[Message],
+        tools: Optional[List[Dict[str, Any]]] = None,
+        output_schema: Optional[Union[Dict, Type[BaseModel]]] = None,
+    ) -> int:
+        try:
+            formatted_input = self._format_messages(messages, compress_tool_results=True)
+            formatted_tools = self._format_tool_params(messages, tools) if tools else None
+
+            response = self.get_client().responses.input_tokens.count(
+                model=self.id,
+                input=formatted_input,  # type: ignore
+                instructions=self.instructions,  # type: ignore
+                tools=formatted_tools,  # type: ignore
+            )
+            return response.input_tokens + count_schema_tokens(output_schema, self.id)
+        except Exception as e:
+            log_warning(f"Failed to count tokens via API: {e}")
+            return super().count_tokens(messages, tools, output_schema)
+
+    async def acount_tokens(
+        self,
+        messages: List[Message],
+        tools: Optional[List[Dict[str, Any]]] = None,
+        output_schema: Optional[Union[Dict, Type[BaseModel]]] = None,
+    ) -> int:
+        """Async version of count_tokens using the async client."""
+        try:
+            formatted_input = self._format_messages(messages, compress_tool_results=True)
+            formatted_tools = self._format_tool_params(messages, tools) if tools else None
+
+            response = await self.get_async_client().responses.input_tokens.count(
+                model=self.id,
+                input=formatted_input,  # type: ignore
+                instructions=self.instructions,  # type: ignore
+                tools=formatted_tools,  # type: ignore
+            )
+            return response.input_tokens + count_schema_tokens(output_schema, self.id)
+        except Exception as e:
+            log_warning(f"Failed to count tokens via API: {e}")
+            return await super().acount_tokens(messages, tools, output_schema)
+
     def invoke(
         self,
         messages: List[Message],
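For orientation (not part of the diff): the new count_tokens/acount_tokens methods try the provider-side token count first and fall back to the base Model implementation on any failure. A minimal usage sketch, assuming OpenAIResponses is importable from agno.models.openai and Message from agno.models.message; the import paths and model id are illustrative, not taken from this diff:

    from agno.models.message import Message
    from agno.models.openai import OpenAIResponses

    # Model id is illustrative; any Responses-capable id works the same way.
    model = OpenAIResponses(id="gpt-4o")
    messages = [Message(role="user", content="Summarize the latest release notes.")]

    # Uses the provider-side count when the API call succeeds, otherwise
    # falls back to Model.count_tokens() as shown in the except branch above.
    input_tokens = model.count_tokens(messages)
    print(f"estimated input tokens: {input_tokens}")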
agno/os/routers/knowledge/knowledge.py CHANGED
@@ -888,7 +888,6 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> AP
 
         # Add custom readers from knowledge.readers
         readers_result: Any = knowledge.get_readers() or {}
-        print(f"readers_result: {readers_result}")
         # Ensure readers_dict is a dictionary (defensive check)
         if not isinstance(readers_result, dict):
             readers_dict: Dict[str, Reader] = {}
agno/run/agent.py CHANGED
@@ -604,6 +604,7 @@ class RunOutput:
                 "reasoning_steps",
                 "reasoning_messages",
                 "references",
+                "requirements",
             ]
         }
 
@@ -689,6 +690,9 @@
            else:
                _dict["tools"].append(tool)
 
+        if self.requirements is not None:
+            _dict["requirements"] = [req.to_dict() if hasattr(req, "to_dict") else req for req in self.requirements]
+
        if self.input is not None:
            _dict["input"] = self.input.to_dict()
 
@@ -735,6 +739,18 @@
        tools = data.pop("tools", [])
        tools = [ToolExecution.from_dict(tool) for tool in tools] if tools else None
 
+        # Handle requirements
+        requirements_data = data.pop("requirements", None)
+        requirements: Optional[List[RunRequirement]] = None
+        if requirements_data is not None:
+            requirements_list: List[RunRequirement] = []
+            for item in requirements_data:
+                if isinstance(item, RunRequirement):
+                    requirements_list.append(item)
+                elif isinstance(item, dict):
+                    requirements_list.append(RunRequirement.from_dict(item))
+            requirements = requirements_list if requirements_list else None
+
        images = reconstruct_images(data.pop("images", []))
        videos = reconstruct_videos(data.pop("videos", []))
        audio = reconstruct_audio_list(data.pop("audio", []))
@@ -789,6 +805,7 @@
            reasoning_steps=reasoning_steps,
            reasoning_messages=reasoning_messages,
            references=references,
+            requirements=requirements,
            **filtered_data,
        )
 
agno/run/requirement.py CHANGED
@@ -1,6 +1,6 @@
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from datetime import datetime, timezone
-from typing import TYPE_CHECKING, List, Optional
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
 from uuid import uuid4
 
 from agno.models.response import ToolExecution, UserInputField
@@ -14,7 +14,7 @@ class RunRequirement:
     """Requirement to complete a paused run (used in HITL flows)"""
 
     tool_execution: Optional[ToolExecution] = None
-    created_at: datetime = datetime.now(timezone.utc)
+    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
 
     # User confirmation
     confirmation: Optional[bool] = None
@@ -26,10 +26,19 @@
     # External execution
     external_execution_result: Optional[str] = None
 
-    def __init__(self, tool_execution: ToolExecution):
-        self.id = str(uuid4())
+    def __init__(
+        self,
+        tool_execution: ToolExecution,
+        id: Optional[str] = None,
+        created_at: Optional[datetime] = None,
+    ):
+        self.id = id or str(uuid4())
         self.tool_execution = tool_execution
-        self.user_input_schema = tool_execution.user_input_schema
+        self.user_input_schema = tool_execution.user_input_schema if tool_execution else None
+        self.created_at = created_at or datetime.now(timezone.utc)
+        self.confirmation = None
+        self.confirmation_note = None
+        self.external_execution_result = None
 
     @property
     def needs_confirmation(self) -> bool:
@@ -96,3 +105,77 @@
     def is_resolved(self) -> bool:
         """Return True if the requirement has been resolved"""
         return not self.needs_confirmation and not self.needs_user_input and not self.needs_external_execution
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to JSON-serializable dictionary for storage."""
+        _dict: Dict[str, Any] = {
+            "id": self.id,
+            "created_at": self.created_at.isoformat() if isinstance(self.created_at, datetime) else self.created_at,
+            "confirmation": self.confirmation,
+            "confirmation_note": self.confirmation_note,
+            "external_execution_result": self.external_execution_result,
+        }
+
+        if self.tool_execution is not None:
+            _dict["tool_execution"] = (
+                self.tool_execution.to_dict() if isinstance(self.tool_execution, ToolExecution) else self.tool_execution
+            )
+
+        if self.user_input_schema is not None:
+            _dict["user_input_schema"] = [f.to_dict() if hasattr(f, "to_dict") else f for f in self.user_input_schema]
+
+        return {k: v for k, v in _dict.items() if v is not None}
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "RunRequirement":
+        """Reconstruct from stored dictionary."""
+        if data is None:
+            raise ValueError("RunRequirement.from_dict() requires a non-None dict")
+
+        # Handle tool_execution
+        tool_data = data.get("tool_execution")
+        tool_execution: Optional[ToolExecution] = None
+        if isinstance(tool_data, ToolExecution):
+            tool_execution = tool_data
+        elif isinstance(tool_data, dict):
+            tool_execution = ToolExecution.from_dict(tool_data)
+
+        # Handle created_at (ISO string or datetime)
+        created_at_raw = data.get("created_at")
+        created_at: Optional[datetime] = None
+        if isinstance(created_at_raw, datetime):
+            created_at = created_at_raw
+        elif isinstance(created_at_raw, str):
+            try:
+                created_at = datetime.fromisoformat(created_at_raw)
+            except ValueError:
+                created_at = None
+
+        # Build requirement - tool_execution is required by __init__
+        # For legacy data without tool_execution, create a minimal placeholder
+        if tool_execution is None:
+            tool_execution = ToolExecution(tool_name="unknown", tool_args={})
+
+        requirement = cls(
+            tool_execution=tool_execution,
+            id=data.get("id"),
+            created_at=created_at,
+        )
+
+        # Set optional fields
+        requirement.confirmation = data.get("confirmation")
+        requirement.confirmation_note = data.get("confirmation_note")
+        requirement.external_execution_result = data.get("external_execution_result")
+
+        # Handle user_input_schema
+        schema_raw = data.get("user_input_schema")
+        if schema_raw is not None:
+            rebuilt_schema: List[UserInputField] = []
+            for item in schema_raw:
+                if isinstance(item, UserInputField):
+                    rebuilt_schema.append(item)
+                elif isinstance(item, dict):
+                    rebuilt_schema.append(UserInputField.from_dict(item))
+            requirement.user_input_schema = rebuilt_schema if rebuilt_schema else None
+
+        return requirement
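Taken together with the RunOutput changes above, these hooks let a paused run's requirements survive storage. A round-trip sketch built only from the constructors and methods shown in this diff; the import path for RunRequirement is assumed from the file location, and the tool name and arguments are made up:

    from agno.models.response import ToolExecution
    from agno.run.requirement import RunRequirement

    # A requirement wrapping a pending tool call (values illustrative).
    req = RunRequirement(tool_execution=ToolExecution(tool_name="get_weather", tool_args={"city": "Paris"}))
    req.confirmation = True

    data = req.to_dict()                       # JSON-serializable; None values are dropped
    restored = RunRequirement.from_dict(data)  # id, created_at and tool_execution are rebuilt

    assert restored.id == req.id
    assert restored.confirmation is True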
agno/utils/print_response/agent.py CHANGED
@@ -469,8 +469,8 @@ def build_panels_stream(
         stats = compression_manager.stats
         saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
         orig = stats.get("original_size", 1)
-        if stats.get("messages_compressed", 0) > 0:
-            tool_calls_text += f"\n\nTool results compressed: {stats.get('messages_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
+        if stats.get("tool_results_compressed", 0) > 0:
+            tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
 
         tool_calls_panel = create_panel(
             content=tool_calls_text,
@@ -847,8 +847,8 @@ def build_panels(
         stats = compression_manager.stats
         saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
         orig = stats.get("original_size", 1)
-        if stats.get("messages_compressed", 0) > 0:
-            tool_calls_text += f"\n\nTool results compressed: {stats.get('messages_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
+        if stats.get("tool_results_compressed", 0) > 0:
+            tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
         compression_manager.stats.clear()
 
         tool_calls_panel = create_panel(
agno/utils/print_response/team.py CHANGED
@@ -267,8 +267,8 @@ def print_response(
         stats = team.compression_manager.stats
         saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
         orig = stats.get("original_size", 1)
-        if stats.get("messages_compressed", 0) > 0:
-            tool_calls_text += f"\n\nTool results compressed: {stats.get('messages_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
+        if stats.get("tool_results_compressed", 0) > 0:
+            tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
         team.compression_manager.stats.clear()
 
         team_tool_calls_panel = create_panel(
@@ -631,8 +631,8 @@ def print_response_stream(
         stats = team.compression_manager.stats
         saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
         orig = stats.get("original_size", 1)
-        if stats.get("messages_compressed", 0) > 0:
-            tool_calls_text += f"\n\nTool results compressed: {stats.get('messages_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
+        if stats.get("tool_results_compressed", 0) > 0:
+            tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
 
         team_tool_calls_panel = create_panel(
             content=tool_calls_text,
@@ -841,8 +841,8 @@ def print_response_stream(
         stats = team.compression_manager.stats
         saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
         orig = stats.get("original_size", 1)
-        if stats.get("messages_compressed", 0) > 0:
-            tool_calls_text += f"\n\nTool results compressed: {stats.get('messages_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
+        if stats.get("tool_results_compressed", 0) > 0:
+            tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
         team.compression_manager.stats.clear()
 
         team_tool_calls_panel = create_panel(
@@ -1132,8 +1132,8 @@ async def aprint_response(
         stats = team.compression_manager.stats
         saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
         orig = stats.get("original_size", 1)
-        if stats.get("messages_compressed", 0) > 0:
-            tool_calls_text += f"\n\nTool results compressed: {stats.get('messages_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
+        if stats.get("tool_results_compressed", 0) > 0:
+            tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
         team.compression_manager.stats.clear()
 
         team_tool_calls_panel = create_panel(
@@ -1494,8 +1494,8 @@ async def aprint_response_stream(
         stats = team.compression_manager.stats
         saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
         orig = stats.get("original_size", 1)
-        if stats.get("messages_compressed", 0) > 0:
-            tool_calls_text += f"\n\nTool results compressed: {stats.get('messages_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
+        if stats.get("tool_results_compressed", 0) > 0:
+            tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
 
         team_tool_calls_panel = create_panel(
             content=tool_calls_text,
@@ -1722,8 +1722,8 @@ async def aprint_response_stream(
         stats = team.compression_manager.stats
         saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)
         orig = stats.get("original_size", 1)
-        if stats.get("messages_compressed", 0) > 0:
-            tool_calls_text += f"\n\nTool results compressed: {stats.get('messages_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
+        if stats.get("tool_results_compressed", 0) > 0:
+            tool_calls_text += f"\n\ncompressed: {stats.get('tool_results_compressed', 0)} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)"
         team.compression_manager.stats.clear()
 
         team_tool_calls_panel = create_panel(
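The agent and team print helpers above now key the compression summary line off tool_results_compressed instead of messages_compressed. A worked example of the arithmetic using the stats keys that appear in these hunks; the numbers are made up:

    stats = {"original_size": 12_000, "compressed_size": 3_000, "tool_results_compressed": 4}

    saved = stats.get("original_size", 0) - stats.get("compressed_size", 0)  # 9,000 chars
    orig = stats.get("original_size", 1)
    if stats.get("tool_results_compressed", 0) > 0:
        # Prints: compressed: 4 | Saved: 9,000 chars (75%)
        print(f"compressed: {stats['tool_results_compressed']} | Saved: {saved:,} chars ({saved / orig * 100:.0f}%)")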