agno 2.0.7 → 2.0.9 (py3-none-any.whl)

This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (64)
  1. agno/agent/agent.py +83 -51
  2. agno/db/base.py +14 -0
  3. agno/db/dynamo/dynamo.py +107 -27
  4. agno/db/firestore/firestore.py +109 -33
  5. agno/db/gcs_json/gcs_json_db.py +100 -20
  6. agno/db/in_memory/in_memory_db.py +95 -20
  7. agno/db/json/json_db.py +101 -21
  8. agno/db/migrations/v1_to_v2.py +322 -47
  9. agno/db/mongo/mongo.py +251 -26
  10. agno/db/mysql/mysql.py +307 -6
  11. agno/db/postgres/postgres.py +279 -33
  12. agno/db/redis/redis.py +99 -22
  13. agno/db/singlestore/singlestore.py +319 -38
  14. agno/db/sqlite/sqlite.py +339 -23
  15. agno/knowledge/embedder/sentence_transformer.py +3 -3
  16. agno/knowledge/knowledge.py +152 -31
  17. agno/knowledge/types.py +8 -0
  18. agno/models/anthropic/claude.py +0 -20
  19. agno/models/cometapi/__init__.py +5 -0
  20. agno/models/cometapi/cometapi.py +57 -0
  21. agno/models/google/gemini.py +4 -8
  22. agno/models/huggingface/huggingface.py +2 -1
  23. agno/models/ollama/chat.py +52 -3
  24. agno/models/openai/chat.py +9 -7
  25. agno/models/openai/responses.py +21 -17
  26. agno/os/interfaces/agui/agui.py +2 -2
  27. agno/os/interfaces/agui/utils.py +81 -18
  28. agno/os/interfaces/base.py +2 -0
  29. agno/os/interfaces/slack/router.py +50 -10
  30. agno/os/interfaces/slack/slack.py +6 -4
  31. agno/os/interfaces/whatsapp/router.py +7 -4
  32. agno/os/interfaces/whatsapp/whatsapp.py +2 -2
  33. agno/os/router.py +18 -0
  34. agno/os/utils.py +10 -2
  35. agno/reasoning/azure_ai_foundry.py +2 -2
  36. agno/reasoning/deepseek.py +2 -2
  37. agno/reasoning/default.py +3 -1
  38. agno/reasoning/groq.py +2 -2
  39. agno/reasoning/ollama.py +2 -2
  40. agno/reasoning/openai.py +2 -2
  41. agno/run/base.py +15 -2
  42. agno/session/agent.py +8 -5
  43. agno/session/team.py +14 -10
  44. agno/team/team.py +218 -111
  45. agno/tools/function.py +43 -4
  46. agno/tools/mcp.py +60 -37
  47. agno/tools/mcp_toolbox.py +284 -0
  48. agno/tools/scrapegraph.py +58 -31
  49. agno/tools/whatsapp.py +1 -1
  50. agno/utils/gemini.py +147 -19
  51. agno/utils/models/claude.py +9 -0
  52. agno/utils/print_response/agent.py +18 -2
  53. agno/utils/print_response/team.py +22 -6
  54. agno/utils/reasoning.py +22 -1
  55. agno/utils/string.py +9 -0
  56. agno/vectordb/base.py +2 -2
  57. agno/vectordb/langchaindb/langchaindb.py +5 -7
  58. agno/vectordb/llamaindex/llamaindexdb.py +25 -6
  59. agno/workflow/workflow.py +30 -15
  60. {agno-2.0.7.dist-info → agno-2.0.9.dist-info}/METADATA +4 -1
  61. {agno-2.0.7.dist-info → agno-2.0.9.dist-info}/RECORD +64 -61
  62. {agno-2.0.7.dist-info → agno-2.0.9.dist-info}/WHEEL +0 -0
  63. {agno-2.0.7.dist-info → agno-2.0.9.dist-info}/licenses/LICENSE +0 -0
  64. {agno-2.0.7.dist-info → agno-2.0.9.dist-info}/top_level.txt +0 -0
agno/knowledge/knowledge.py CHANGED
@@ -21,7 +21,6 @@ from agno.knowledge.remote_content.remote_content import GCSContent, RemoteContent
  from agno.utils.http import async_fetch_with_retry
  from agno.utils.log import log_debug, log_error, log_info, log_warning
  from agno.utils.string import generate_id
- from agno.vectordb import VectorDb

  ContentDict = Dict[str, Union[str, Dict[str, str]]]

@@ -39,12 +38,15 @@ class Knowledge:

  name: Optional[str] = None
  description: Optional[str] = None
- vector_db: Optional[VectorDb] = None
+ vector_db: Optional[Any] = None
  contents_db: Optional[BaseDb] = None
  max_results: int = 10
  readers: Optional[Dict[str, Reader]] = None

  def __post_init__(self):
+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
  if self.vector_db and not self.vector_db.exists():
  self.vector_db.create()

@@ -64,9 +66,12 @@ class Knowledge:
  paths: Optional[List[str]] = None,
  urls: Optional[List[str]] = None,
  metadata: Optional[Dict[str, str]] = None,
+ topics: Optional[List[str]] = None,
+ text_contents: Optional[List[str]] = None,
+ reader: Optional[Reader] = None,
  include: Optional[List[str]] = None,
  exclude: Optional[List[str]] = None,
- upsert: bool = False,
+ upsert: bool = True,
  skip_if_exists: bool = False,
  remote_content: Optional[RemoteContent] = None,
  ) -> None: ...
@@ -74,7 +79,7 @@ class Knowledge:
  async def add_contents_async(self, *args, **kwargs) -> None:
  if args and isinstance(args[0], list):
  arguments = args[0]
- upsert = kwargs.get("upsert", False)
+ upsert = kwargs.get("upsert", True)
  skip_if_exists = kwargs.get("skip_if_exists", False)
  for argument in arguments:
  await self.add_content_async(
@@ -84,6 +89,7 @@ class Knowledge:
  url=argument.get("url"),
  metadata=argument.get("metadata"),
  topics=argument.get("topics"),
+ text_contents=argument.get("text_contents"),
  reader=argument.get("reader"),
  include=argument.get("include"),
  exclude=argument.get("exclude"),
@@ -97,11 +103,13 @@ class Knowledge:
  metadata = kwargs.get("metadata", {})
  description = kwargs.get("description", [])
  topics = kwargs.get("topics", [])
+ reader = kwargs.get("reader", None)
  paths = kwargs.get("paths", [])
  urls = kwargs.get("urls", [])
+ text_contents = kwargs.get("text_contents", [])
  include = kwargs.get("include")
  exclude = kwargs.get("exclude")
- upsert = kwargs.get("upsert", False)
+ upsert = kwargs.get("upsert", True)
  skip_if_exists = kwargs.get("skip_if_exists", False)
  remote_content = kwargs.get("remote_content", None)
  for path in paths:
@@ -126,6 +134,19 @@ class Knowledge:
  upsert=upsert,
  skip_if_exists=skip_if_exists,
  )
+ for i, text_content in enumerate(text_contents):
+ content_name = f"{name}_{i}" if name else f"text_content_{i}"
+ log_debug(f"Adding text content: {content_name}")
+ await self.add_content_async(
+ name=content_name,
+ description=description,
+ text_content=text_content,
+ metadata=metadata,
+ include=include,
+ exclude=exclude,
+ upsert=upsert,
+ skip_if_exists=skip_if_exists,
+ )
  if topics:
  await self.add_content_async(
  name=name,
@@ -136,6 +157,7 @@ class Knowledge:
  exclude=exclude,
  upsert=upsert,
  skip_if_exists=skip_if_exists,
+ reader=reader,
  )

  if remote_content:
@@ -163,7 +185,7 @@ class Knowledge:
  metadata: Optional[Dict[str, str]] = None,
  include: Optional[List[str]] = None,
  exclude: Optional[List[str]] = None,
- upsert: bool = False,
+ upsert: bool = True,
  skip_if_exists: bool = False,
  ) -> None: ...

@@ -201,7 +223,7 @@ class Knowledge:
  metadata: Optional[Dict[str, str]] = None,
  include: Optional[List[str]] = None,
  exclude: Optional[List[str]] = None,
- upsert: bool = False,
+ upsert: bool = True,
  skip_if_exists: bool = False,
  reader: Optional[Reader] = None,
  auth: Optional[ContentAuth] = None,
@@ -268,7 +290,7 @@ class Knowledge:
  metadata: Optional[Dict[str, str]] = None,
  include: Optional[List[str]] = None,
  exclude: Optional[List[str]] = None,
- upsert: bool = False,
+ upsert: bool = True,
  skip_if_exists: bool = False,
  reader: Optional[Reader] = None,
  auth: Optional[ContentAuth] = None,
@@ -291,7 +313,7 @@ class Knowledge:
  include: Optional[List[str]] = None,
  exclude: Optional[List[str]] = None,
  upsert: bool = True,
- skip_if_exists: bool = True,
+ skip_if_exists: bool = False,
  auth: Optional[ContentAuth] = None,
  ) -> None:
  """
@@ -342,7 +364,11 @@ class Knowledge:
  Returns:
  bool: True if should skip processing, False if should continue
  """
+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
  if self.vector_db and self.vector_db.content_hash_exists(content_hash) and skip_if_exists:
+ log_debug(f"Content already exists: {content_hash}, skipping...")
  return True

  return False
@@ -355,6 +381,10 @@ class Knowledge:
  include: Optional[List[str]] = None,
  exclude: Optional[List[str]] = None,
  ):
+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
+
  log_info(f"Adding content from path, {content.id}, {content.name}, {content.path}, {content.description}")
  path = Path(content.path) # type: ignore

@@ -451,6 +481,11 @@ class Knowledge:
  3. Read the content
  4. Prepare and insert the content in the vector database
  """
+
+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
+
  log_info(f"Adding content from URL {content.url}")
  content.file_type = "url"

@@ -559,8 +594,12 @@ class Knowledge:
  self,
  content: Content,
  upsert: bool = True,
- skip_if_exists: bool = True,
+ skip_if_exists: bool = False,
  ):
+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
+
  if content.name:
  name = content.name
  elif content.file_data and content.file_data.content:
@@ -595,10 +634,7 @@ class Knowledge:
  read_documents = []

  if isinstance(content.file_data, str):
- try:
- content_bytes = content.file_data.encode("utf-8")
- except UnicodeEncodeError:
- content_bytes = content.file_data.encode("latin-1")
+ content_bytes = content.file_data.encode("utf-8", errors="replace")
  content_io = io.BytesIO(content_bytes)

  if content.reader:
@@ -619,14 +655,7 @@ class Knowledge:
  if isinstance(content.file_data.content, bytes):
  content_io = io.BytesIO(content.file_data.content)
  elif isinstance(content.file_data.content, str):
- if self._is_text_mime_type(content.file_data.type):
- try:
- content_bytes = content.file_data.content.encode("utf-8")
- except UnicodeEncodeError:
- log_debug(f"UTF-8 encoding failed for {content.file_data.type}, using latin-1")
- content_bytes = content.file_data.content.encode("latin-1")
- else:
- content_bytes = content.file_data.content.encode("latin-1")
+ content_bytes = content.file_data.content.encode("utf-8", errors="replace")
  content_io = io.BytesIO(content_bytes)
  else:
  content_io = content.file_data.content # type: ignore
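
Both encode paths above collapse into a single UTF-8 encode with errors="replace". Since UTF-8 can represent every valid code point, the old latin-1 fallback only ever mattered for malformed data; a quick illustration of the new behaviour:

import io

# Ordinary multilingual text encodes cleanly, exactly as before.
content_io = io.BytesIO("café 東京".encode("utf-8", errors="replace"))

# Only invalid data such as a lone surrogate triggers the replacement handler,
# which substitutes "?" instead of raising UnicodeEncodeError.
"bad \ud800 char".encode("utf-8", errors="replace")  # b'bad ? char'
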
@@ -663,6 +692,9 @@ class Knowledge:
  upsert: bool,
  skip_if_exists: bool,
  ):
+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
  log_info(f"Adding content from topics: {content.topics}")

  if content.topics is None:
@@ -896,6 +928,10 @@ class Knowledge:
  await self._handle_vector_db_insert(content_entry, read_documents, upsert)

  async def _handle_vector_db_insert(self, content: Content, read_documents, upsert):
+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
+
  if not self.vector_db:
  log_error("No vector database configured")
  content.status = ContentStatus.FAILED
@@ -985,6 +1021,48 @@ class Knowledge:
  )
  return hashlib.sha256(fallback.encode()).hexdigest()

+ def _ensure_string_field(self, value: Any, field_name: str, default: str = "") -> str:
+ """
+ Safely ensure a field is a string, handling various edge cases.
+
+ Args:
+ value: The value to convert to string
+ field_name: Name of the field for logging purposes
+ default: Default string value if conversion fails
+
+ Returns:
+ str: A safe string value
+ """
+ # Handle None/falsy values
+ if value is None or value == "":
+ return default
+
+ # Handle unexpected list types (the root cause of our Pydantic warning)
+ if isinstance(value, list):
+ if len(value) == 0:
+ log_debug(f"Empty list found for {field_name}, using default: '{default}'")
+ return default
+ elif len(value) == 1:
+ # Single item list, extract the item
+ log_debug(f"Single-item list found for {field_name}, extracting: '{value[0]}'")
+ return str(value[0]) if value[0] is not None else default
+ else:
+ # Multiple items, join them
+ log_debug(f"Multi-item list found for {field_name}, joining: {value}")
+ return " | ".join(str(item) for item in value if item is not None)
+
+ # Handle other unexpected types
+ if not isinstance(value, str):
+ log_debug(f"Non-string type {type(value)} found for {field_name}, converting: '{value}'")
+ try:
+ return str(value)
+ except Exception as e:
+ log_warning(f"Failed to convert {field_name} to string: {e}, using default")
+ return default
+
+ # Already a string, return as-is
+ return value
+
  def _add_to_contents_db(self, content: Content):
  if self.contents_db:
  created_at = content.created_at if content.created_at else int(time.time())
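
The new helper normalises whatever ends up in the string-typed columns of the contents DB. A behaviour sketch against the implementation above (_ensure_string_field is private; it is exercised here only to illustrate the conversions):

from agno.knowledge.knowledge import Knowledge

kb = Knowledge(name="docs")  # no vector/contents db is needed for this demonstration

kb._ensure_string_field(None, "content.name")             # -> "" (default)
kb._ensure_string_field([], "content.name")               # -> ""
kb._ensure_string_field(["only"], "content.name")         # -> "only"
kb._ensure_string_field(["a", "b"], "content.name")       # -> "a | b"
kb._ensure_string_field(42, "content.name")               # -> "42"
kb._ensure_string_field("already a str", "content.name")  # -> "already a str"
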
@@ -997,10 +1075,18 @@ class Knowledge:
  if content.file_data and content.file_data.type
  else None
  )
+ # Safely handle string fields with proper type checking
+ safe_name = self._ensure_string_field(content.name, "content.name", default="")
+ safe_description = self._ensure_string_field(content.description, "content.description", default="")
+ safe_linked_to = self._ensure_string_field(self.name, "knowledge.name", default="")
+ safe_status_message = self._ensure_string_field(
+ content.status_message, "content.status_message", default=""
+ )
+
  content_row = KnowledgeRow(
  id=content.id,
- name=content.name if content.name else "",
- description=content.description if content.description else "",
+ name=safe_name,
+ description=safe_description,
  metadata=content.metadata,
  type=file_type,
  size=content.size
@@ -1008,16 +1094,19 @@ class Knowledge:
  else len(content.file_data.content)
  if content.file_data and content.file_data.content
  else None,
- linked_to=self.name,
+ linked_to=safe_linked_to,
  access_count=0,
  status=content.status if content.status else ContentStatus.PROCESSING,
- status_message="",
+ status_message=safe_status_message,
  created_at=created_at,
  updated_at=updated_at,
  )
  self.contents_db.upsert_knowledge_content(knowledge_row=content_row)

  def _update_content(self, content: Content) -> Optional[Dict[str, Any]]:
+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
  if self.contents_db:
  if not content.id:
  log_warning("Content id is required to update Knowledge content")
@@ -1029,18 +1118,25 @@ class Knowledge:
  log_warning(f"Content row not found for id: {content.id}, cannot update status")
  return None

+ # Apply safe string handling for updates as well
  if content.name is not None:
- content_row.name = content.name
+ content_row.name = self._ensure_string_field(content.name, "content.name", default="")
  if content.description is not None:
- content_row.description = content.description
+ content_row.description = self._ensure_string_field(
+ content.description, "content.description", default=""
+ )
  if content.metadata is not None:
  content_row.metadata = content.metadata
  if content.status is not None:
  content_row.status = content.status
  if content.status_message is not None:
- content_row.status_message = content.status_message if content.status_message else ""
+ content_row.status_message = self._ensure_string_field(
+ content.status_message, "content.status_message", default=""
+ )
  if content.external_id is not None:
- content_row.external_id = content.external_id
+ content_row.external_id = self._ensure_string_field(
+ content.external_id, "content.external_id", default=""
+ )
  content_row.updated_at = int(time.time())
  self.contents_db.upsert_knowledge_content(knowledge_row=content_row)

@@ -1053,10 +1149,17 @@ class Knowledge:
  return content_row.to_dict()

  else:
- log_warning(f"Contents DB not found for knowledge base: {self.name}")
+ if self.name:
+ log_warning(f"Contents DB not found for knowledge base: {self.name}")
+ else:
+ log_warning("Contents DB not found for knowledge base")
  return None

  async def _process_lightrag_content(self, content: Content, content_type: KnowledgeContentOrigin) -> None:
+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
+
  self._add_to_contents_db(content)
  if content_type == KnowledgeContentOrigin.PATH:
  if content.file_data is None:
@@ -1214,6 +1317,9 @@ class Knowledge:
  ) -> List[Document]:
  """Returns relevant documents matching a query"""

+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
  try:
  if self.vector_db is None:
  log_warning("No vector db provided")
@@ -1231,6 +1337,9 @@ class Knowledge:
  ) -> List[Document]:
  """Returns relevant documents matching a query"""

+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
  try:
  if self.vector_db is None:
  log_warning("No vector db provided")
@@ -1295,18 +1404,27 @@ class Knowledge:
  return valid_filters

  def remove_vector_by_id(self, id: str) -> bool:
+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
  if self.vector_db is None:
  log_warning("No vector DB provided")
  return False
  return self.vector_db.delete_by_id(id)

  def remove_vectors_by_name(self, name: str) -> bool:
+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
  if self.vector_db is None:
  log_warning("No vector DB provided")
  return False
  return self.vector_db.delete_by_name(name)

  def remove_vectors_by_metadata(self, metadata: Dict[str, Any]) -> bool:
+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
  if self.vector_db is None:
  log_warning("No vector DB provided")
  return False
@@ -1393,6 +1511,9 @@ class Knowledge:
  return status, content_row.status_message

  def remove_content_by_id(self, content_id: str):
+ from agno.vectordb import VectorDb
+
+ self.vector_db = cast(VectorDb, self.vector_db)
  if self.vector_db is not None:
  if self.vector_db.__class__.__name__ == "LightRag":
  # For LightRAG, get the content first to find the external_id
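
Taken together, the knowledge.py changes flip the default of upsert to True across the add-content entry points and allow raw strings to be ingested through the new text_contents argument. A minimal sketch of the new call shape (vector and contents databases are omitted for brevity; in practice a configured vector db is required for the insert step to succeed):

import asyncio

from agno.knowledge.knowledge import Knowledge

knowledge = Knowledge(name="release-notes")  # pass vector_db=... and contents_db=... in real use

asyncio.run(
    knowledge.add_contents_async(
        name="agno-changelog",
        text_contents=["agno 2.0.9 adds a CometAPI provider and Ollama cloud support."],
        metadata={"source": "changelog"},
        # upsert now defaults to True; pass skip_if_exists=True to skip
        # previously ingested content instead of updating it.
    )
)
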
agno/knowledge/types.py CHANGED
@@ -1,4 +1,7 @@
  from enum import Enum
+ from typing import Any
+
+ from pydantic import BaseModel


  class ContentType(str, Enum):
@@ -28,3 +31,8 @@ class ContentType(str, Enum):
  def get_content_type_enum(content_type_str: str) -> ContentType:
  """Convert a content type string to ContentType enum."""
  return ContentType(content_type_str)
+
+
+ class KnowledgeFilter(BaseModel):
+ key: str
+ value: Any
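
KnowledgeFilter is a plain key/value Pydantic container; how and where agno passes these objects is outside this diff, but constructing one is straightforward:

from agno.knowledge.types import KnowledgeFilter

flt = KnowledgeFilter(key="source", value="changelog")
print(flt.key, flt.value)  # source changelog
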
agno/models/anthropic/claude.py CHANGED
@@ -421,26 +421,6 @@ class Claude(Model):
  log_error(f"Unexpected error calling Claude API: {str(e)}")
  raise ModelProviderError(message=str(e), model_name=self.name, model_id=self.id) from e

- def format_function_call_results(self, messages: List[Message], function_call_results: List[Message]) -> None:
- """
- Handle the results of function calls.
-
- Args:
- messages (List[Message]): The list of conversation messages.
- function_call_results (List[Message]): The results of the function calls.
- """
- if len(function_call_results) > 0:
- fc_responses: List = []
- for _fc_message in function_call_results:
- fc_responses.append(
- {
- "type": "tool_result",
- "tool_use_id": _fc_message.tool_call_id,
- "content": str(_fc_message.content),
- }
- )
- messages.append(Message(role="user", content=fc_responses))
-
  def get_system_message_for_model(self, tools: Optional[List[Any]] = None) -> Optional[str]:
  if tools is not None and len(tools) > 0:
  tool_call_prompt = "Do not reflect on the quality of the returned search results in your response\n\n"
agno/models/cometapi/__init__.py ADDED
@@ -0,0 +1,5 @@
+ from agno.models.cometapi.cometapi import CometAPI
+
+ __all__ = [
+ "CometAPI",
+ ]
agno/models/cometapi/cometapi.py ADDED
@@ -0,0 +1,57 @@
+ from dataclasses import dataclass, field
+ from os import getenv
+ from typing import List, Optional
+
+ import httpx
+
+ from agno.models.openai.like import OpenAILike
+ from agno.utils.log import log_debug
+
+
+ @dataclass
+ class CometAPI(OpenAILike):
+ """
+ The CometAPI class provides access to multiple AI model providers
+ (GPT, Claude, Gemini, DeepSeek, etc.) through OpenAI-compatible endpoints.
+
+ Args:
+ id (str): The id of the CometAPI model to use. Default is "gpt-5-mini".
+ name (str): The name for this model. Defaults to "CometAPI".
+ api_key (str): The API key for CometAPI. Defaults to COMETAPI_KEY environment variable.
+ base_url (str): The base URL for CometAPI. Defaults to "https://api.cometapi.com/v1".
+ """
+
+ name: str = "CometAPI"
+ id: str = "gpt-5-mini"
+ api_key: Optional[str] = field(default_factory=lambda: getenv("COMETAPI_KEY"))
+ base_url: str = "https://api.cometapi.com/v1"
+
+ def get_available_models(self) -> List[str]:
+ """
+ Fetch available chat models from CometAPI, filtering out non-chat models.
+
+ Returns:
+ List of available chat model IDs
+ """
+ if not self.api_key:
+ log_debug("No API key provided, returning empty model list")
+ return []
+
+ try:
+ with httpx.Client() as client:
+ response = client.get(
+ f"{self.base_url}/models",
+ headers={"Authorization": f"Bearer {self.api_key}", "Accept": "application/json"},
+ timeout=30.0,
+ )
+ response.raise_for_status()
+
+ data = response.json()
+ all_models = data.get("data", [])
+
+ log_debug(f"Found {len(all_models)} total models")
+ return sorted(all_models)
+
+ except Exception as e:
+ log_debug(f"Error fetching models from CometAPI: {e}")
+ return []
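
Because CometAPI subclasses OpenAILike, it plugs into an Agent like any other OpenAI-compatible model. A minimal sketch (assumes COMETAPI_KEY is exported; Agent and print_response are the usual agno entry points and are not part of this diff):

from agno.agent import Agent
from agno.models.cometapi import CometAPI

# api_key falls back to the COMETAPI_KEY environment variable via default_factory.
agent = Agent(model=CometAPI(id="gpt-5-mini"))
agent.print_response("Summarise the agno 2.0.9 changes in one sentence.")
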
agno/models/google/gemini.py CHANGED
@@ -16,9 +16,8 @@ from agno.models.message import Citations, Message, UrlCitation
  from agno.models.metrics import Metrics
  from agno.models.response import ModelResponse
  from agno.run.agent import RunOutput
- from agno.utils.gemini import convert_schema, format_function_definitions, format_image_for_message
+ from agno.utils.gemini import format_function_definitions, format_image_for_message, prepare_response_schema
  from agno.utils.log import log_debug, log_error, log_info, log_warning
- from agno.utils.models.schema_utils import get_response_schema_for_provider

  try:
  from google import genai
@@ -191,12 +190,9 @@ class Gemini(Model):

  if response_format is not None and isinstance(response_format, type) and issubclass(response_format, BaseModel):
  config["response_mime_type"] = "application/json" # type: ignore
- # Convert Pydantic model to JSON schema, then normalize for Gemini, then convert to Gemini schema format
-
- # Get the normalized schema for Gemini
- normalized_schema = get_response_schema_for_provider(response_format, "gemini")
- gemini_schema = convert_schema(normalized_schema)
- config["response_schema"] = gemini_schema
+ # Convert Pydantic model using our hybrid approach
+ # This will handle complex schemas with nested models, dicts, and circular refs
+ config["response_schema"] = prepare_response_schema(response_format)

  # Add thinking configuration
  thinking_config_params = {}
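
The structured-output path now hands the Pydantic model class straight to prepare_response_schema. A sketch of what ends up under "response_schema" in the request config (the nested models below are illustrative):

from typing import List

from pydantic import BaseModel

from agno.utils.gemini import prepare_response_schema


class Scene(BaseModel):
    location: str
    characters: List[str]


class MovieScript(BaseModel):
    title: str
    scenes: List[Scene]  # nested models are the case the hybrid conversion targets


# Equivalent to what the Gemini request config now stores as the response schema.
response_schema = prepare_response_schema(MovieScript)
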
agno/models/huggingface/huggingface.py CHANGED
@@ -382,7 +382,8 @@ class HuggingFace(Model):
  List[Dict[str, Any]]: The built tool calls.
  """
  tool_calls: List[Dict[str, Any]] = []
- for _tool_call in tool_calls_data:
+ for tool_call in tool_calls_data:
+ _tool_call = tool_call[0]
  _index = _tool_call.index
  _tool_call_id = _tool_call.id
  _tool_call_type = _tool_call.type
agno/models/ollama/chat.py CHANGED
@@ -1,5 +1,6 @@
  import json
- from dataclasses import dataclass
+ from dataclasses import dataclass, field
+ from os import getenv
  from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Type, Union

  from pydantic import BaseModel
@@ -10,6 +11,7 @@ from agno.models.message import Message
  from agno.models.metrics import Metrics
  from agno.models.response import ModelResponse
  from agno.utils.log import log_debug, log_warning
+ from agno.utils.reasoning import extract_thinking_content

  try:
  from ollama import AsyncClient as AsyncOllamaClient
@@ -43,6 +45,7 @@ class Ollama(Model):
  # Client parameters
  host: Optional[str] = None
  timeout: Optional[Any] = None
+ api_key: Optional[str] = field(default_factory=lambda: getenv("OLLAMA_API_KEY"))
  client_params: Optional[Dict[str, Any]] = None

  # Ollama clients
@@ -50,10 +53,23 @@ class Ollama(Model):
  async_client: Optional[AsyncOllamaClient] = None

  def _get_client_params(self) -> Dict[str, Any]:
+ host = self.host
+ headers = {}
+
+ if self.api_key:
+ if not host:
+ host = "https://ollama.com"
+ headers["authorization"] = f"Bearer {self.api_key}"
+ log_debug(f"Using Ollama cloud endpoint: {host}")
+
  base_params = {
- "host": self.host,
+ "host": host,
  "timeout": self.timeout,
  }
+
+ if headers:
+ base_params["headers"] = headers
+
  # Create client_params dict with non-None values
  client_params = {k: v for k, v in base_params.items() if v is not None}
  # Add additional client params if provided
@@ -84,7 +100,8 @@ class Ollama(Model):
  if self.async_client is not None:
  return self.async_client

- return AsyncOllamaClient(**self._get_client_params())
+ self.async_client = AsyncOllamaClient(**self._get_client_params())
+ return self.async_client

  def get_request_params(
  self,
@@ -144,6 +161,28 @@ class Ollama(Model):
  "role": message.role,
  "content": message.content,
  }
+
+ if message.role == "assistant" and message.tool_calls is not None:
+ # Format tool calls for assistant messages
+ formatted_tool_calls = []
+ for tool_call in message.tool_calls:
+ if "function" in tool_call:
+ function_data = tool_call["function"]
+ formatted_tool_call = {
+ "id": tool_call.get("id"),
+ "type": "function",
+ "function": {
+ "name": function_data["name"],
+ "arguments": json.loads(function_data["arguments"])
+ if isinstance(function_data["arguments"], str)
+ else function_data["arguments"],
+ },
+ }
+ formatted_tool_calls.append(formatted_tool_call)
+
+ if formatted_tool_calls:
+ _message["tool_calls"] = formatted_tool_calls
+
  if message.role == "user":
  if message.images is not None:
  message_images = []
@@ -309,6 +348,16 @@ class Ollama(Model):
  if response_message.get("content") is not None:
  model_response.content = response_message.get("content")

+ # Extract thinking content between <think> tags if present
+ if model_response.content and model_response.content.find("<think>") != -1:
+ reasoning_content, clean_content = extract_thinking_content(model_response.content)
+
+ if reasoning_content:
+ # Store extracted thinking content separately
+ model_response.reasoning_content = reasoning_content
+ # Update main content with clean version
+ model_response.content = clean_content
+
  if response_message.get("tool_calls") is not None:
  if model_response.tool_calls is None:
  model_response.tool_calls = []
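
With the new api_key field, the same Ollama class can talk either to a local server or to Ollama's hosted endpoint, and <think> blocks in responses are now surfaced as reasoning_content. A minimal sketch (the model id is only an example):

from os import getenv

from agno.models.ollama.chat import Ollama

# Local usage is unchanged: no api_key, host points at the local daemon.
local = Ollama(id="llama3.1")

# With OLLAMA_API_KEY set (or api_key passed explicitly), _get_client_params()
# falls back to https://ollama.com and attaches a Bearer authorization header.
cloud = Ollama(id="llama3.1", api_key=getenv("OLLAMA_API_KEY"))
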