agno 2.0.7__py3-none-any.whl → 2.0.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -21,7 +21,6 @@ from agno.knowledge.remote_content.remote_content import GCSContent, RemoteConte
 from agno.utils.http import async_fetch_with_retry
 from agno.utils.log import log_debug, log_error, log_info, log_warning
 from agno.utils.string import generate_id
-from agno.vectordb import VectorDb
 
 ContentDict = Dict[str, Union[str, Dict[str, str]]]
 
@@ -39,12 +38,15 @@ class Knowledge:
 
     name: Optional[str] = None
     description: Optional[str] = None
-    vector_db: Optional[VectorDb] = None
+    vector_db: Optional[Any] = None
    contents_db: Optional[BaseDb] = None
    max_results: int = 10
    readers: Optional[Dict[str, Reader]] = None
 
    def __post_init__(self):
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
        if self.vector_db and not self.vector_db.exists():
            self.vector_db.create()
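Reviewer note, not part of the diff: the change above swaps the eager `from agno.vectordb import VectorDb` import for `Optional[Any]` plus a deferred import and `typing.cast` inside `__post_init__`, so importing the knowledge module no longer pulls in the vector-db stack. A minimal, self-contained sketch of that pattern, using sqlite3 as a stand-in for the optional heavy dependency (the `Container` name and method are made up for illustration):

from dataclasses import dataclass
from typing import Any, Optional, cast


@dataclass
class Container:
    # Typed as Any so importing this module does not import the dependency.
    db: Optional[Any] = None

    def ensure_connection(self) -> None:
        import sqlite3  # deferred import: only paid when the method is actually called

        self.db = cast(sqlite3.Connection, self.db)  # type narrowing only; no runtime effect
        if self.db is None:
            self.db = sqlite3.connect(":memory:")


c = Container()
c.ensure_connection()
print(type(c.db))  # <class 'sqlite3.Connection'>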
@@ -64,9 +66,12 @@ class Knowledge:
        paths: Optional[List[str]] = None,
        urls: Optional[List[str]] = None,
        metadata: Optional[Dict[str, str]] = None,
+        topics: Optional[List[str]] = None,
+        text_contents: Optional[List[str]] = None,
+        reader: Optional[Reader] = None,
        include: Optional[List[str]] = None,
        exclude: Optional[List[str]] = None,
-        upsert: bool = False,
+        upsert: bool = True,
        skip_if_exists: bool = False,
        remote_content: Optional[RemoteContent] = None,
    ) -> None: ...
@@ -74,7 +79,7 @@ class Knowledge:
    async def add_contents_async(self, *args, **kwargs) -> None:
        if args and isinstance(args[0], list):
            arguments = args[0]
-            upsert = kwargs.get("upsert", False)
+            upsert = kwargs.get("upsert", True)
            skip_if_exists = kwargs.get("skip_if_exists", False)
            for argument in arguments:
                await self.add_content_async(
@@ -84,6 +89,7 @@ class Knowledge:
                    url=argument.get("url"),
                    metadata=argument.get("metadata"),
                    topics=argument.get("topics"),
+                    text_contents=argument.get("text_contents"),
                    reader=argument.get("reader"),
                    include=argument.get("include"),
                    exclude=argument.get("exclude"),
@@ -97,11 +103,13 @@ class Knowledge:
        metadata = kwargs.get("metadata", {})
        description = kwargs.get("description", [])
        topics = kwargs.get("topics", [])
+        reader = kwargs.get("reader", None)
        paths = kwargs.get("paths", [])
        urls = kwargs.get("urls", [])
+        text_contents = kwargs.get("text_contents", [])
        include = kwargs.get("include")
        exclude = kwargs.get("exclude")
-        upsert = kwargs.get("upsert", False)
+        upsert = kwargs.get("upsert", True)
        skip_if_exists = kwargs.get("skip_if_exists", False)
        remote_content = kwargs.get("remote_content", None)
        for path in paths:
@@ -126,6 +134,19 @@ class Knowledge:
                upsert=upsert,
                skip_if_exists=skip_if_exists,
            )
+        for i, text_content in enumerate(text_contents):
+            content_name = f"{name}_{i}" if name else f"text_content_{i}"
+            log_debug(f"Adding text content: {content_name}")
+            await self.add_content_async(
+                name=content_name,
+                description=description,
+                text_content=text_content,
+                metadata=metadata,
+                include=include,
+                exclude=exclude,
+                upsert=upsert,
+                skip_if_exists=skip_if_exists,
+            )
        if topics:
            await self.add_content_async(
                name=name,
@@ -136,6 +157,7 @@ class Knowledge:
                exclude=exclude,
                upsert=upsert,
                skip_if_exists=skip_if_exists,
+                reader=reader,
            )
 
        if remote_content:
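Reviewer note, not part of the diff: the hunks above add `topics`, `text_contents`, and `reader` to `add_contents_async` and flip the default `upsert` to True; each raw string in `text_contents` is fanned out to `add_content_async` under a generated name (`{name}_{i}`, or `text_content_{i}` when no name is given). A hedged usage sketch based on the signature shown here; the `Knowledge` import path and the omitted vector_db/contents_db wiring are assumed, not shown in this hunk:

import asyncio

from agno.knowledge.knowledge import Knowledge  # import path assumed; not shown in this diff


async def main() -> None:
    # A vector_db would normally be attached here; omitted to keep the sketch short.
    knowledge = Knowledge(name="support-kb")
    await knowledge.add_contents_async(
        text_contents=[
            "Refunds are processed within 5 business days.",
            "Support is available 24/7 via chat.",
        ],
        metadata={"source": "faq"},
        upsert=True,  # now the default in 2.0.8, shown here for emphasis
    )
    # Without an explicit name, the items are stored as text_content_0, text_content_1, ...


asyncio.run(main())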
@@ -163,7 +185,7 @@ class Knowledge:
        metadata: Optional[Dict[str, str]] = None,
        include: Optional[List[str]] = None,
        exclude: Optional[List[str]] = None,
-        upsert: bool = False,
+        upsert: bool = True,
        skip_if_exists: bool = False,
    ) -> None: ...
 
@@ -201,7 +223,7 @@ class Knowledge:
        metadata: Optional[Dict[str, str]] = None,
        include: Optional[List[str]] = None,
        exclude: Optional[List[str]] = None,
-        upsert: bool = False,
+        upsert: bool = True,
        skip_if_exists: bool = False,
        reader: Optional[Reader] = None,
        auth: Optional[ContentAuth] = None,
@@ -268,7 +290,7 @@ class Knowledge:
        metadata: Optional[Dict[str, str]] = None,
        include: Optional[List[str]] = None,
        exclude: Optional[List[str]] = None,
-        upsert: bool = False,
+        upsert: bool = True,
        skip_if_exists: bool = False,
        reader: Optional[Reader] = None,
        auth: Optional[ContentAuth] = None,
@@ -291,7 +313,7 @@ class Knowledge:
        include: Optional[List[str]] = None,
        exclude: Optional[List[str]] = None,
        upsert: bool = True,
-        skip_if_exists: bool = True,
+        skip_if_exists: bool = False,
        auth: Optional[ContentAuth] = None,
    ) -> None:
        """
@@ -342,7 +364,11 @@ class Knowledge:
        Returns:
            bool: True if should skip processing, False if should continue
        """
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
        if self.vector_db and self.vector_db.content_hash_exists(content_hash) and skip_if_exists:
+            log_debug(f"Content already exists: {content_hash}, skipping...")
            return True
 
        return False
@@ -355,6 +381,10 @@ class Knowledge:
        include: Optional[List[str]] = None,
        exclude: Optional[List[str]] = None,
    ):
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
+
        log_info(f"Adding content from path, {content.id}, {content.name}, {content.path}, {content.description}")
        path = Path(content.path)  # type: ignore
 
@@ -451,6 +481,11 @@ class Knowledge:
        3. Read the content
        4. Prepare and insert the content in the vector database
        """
+
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
+
        log_info(f"Adding content from URL {content.url}")
        content.file_type = "url"
 
@@ -559,8 +594,12 @@ class Knowledge:
        self,
        content: Content,
        upsert: bool = True,
-        skip_if_exists: bool = True,
+        skip_if_exists: bool = False,
    ):
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
+
        if content.name:
            name = content.name
        elif content.file_data and content.file_data.content:
@@ -595,10 +634,7 @@ class Knowledge:
        read_documents = []
 
        if isinstance(content.file_data, str):
-            try:
-                content_bytes = content.file_data.encode("utf-8")
-            except UnicodeEncodeError:
-                content_bytes = content.file_data.encode("latin-1")
+            content_bytes = content.file_data.encode("utf-8", errors="replace")
            content_io = io.BytesIO(content_bytes)
 
            if content.reader:
@@ -619,14 +655,7 @@ class Knowledge:
        if isinstance(content.file_data.content, bytes):
            content_io = io.BytesIO(content.file_data.content)
        elif isinstance(content.file_data.content, str):
-            if self._is_text_mime_type(content.file_data.type):
-                try:
-                    content_bytes = content.file_data.content.encode("utf-8")
-                except UnicodeEncodeError:
-                    log_debug(f"UTF-8 encoding failed for {content.file_data.type}, using latin-1")
-                    content_bytes = content.file_data.content.encode("latin-1")
-            else:
-                content_bytes = content.file_data.content.encode("latin-1")
+            content_bytes = content.file_data.content.encode("utf-8", errors="replace")
            content_io = io.BytesIO(content_bytes)
        else:
            content_io = content.file_data.content  # type: ignore
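Reviewer note, not part of the diff: both encoding hunks above collapse the utf-8-then-latin-1 fallback into a single `encode("utf-8", errors="replace")`. Encoding a Python str to UTF-8 can only fail on lone surrogate code points, so the new call replaces those with "?" instead of re-encoding the whole payload as latin-1. A small illustration:

# Lone surrogates are the only code points a str can hold that UTF-8 cannot encode.
text = "ok\ud800ok"

try:
    text.encode("utf-8")
except UnicodeEncodeError as exc:
    print("strict utf-8 failed:", exc.reason)

# New behaviour: the offending code point becomes '?', everything else is normal UTF-8.
print(text.encode("utf-8", errors="replace"))    # b'ok?ok'
print("café".encode("utf-8", errors="replace"))  # b'caf\xc3\xa9'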
@@ -663,6 +692,9 @@ class Knowledge:
        upsert: bool,
        skip_if_exists: bool,
    ):
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
        log_info(f"Adding content from topics: {content.topics}")
 
        if content.topics is None:
@@ -896,6 +928,10 @@ class Knowledge:
            await self._handle_vector_db_insert(content_entry, read_documents, upsert)
 
    async def _handle_vector_db_insert(self, content: Content, read_documents, upsert):
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
+
        if not self.vector_db:
            log_error("No vector database configured")
            content.status = ContentStatus.FAILED
@@ -985,6 +1021,48 @@ class Knowledge:
        )
        return hashlib.sha256(fallback.encode()).hexdigest()
 
+    def _ensure_string_field(self, value: Any, field_name: str, default: str = "") -> str:
+        """
+        Safely ensure a field is a string, handling various edge cases.
+
+        Args:
+            value: The value to convert to string
+            field_name: Name of the field for logging purposes
+            default: Default string value if conversion fails
+
+        Returns:
+            str: A safe string value
+        """
+        # Handle None/falsy values
+        if value is None or value == "":
+            return default
+
+        # Handle unexpected list types (the root cause of our Pydantic warning)
+        if isinstance(value, list):
+            if len(value) == 0:
+                log_debug(f"Empty list found for {field_name}, using default: '{default}'")
+                return default
+            elif len(value) == 1:
+                # Single item list, extract the item
+                log_debug(f"Single-item list found for {field_name}, extracting: '{value[0]}'")
+                return str(value[0]) if value[0] is not None else default
+            else:
+                # Multiple items, join them
+                log_debug(f"Multi-item list found for {field_name}, joining: {value}")
+                return " | ".join(str(item) for item in value if item is not None)
+
+        # Handle other unexpected types
+        if not isinstance(value, str):
+            log_debug(f"Non-string type {type(value)} found for {field_name}, converting: '{value}'")
+            try:
+                return str(value)
+            except Exception as e:
+                log_warning(f"Failed to convert {field_name} to string: {e}, using default")
+                return default
+
+        # Already a string, return as-is
+        return value
+
    def _add_to_contents_db(self, content: Content):
        if self.contents_db:
            created_at = content.created_at if content.created_at else int(time.time())
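Reviewer note, not part of the diff: `_ensure_string_field` normalizes values bound for string columns in the contents DB: None or empty becomes the default, single-item lists are unwrapped, longer lists are joined with " | ", and anything else is stringified. The same rules restated as a standalone function, for illustration only:

from typing import Any


def ensure_string_field(value: Any, default: str = "") -> str:
    # Mirrors the rules of Knowledge._ensure_string_field above, without the logging.
    if value is None or value == "":
        return default
    if isinstance(value, list):
        if not value:
            return default
        if len(value) == 1:
            return str(value[0]) if value[0] is not None else default
        return " | ".join(str(item) for item in value if item is not None)
    if not isinstance(value, str):
        return str(value)
    return value


print(ensure_string_field(None))              # ''
print(ensure_string_field(["only item"]))     # 'only item'
print(ensure_string_field(["a", None, "b"]))  # 'a | b'
print(ensure_string_field(42))                # '42'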
@@ -997,10 +1075,18 @@ class Knowledge:
                if content.file_data and content.file_data.type
                else None
            )
+            # Safely handle string fields with proper type checking
+            safe_name = self._ensure_string_field(content.name, "content.name", default="")
+            safe_description = self._ensure_string_field(content.description, "content.description", default="")
+            safe_linked_to = self._ensure_string_field(self.name, "knowledge.name", default="")
+            safe_status_message = self._ensure_string_field(
+                content.status_message, "content.status_message", default=""
+            )
+
            content_row = KnowledgeRow(
                id=content.id,
-                name=content.name if content.name else "",
-                description=content.description if content.description else "",
+                name=safe_name,
+                description=safe_description,
                metadata=content.metadata,
                type=file_type,
                size=content.size
@@ -1008,16 +1094,19 @@ class Knowledge:
                else len(content.file_data.content)
                if content.file_data and content.file_data.content
                else None,
-                linked_to=self.name,
+                linked_to=safe_linked_to,
                access_count=0,
                status=content.status if content.status else ContentStatus.PROCESSING,
-                status_message="",
+                status_message=safe_status_message,
                created_at=created_at,
                updated_at=updated_at,
            )
            self.contents_db.upsert_knowledge_content(knowledge_row=content_row)
 
    def _update_content(self, content: Content) -> Optional[Dict[str, Any]]:
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
        if self.contents_db:
            if not content.id:
                log_warning("Content id is required to update Knowledge content")
@@ -1029,18 +1118,25 @@ class Knowledge:
                log_warning(f"Content row not found for id: {content.id}, cannot update status")
                return None
 
+            # Apply safe string handling for updates as well
            if content.name is not None:
-                content_row.name = content.name
+                content_row.name = self._ensure_string_field(content.name, "content.name", default="")
            if content.description is not None:
-                content_row.description = content.description
+                content_row.description = self._ensure_string_field(
+                    content.description, "content.description", default=""
+                )
            if content.metadata is not None:
                content_row.metadata = content.metadata
            if content.status is not None:
                content_row.status = content.status
            if content.status_message is not None:
-                content_row.status_message = content.status_message if content.status_message else ""
+                content_row.status_message = self._ensure_string_field(
+                    content.status_message, "content.status_message", default=""
+                )
            if content.external_id is not None:
-                content_row.external_id = content.external_id
+                content_row.external_id = self._ensure_string_field(
+                    content.external_id, "content.external_id", default=""
+                )
            content_row.updated_at = int(time.time())
            self.contents_db.upsert_knowledge_content(knowledge_row=content_row)
 
@@ -1053,10 +1149,17 @@ class Knowledge:
            return content_row.to_dict()
 
        else:
-            log_warning(f"Contents DB not found for knowledge base: {self.name}")
+            if self.name:
+                log_warning(f"Contents DB not found for knowledge base: {self.name}")
+            else:
+                log_warning("Contents DB not found for knowledge base")
            return None
 
    async def _process_lightrag_content(self, content: Content, content_type: KnowledgeContentOrigin) -> None:
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
+
        self._add_to_contents_db(content)
        if content_type == KnowledgeContentOrigin.PATH:
            if content.file_data is None:
@@ -1214,6 +1317,9 @@ class Knowledge:
    ) -> List[Document]:
        """Returns relevant documents matching a query"""
 
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
        try:
            if self.vector_db is None:
                log_warning("No vector db provided")
@@ -1231,6 +1337,9 @@ class Knowledge:
    ) -> List[Document]:
        """Returns relevant documents matching a query"""
 
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
        try:
            if self.vector_db is None:
                log_warning("No vector db provided")
@@ -1295,18 +1404,27 @@ class Knowledge:
        return valid_filters
 
    def remove_vector_by_id(self, id: str) -> bool:
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
        if self.vector_db is None:
            log_warning("No vector DB provided")
            return False
        return self.vector_db.delete_by_id(id)
 
    def remove_vectors_by_name(self, name: str) -> bool:
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
        if self.vector_db is None:
            log_warning("No vector DB provided")
            return False
        return self.vector_db.delete_by_name(name)
 
    def remove_vectors_by_metadata(self, metadata: Dict[str, Any]) -> bool:
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
        if self.vector_db is None:
            log_warning("No vector DB provided")
            return False
@@ -1393,6 +1511,9 @@ class Knowledge:
        return status, content_row.status_message
 
    def remove_content_by_id(self, content_id: str):
+        from agno.vectordb import VectorDb
+
+        self.vector_db = cast(VectorDb, self.vector_db)
        if self.vector_db is not None:
            if self.vector_db.__class__.__name__ == "LightRag":
                # For LightRAG, get the content first to find the external_id
agno/knowledge/types.py CHANGED
@@ -1,4 +1,7 @@
 from enum import Enum
+from typing import Any
+
+from pydantic import BaseModel
 
 
 class ContentType(str, Enum):
@@ -28,3 +31,8 @@ class ContentType(str, Enum):
 def get_content_type_enum(content_type_str: str) -> ContentType:
     """Convert a content type string to ContentType enum."""
     return ContentType(content_type_str)
+
+
+class KnowledgeFilter(BaseModel):
+    key: str
+    value: Any
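Reviewer note, not part of the diff: agno/knowledge/types.py also gains a small Pydantic `KnowledgeFilter` model (a typed key/value pair). A minimal construction sketch, assuming Pydantic v2 for `model_dump()`:

from agno.knowledge.types import KnowledgeFilter

f = KnowledgeFilter(key="user_id", value="user_123")
print(f.model_dump())  # {'key': 'user_id', 'value': 'user_123'}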
@@ -0,0 +1,5 @@
+from agno.models.cometapi.cometapi import CometAPI
+
+__all__ = [
+    "CometAPI",
+]
@@ -0,0 +1,57 @@
+from dataclasses import dataclass, field
+from os import getenv
+from typing import List, Optional
+
+import httpx
+
+from agno.models.openai.like import OpenAILike
+from agno.utils.log import log_debug
+
+
+@dataclass
+class CometAPI(OpenAILike):
+    """
+    The CometAPI class provides access to multiple AI model providers
+    (GPT, Claude, Gemini, DeepSeek, etc.) through OpenAI-compatible endpoints.
+
+    Args:
+        id (str): The id of the CometAPI model to use. Default is "gpt-5-mini".
+        name (str): The name for this model. Defaults to "CometAPI".
+        api_key (str): The API key for CometAPI. Defaults to COMETAPI_KEY environment variable.
+        base_url (str): The base URL for CometAPI. Defaults to "https://api.cometapi.com/v1".
+    """
+
+    name: str = "CometAPI"
+    id: str = "gpt-5-mini"
+    api_key: Optional[str] = field(default_factory=lambda: getenv("COMETAPI_KEY"))
+    base_url: str = "https://api.cometapi.com/v1"
+
+    def get_available_models(self) -> List[str]:
+        """
+        Fetch available chat models from CometAPI, filtering out non-chat models.
+
+        Returns:
+            List of available chat model IDs
+        """
+        if not self.api_key:
+            log_debug("No API key provided, returning empty model list")
+            return []
+
+        try:
+            with httpx.Client() as client:
+                response = client.get(
+                    f"{self.base_url}/models",
+                    headers={"Authorization": f"Bearer {self.api_key}", "Accept": "application/json"},
+                    timeout=30.0,
+                )
+                response.raise_for_status()
+
+                data = response.json()
+                all_models = data.get("data", [])
+
+                log_debug(f"Found {len(all_models)} total models")
+                return sorted(all_models)
+
+        except Exception as e:
+            log_debug(f"Error fetching models from CometAPI: {e}")
+            return []
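Reviewer note, not part of the diff: `CometAPI` is an `OpenAILike` subclass pointed at https://api.cometapi.com/v1, with the key read from the COMETAPI_KEY environment variable. A hedged usage sketch; the `Agent` wiring follows the usual agno pattern and is assumed rather than shown in this diff:

# Assumes COMETAPI_KEY is set in the environment.
from agno.agent import Agent  # standard agno Agent wiring, assumed; not part of this diff
from agno.models.cometapi import CometAPI

model = CometAPI(id="gpt-5-mini")  # defaults match the class docstring above
agent = Agent(model=model)
agent.print_response("Say hello in one sentence.")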
@@ -16,9 +16,8 @@ from agno.models.message import Citations, Message, UrlCitation
 from agno.models.metrics import Metrics
 from agno.models.response import ModelResponse
 from agno.run.agent import RunOutput
-from agno.utils.gemini import convert_schema, format_function_definitions, format_image_for_message
+from agno.utils.gemini import format_function_definitions, format_image_for_message, prepare_response_schema
 from agno.utils.log import log_debug, log_error, log_info, log_warning
-from agno.utils.models.schema_utils import get_response_schema_for_provider
 
 try:
     from google import genai
@@ -191,12 +190,9 @@ class Gemini(Model):
 
        if response_format is not None and isinstance(response_format, type) and issubclass(response_format, BaseModel):
            config["response_mime_type"] = "application/json"  # type: ignore
-            # Convert Pydantic model to JSON schema, then normalize for Gemini, then convert to Gemini schema format
-
-            # Get the normalized schema for Gemini
-            normalized_schema = get_response_schema_for_provider(response_format, "gemini")
-            gemini_schema = convert_schema(normalized_schema)
-            config["response_schema"] = gemini_schema
+            # Convert Pydantic model using our hybrid approach
+            # This will handle complex schemas with nested models, dicts, and circular refs
+            config["response_schema"] = prepare_response_schema(response_format)
 
        # Add thinking configuration
        thinking_config_params = {}
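Reviewer note, not part of the diff: the Gemini change replaces the `get_response_schema_for_provider` + `convert_schema` pipeline with a single `prepare_response_schema(response_format)` call, which the new comment says also covers nested models, dicts, and circular references. For context, a plain-Pydantic sketch of the kind of schema shape that motivates this (made-up model names, no agno or Gemini calls):

from __future__ import annotations

from typing import Dict, List, Optional

from pydantic import BaseModel


class Section(BaseModel):
    title: str
    subsections: List[Section] = []  # self-referencing (circular) schema
    attributes: Dict[str, str] = {}  # free-form dict field


class Report(BaseModel):
    summary: str
    sections: List[Section]
    footnote: Optional[str] = None


# Pydantic expresses the recursion via $ref/$defs, which a response-schema
# converter has to resolve before handing the schema to the provider.
print(Report.model_json_schema()["$defs"].keys())  # dict_keys(['Section'])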
@@ -84,7 +84,8 @@ class Ollama(Model):
        if self.async_client is not None:
            return self.async_client
 
-        return AsyncOllamaClient(**self._get_client_params())
+        self.async_client = AsyncOllamaClient(**self._get_client_params())
+        return self.async_client
 
    def get_request_params(
        self,
@@ -144,6 +145,28 @@ class Ollama(Model):
            "role": message.role,
            "content": message.content,
        }
+
+        if message.role == "assistant" and message.tool_calls is not None:
+            # Format tool calls for assistant messages
+            formatted_tool_calls = []
+            for tool_call in message.tool_calls:
+                if "function" in tool_call:
+                    function_data = tool_call["function"]
+                    formatted_tool_call = {
+                        "id": tool_call.get("id"),
+                        "type": "function",
+                        "function": {
+                            "name": function_data["name"],
+                            "arguments": json.loads(function_data["arguments"])
+                            if isinstance(function_data["arguments"], str)
+                            else function_data["arguments"],
+                        },
+                    }
+                    formatted_tool_calls.append(formatted_tool_call)
+
+            if formatted_tool_calls:
+                _message["tool_calls"] = formatted_tool_calls
+
        if message.role == "user":
            if message.images is not None:
                message_images = []
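Reviewer note, not part of the diff: the hunk above starts forwarding assistant `tool_calls` to Ollama, decoding JSON-string `arguments` with `json.loads` so the client receives a dict rather than a string. The same transformation restated as a standalone function over plain dicts, for illustration:

import json
from typing import Any, Dict, List


def format_tool_calls(tool_calls: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    formatted = []
    for tool_call in tool_calls:
        if "function" not in tool_call:
            continue
        function_data = tool_call["function"]
        arguments = function_data["arguments"]
        formatted.append(
            {
                "id": tool_call.get("id"),
                "type": "function",
                "function": {
                    "name": function_data["name"],
                    # JSON-encoded arguments are decoded so the payload carries a dict.
                    "arguments": json.loads(arguments) if isinstance(arguments, str) else arguments,
                },
            }
        )
    return formatted


print(format_tool_calls([
    {"id": "call_1", "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}},
]))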
@@ -22,13 +22,8 @@ try:
    from openai import AsyncOpenAI as AsyncOpenAIClient
    from openai import OpenAI as OpenAIClient
    from openai.types import CompletionUsage
-    from openai.types.chat import ChatCompletionAudio
-    from openai.types.chat.chat_completion import ChatCompletion
-    from openai.types.chat.chat_completion_chunk import (
-        ChatCompletionChunk,
-        ChoiceDelta,
-        ChoiceDeltaToolCall,
-    )
+    from openai.types.chat import ChatCompletion, ChatCompletionAudio, ChatCompletionChunk
+    from openai.types.chat.chat_completion_chunk import ChoiceDelta, ChoiceDeltaToolCall
 except (ImportError, ModuleNotFoundError):
    raise ImportError("`openai` not installed. Please install using `pip install openai`")
 
@@ -19,10 +19,7 @@ from agno.utils.models.schema_utils import get_response_schema_for_provider
 
 try:
    from openai import APIConnectionError, APIStatusError, AsyncOpenAI, OpenAI, RateLimitError
-    from openai.types.responses.response import Response
-    from openai.types.responses.response_reasoning_item import ResponseReasoningItem
-    from openai.types.responses.response_stream_event import ResponseStreamEvent
-    from openai.types.responses.response_usage import ResponseUsage
+    from openai.types.responses import Response, ResponseReasoningItem, ResponseStreamEvent, ResponseUsage
 except ImportError as e:
    raise ImportError("`openai` not installed. Please install using `pip install openai -U`") from e
 
@@ -407,21 +404,28 @@ class OpenAIResponses(Model):
        """
        formatted_messages: List[Union[Dict[str, Any], ResponseReasoningItem]] = []
 
-        if self._using_reasoning_model():
+        messages_to_format = messages
+        previous_response_id: Optional[str] = None
+
+        if self._using_reasoning_model() and self.store is not False:
            # Detect whether we're chaining via previous_response_id. If so, we should NOT
            # re-send prior function_call items; the Responses API already has the state and
            # expects only the corresponding function_call_output items.
-            previous_response_id: Optional[str] = None
-            if self.store is not False:
-                for msg in reversed(messages):
-                    if (
-                        msg.role == "assistant"
-                        and hasattr(msg, "provider_data")
-                        and msg.provider_data
-                        and "response_id" in msg.provider_data
-                    ):
-                        previous_response_id = msg.provider_data["response_id"]
-                        break
+
+            for msg in reversed(messages):
+                if (
+                    msg.role == "assistant"
+                    and hasattr(msg, "provider_data")
+                    and msg.provider_data
+                    and "response_id" in msg.provider_data
+                ):
+                    previous_response_id = msg.provider_data["response_id"]
+                    msg_index = messages.index(msg)
+
+                    # Include messages after this assistant message
+                    messages_to_format = messages[msg_index + 1:]
+
+                    break
 
        # Build a mapping from function_call id (fc_*) → call_id (call_*) from prior assistant tool_calls
        fc_id_to_call_id: Dict[str, str] = {}
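Reviewer note, not part of the diff: this hunk (together with the loop change just below) makes reasoning models that chain via `previous_response_id`, with `store` enabled, re-send only the messages that follow the most recent assistant message carrying a `response_id`, instead of the whole history. A simplified sketch of that selection logic using plain dicts in place of agno Message objects:

from typing import Any, Dict, List, Optional, Tuple


def split_for_chaining(messages: List[Dict[str, Any]]) -> Tuple[Optional[str], List[Dict[str, Any]]]:
    for msg in reversed(messages):
        provider_data = msg.get("provider_data") or {}
        if msg.get("role") == "assistant" and "response_id" in provider_data:
            idx = messages.index(msg)
            # The Responses API already holds the state up to this response,
            # so only the messages after it need to be re-sent.
            return provider_data["response_id"], messages[idx + 1:]
    return None, messages


history = [
    {"role": "user", "content": "What's 2 + 2?"},
    {"role": "assistant", "content": "4", "provider_data": {"response_id": "resp_123"}},
    {"role": "tool", "content": "tool output"},
]
print(split_for_chaining(history))  # ('resp_123', [{'role': 'tool', 'content': 'tool output'}])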
@@ -434,7 +438,7 @@ class OpenAIResponses(Model):
                if isinstance(fc_id, str) and isinstance(call_id, str):
                    fc_id_to_call_id[fc_id] = call_id
 
-        for message in messages:
+        for message in messages_to_format:
            if message.role in ["user", "system"]:
                message_dict: Dict[str, Any] = {
                    "role": self.role_map[message.role],
@@ -19,8 +19,8 @@ class AGUI(BaseInterface):
        self.agent = agent
        self.team = team
 
-        if not self.agent and not self.team:
-            raise ValueError("AGUI requires an agent and a team")
+        if not (self.agent or self.team):
+            raise ValueError("AGUI requires an agent or a team")
 
    def get_router(self, **kwargs) -> APIRouter:
        # Cannot be overridden