agno 1.7.8__py3-none-any.whl → 1.7.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +33 -27
- agno/document/reader/pdf_reader.py +302 -143
- agno/knowledge/agent.py +68 -72
- agno/knowledge/pdf.py +32 -8
- agno/knowledge/pdf_url.py +13 -5
- agno/models/openai/responses.py +30 -1
- agno/run/response.py +10 -0
- agno/run/team.py +10 -0
- agno/team/team.py +39 -20
- agno/tools/aws_lambda.py +10 -0
- agno/tools/github.py +54 -18
- agno/vectordb/lancedb/lance_db.py +10 -2
- agno/vectordb/pgvector/pgvector.py +3 -0
- agno/vectordb/weaviate/weaviate.py +84 -18
- {agno-1.7.8.dist-info → agno-1.7.10.dist-info}/METADATA +2 -1
- {agno-1.7.8.dist-info → agno-1.7.10.dist-info}/RECORD +20 -20
- {agno-1.7.8.dist-info → agno-1.7.10.dist-info}/WHEEL +0 -0
- {agno-1.7.8.dist-info → agno-1.7.10.dist-info}/entry_points.txt +0 -0
- {agno-1.7.8.dist-info → agno-1.7.10.dist-info}/licenses/LICENSE +0 -0
- {agno-1.7.8.dist-info → agno-1.7.10.dist-info}/top_level.txt +0 -0
agno/knowledge/agent.py
CHANGED
@@ -50,6 +50,53 @@ class AgentKnowledge(BaseModel):
         """
         raise NotImplementedError

+    def _upsert_warning(self, upsert) -> None:
+        """Log a warning if upsert is not available"""
+        if upsert and self.vector_db is not None and not self.vector_db.upsert_available():
+            log_info(
+                f"Vector db '{self.vector_db.__class__.__module__}' does not support upsert. Falling back to insert."
+            )
+
+    def _load_init(self, recreate: bool, upsert: bool) -> None:
+        """Initial setup for loading knowledge base"""
+        if self.vector_db is None:
+            logger.warning("No vector db provided")
+            return
+
+        if recreate:
+            log_info("Dropping collection")
+            self.vector_db.drop()
+
+        if not self.vector_db.exists():
+            log_info("Creating collection")
+            self.vector_db.create()
+
+        self._upsert_warning(upsert)
+
+    async def _aload_init(self, recreate: bool, upsert: bool) -> None:
+        """Initial async setup for loading knowledge base"""
+        if self.vector_db is None:
+            logger.warning("No vector db provided")
+            return
+
+        if recreate:
+            log_info("Dropping collection")
+            try:
+                await self.vector_db.async_drop()
+            except NotImplementedError:
+                logger.warning("Vector db does not support async drop, falling back to sync drop")
+                self.vector_db.drop()
+
+        if not self.vector_db.exists():
+            log_info("Creating collection")
+            try:
+                await self.vector_db.async_create()
+            except NotImplementedError:
+                logger.warning("Vector db does not support async create, falling back to sync create")
+                self.vector_db.create()
+
+        self._upsert_warning(upsert)
+
     def search(
         self, query: str, num_documents: Optional[int] = None, filters: Optional[Dict[str, Any]] = None
     ) -> List[Document]:

@@ -80,7 +127,7 @@ class AgentKnowledge(BaseModel):
         try:
             return await self.vector_db.async_search(query=query, limit=_num_documents, filters=filters)
         except NotImplementedError:
-
+            log_info("Vector db does not support async search")
             return self.search(query=query, num_documents=_num_documents, filters=filters)
         except Exception as e:
             logger.error(f"Error searching for documents: {e}")

@@ -99,18 +146,10 @@ class AgentKnowledge(BaseModel):
             upsert (bool): If True, upserts documents to the vector db. Defaults to False.
             skip_existing (bool): If True, skips documents which already exist in the vector db when inserting. Defaults to True.
         """
+        self._load_init(recreate, upsert)
         if self.vector_db is None:
-            logger.warning("No vector db provided")
             return

-        if recreate:
-            log_info("Dropping collection")
-            self.vector_db.drop()
-
-        if not self.vector_db.exists():
-            log_info("Creating collection")
-            self.vector_db.create()
-
         log_info("Loading knowledge base")
         num_documents = 0
         for document_list in self.document_lists:

@@ -123,8 +162,7 @@ class AgentKnowledge(BaseModel):

             # Upsert documents if upsert is True and vector db supports upsert
             if upsert and self.vector_db.upsert_available():
-
-                self.vector_db.upsert(documents=[doc], filters=doc.meta_data)
+                self.vector_db.upsert(documents=documents_to_load, filters=doc.meta_data)
             # Insert documents
             else:
                 # Filter out documents which already exist in the vector db

@@ -133,11 +171,10 @@ class AgentKnowledge(BaseModel):
                 documents_to_load = self.filter_existing_documents(document_list)

                 if documents_to_load:
-
-                    self.vector_db.insert(documents=[doc], filters=doc.meta_data)
+                    self.vector_db.insert(documents=documents_to_load, filters=doc.meta_data)

                 num_documents += len(documents_to_load)
-
+        log_info(f"Added {num_documents} documents to knowledge base")

     async def aload(
         self,

@@ -152,19 +189,10 @@ class AgentKnowledge(BaseModel):
             upsert (bool): If True, upserts documents to the vector db. Defaults to False.
             skip_existing (bool): If True, skips documents which already exist in the vector db when inserting. Defaults to True.
         """
-
+        await self._aload_init(recreate, upsert)
         if self.vector_db is None:
-            logger.warning("No vector db provided")
             return

-        if recreate:
-            log_info("Dropping collection")
-            await self.vector_db.async_drop()
-
-        if not await self.vector_db.async_exists():
-            log_info("Creating collection")
-            await self.vector_db.async_create()
-
         log_info("Loading knowledge base")
         num_documents = 0
         document_iterator = self.async_document_lists

@@ -177,8 +205,7 @@ class AgentKnowledge(BaseModel):

             # Upsert documents if upsert is True and vector db supports upsert
             if upsert and self.vector_db.upsert_available():
-
-                await self.vector_db.async_upsert(documents=[doc], filters=doc.meta_data)
+                await self.vector_db.async_upsert(documents=documents_to_load, filters=doc.meta_data)
             # Insert documents
             else:
                 # Filter out documents which already exist in the vector db

@@ -187,11 +214,10 @@ class AgentKnowledge(BaseModel):
                 documents_to_load = await self.async_filter_existing_documents(document_list)

                 if documents_to_load:
-
-                    await self.vector_db.async_insert(documents=[doc], filters=doc.meta_data)
+                    await self.vector_db.async_insert(documents=documents_to_load, filters=doc.meta_data)

                 num_documents += len(documents_to_load)
-
+        log_info(f"Added {num_documents} documents to knowledge base")

     def load_documents(
         self,

@@ -208,15 +234,11 @@ class AgentKnowledge(BaseModel):
             skip_existing (bool): If True, skips documents which already exist in the vector db when inserting. Defaults to True.
             filters (Optional[Dict[str, Any]]): Filters to add to each row that can be used to limit results during querying. Defaults to None.
         """
-
-        log_info("Loading knowledge base")
+        self._load_init(recreate=False, upsert=upsert)
         if self.vector_db is None:
-            logger.warning("No vector db provided")
             return
-
-
-        self.vector_db.create()
-
+
+        log_info("Loading knowledge base")
         # Upsert documents if upsert is True
         if upsert and self.vector_db.upsert_available():
             self.vector_db.upsert(documents=documents, filters=filters)

@@ -251,17 +273,11 @@ class AgentKnowledge(BaseModel):
             skip_existing (bool): If True, skips documents which already exist in the vector db when inserting. Defaults to True.
             filters (Optional[Dict[str, Any]]): Filters to add to each row that can be used to limit results during querying. Defaults to None.
         """
-
+        await self._aload_init(recreate=False, upsert=upsert)
         if self.vector_db is None:
-            logger.warning("No vector db provided")
             return

-
-        try:
-            await self.vector_db.async_create()
-        except NotImplementedError:
-            logger.warning("Vector db does not support async create")
-            self.vector_db.create()
+        log_info("Loading knowledge base")

         # Upsert documents if upsert is True
         if upsert and self.vector_db.upsert_available():

@@ -302,7 +318,7 @@ class AgentKnowledge(BaseModel):
         else:
             log_info("No new documents to load")

-    def
+    def load_document(
         self,
         document: Document,
         upsert: bool = False,

@@ -414,8 +430,6 @@ class AgentKnowledge(BaseModel):
         Returns:
             List[Document]: Filtered list of documents that don't exist in the database
         """
-        from agno.utils.log import log_debug, log_info
-
         if not self.vector_db:
             log_debug("No vector database configured, skipping document filtering")
             return documents

@@ -556,20 +570,9 @@ class AgentKnowledge(BaseModel):
             self._track_metadata_structure(metadata)

         # 3. Prepare vector DB
+        self._load_init(recreate, upsert=False)
         if self.vector_db is None:
-            logger.warning("Cannot load file: No vector db provided.")
             return False
-
-        # Recreate collection if requested
-        if recreate:
-            # log_info(f"Recreating collection.")
-            self.vector_db.drop()
-
-        # Create collection if it doesn't exist
-        if not self.vector_db.exists():
-            # log_info(f"Collection does not exist. Creating.")
-            self.vector_db.create()
-
         return True

     async def aprepare_load(

@@ -604,20 +607,9 @@ class AgentKnowledge(BaseModel):
             self._track_metadata_structure(metadata)

         # 3. Prepare vector DB
+        await self._aload_init(recreate, upsert=False)
         if self.vector_db is None:
-            logger.warning("Cannot load file: No vector db provided.")
             return False
-
-        # Recreate collection if requested
-        if recreate:
-            log_info("Recreating collection.")
-            await self.vector_db.async_drop()
-
-        # Create collection if it doesn't exist
-        if not await self.vector_db.async_exists():
-            log_info("Collection does not exist. Creating.")
-            await self.vector_db.async_create()
-
         return True

     def process_documents(

@@ -642,6 +634,8 @@ class AgentKnowledge(BaseModel):

         log_info(f"Loading {len(documents)} documents from {source_info} with metadata: {metadata}")

+        self._upsert_warning(upsert)
+
         # Decide loading strategy: upsert or insert (with optional skip)
         if upsert and self.vector_db.upsert_available():  # type: ignore
             log_debug(f"Upserting {len(documents)} documents.")  # type: ignore

@@ -681,6 +675,8 @@ class AgentKnowledge(BaseModel):
             logger.warning(f"No documents were read from {source_info}")
             return

+        self._upsert_warning(upsert)
+
         log_info(f"Loading {len(documents)} documents from {source_info} with metadata: {metadata}")

         # Decide loading strategy: upsert or insert (with optional skip)
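The consolidated _load_init/_upsert_warning helpers change what callers observe: load() now drops/creates the collection in one place and logs a fallback notice, instead of silently inserting, when the backend lacks upsert. A minimal usage sketch follows; the InMemoryKnowledge subclass and my_vector_db are illustrative placeholders, not part of this diff.

from typing import Iterator, List

from agno.document import Document
from agno.knowledge.agent import AgentKnowledge


class InMemoryKnowledge(AgentKnowledge):
    """Hypothetical subclass used only to illustrate the load() flow."""

    @property
    def document_lists(self) -> Iterator[List[Document]]:
        yield [Document(content="hello world")]


# my_vector_db is a placeholder for any agno vector db implementation.
knowledge = InMemoryKnowledge(vector_db=my_vector_db)

# load() routes through _load_init() (drop/create the collection as requested)
# and _upsert_warning() (log a fallback to insert if upsert is unsupported).
knowledge.load(recreate=True, upsert=True, skip_existing=True)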
agno/knowledge/pdf.py
CHANGED
@@ -2,15 +2,22 @@ from pathlib import Path
 from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union

 from pydantic import Field
+from typing_extensions import TypedDict

 from agno.document import Document
 from agno.document.reader.pdf_reader import PDFImageReader, PDFReader
 from agno.knowledge.agent import AgentKnowledge
-from agno.utils.log import log_info, logger
+from agno.utils.log import log_error, log_info, logger
+
+
+class PDFConfig(TypedDict, total=False):
+    path: str
+    password: Optional[str]
+    metadata: Optional[Dict[str, Any]]


 class PDFKnowledgeBase(AgentKnowledge):
-    path: Optional[Union[str, Path, List[
+    path: Optional[Union[str, Path, List[PDFConfig]]] = None
     formats: List[str] = [".pdf"]
     exclude_files: List[str] = Field(default_factory=list)
     reader: Union[PDFReader, PDFImageReader] = PDFReader()

@@ -24,19 +31,21 @@ class PDFKnowledgeBase(AgentKnowledge):
         if isinstance(self.path, list):
             for item in self.path:
                 if isinstance(item, dict) and "path" in item:
-                    # Handle path with metadata
                     file_path = item["path"]
                     config = item.get("metadata", {})
+                    file_password = item.get("password")
+                    if file_password is not None and not isinstance(file_password, str):
+                        file_password = None
+
                     _pdf_path = Path(file_path)  # type: ignore
                     if self._is_valid_pdf(_pdf_path):
-                        documents = self.reader.read(pdf=_pdf_path)
+                        documents = self.reader.read(pdf=_pdf_path, password=file_password)
                         if config:
                             for doc in documents:
                                 log_info(f"Adding metadata {config} to document: {doc.name}")
                                 doc.meta_data.update(config)  # type: ignore
                         yield documents
         else:
-            # Handle single path
             _pdf_path = Path(self.path)
             if _pdf_path.is_dir():
                 for _pdf in _pdf_path.glob("**/*.pdf"):

@@ -47,7 +56,19 @@ class PDFKnowledgeBase(AgentKnowledge):

     def _is_valid_pdf(self, path: Path) -> bool:
         """Helper to check if path is a valid PDF file."""
-
+        if not path.exists():
+            log_error(f"PDF file not found: {path}")
+            return False
+        if not path.is_file():
+            log_error(f"Path is not a file: {path}")
+            return False
+        if path.suffix != ".pdf":
+            log_error(f"File is not a PDF: {path}")
+            return False
+        if path.name in self.exclude_files:
+            log_error(f"PDF file excluded: {path}")
+            return False
+        return True

     @property
     async def async_document_lists(self) -> AsyncIterator[List[Document]]:

@@ -58,12 +79,15 @@ class PDFKnowledgeBase(AgentKnowledge):
         if isinstance(self.path, list):
             for item in self.path:
                 if isinstance(item, dict) and "path" in item:
-                    # Handle path with metadata
                     file_path = item["path"]
                     config = item.get("metadata", {})
+                    file_password = item.get("password")
+                    if file_password is not None and not isinstance(file_password, str):
+                        file_password = None
+
                     _pdf_path = Path(file_path)  # type: ignore
                     if self._is_valid_pdf(_pdf_path):
-                        documents = await self.reader.async_read(pdf=_pdf_path)
+                        documents = await self.reader.async_read(pdf=_pdf_path, password=file_password)
                         if config:
                             for doc in documents:
                                 log_info(f"Adding metadata {config} to document: {doc.name}")
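The new PDFConfig entries make per-file passwords and metadata explicit in the path list. A minimal sketch of the list form; the file names, password, and metadata values are placeholders, and the vector db is left out.

from agno.knowledge.pdf import PDFKnowledgeBase

# File names, password, and metadata below are illustrative placeholders.
knowledge_base = PDFKnowledgeBase(
    path=[
        {"path": "data/plain.pdf"},  # no password needed
        {
            "path": "data/protected.pdf",
            "password": "example-password",
            "metadata": {"source": "finance"},
        },
    ],
    # vector_db=...,  # any agno vector db implementation
)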
agno/knowledge/pdf_url.py
CHANGED
@@ -19,18 +19,22 @@ class PDFUrlKnowledgeBase(AgentKnowledge):

         for item in self.urls:
             if isinstance(item, dict) and "url" in item:
-                # Handle URL with metadata
+                # Handle URL with metadata/password
                 url = item["url"]
                 config = item.get("metadata", {})
+                pdf_password = item.get("password")
+                if pdf_password is not None and not isinstance(pdf_password, str):
+                    pdf_password = None
+
                 if self._is_valid_url(url):  # type: ignore
-                    documents = self.reader.read(url=url)  # type: ignore
+                    documents = self.reader.read(url=url, password=pdf_password)  # type: ignore
                     if config:
                         for doc in documents:
                             log_info(f"Adding metadata {config} to document from URL: {url}")
                             doc.meta_data.update(config)  # type: ignore
                     yield documents
             else:
-                # Handle simple URL
+                # Handle simple URL - no password
                 if self._is_valid_url(item):  # type: ignore
                     yield self.reader.read(url=item)  # type: ignore


@@ -49,11 +53,15 @@ class PDFUrlKnowledgeBase(AgentKnowledge):

         for item in self.urls:
             if isinstance(item, dict) and "url" in item:
-                # Handle URL with metadata
+                # Handle URL with metadata/password
                 url = item["url"]
                 config = item.get("metadata", {})
+                pdf_password = item.get("password")
+                if pdf_password is not None and not isinstance(pdf_password, str):
+                    pdf_password = None
+
                 if self._is_valid_url(url):  # type: ignore
-                    documents = await self.reader.async_read(url=url)  # type: ignore
+                    documents = await self.reader.async_read(url=url, password=pdf_password)  # type: ignore
                     if config:
                         for doc in documents:
                             log_info(f"Adding metadata {config} to document from URL: {url}")
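The URL-based knowledge base accepts the same optional password key per entry, alongside metadata. A minimal sketch with placeholder URLs and password; the vector db is omitted.

from agno.knowledge.pdf_url import PDFUrlKnowledgeBase

# URLs and password are illustrative placeholders.
knowledge_base = PDFUrlKnowledgeBase(
    urls=[
        "https://example.com/plain.pdf",  # plain string entries take no password
        {
            "url": "https://example.com/protected.pdf",
            "password": "example-password",
            "metadata": {"source": "reports"},
        },
    ],
    # vector_db=...,  # any agno vector db implementation
)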
agno/models/openai/responses.py
CHANGED
@@ -78,6 +78,10 @@ class OpenAIResponses(Model):
             }
         )

+    def _using_reasoning_model(self) -> bool:
+        """Return True if the contextual used model is a known reasoning model."""
+        return self.id.startswith("o3") or self.id.startswith("o4-mini") or self.id.startswith("gpt-5")
+
     def _get_client_params(self) -> Dict[str, Any]:
         """
         Get client parameters for API requests.

@@ -221,7 +225,7 @@ class OpenAIResponses(Model):
             request_params["tool_choice"] = tool_choice

         # Handle reasoning tools for o3 and o4-mini models
-        if
+        if self._using_reasoning_model() and messages is not None:
             request_params["store"] = True

             # Check if the last assistant message has a previous_response_id to continue from

@@ -352,6 +356,22 @@ class OpenAIResponses(Model):
             Dict[str, Any]: The formatted message.
         """
         formatted_messages: List[Dict[str, Any]] = []
+
+        if self._using_reasoning_model():
+            # Detect whether we're chaining via previous_response_id. If so, we should NOT
+            # re-send prior function_call items; the Responses API already has the state and
+            # expects only the corresponding function_call_output items.
+            previous_response_id: Optional[str] = None
+            for msg in reversed(messages):
+                if (
+                    msg.role == "assistant"
+                    and hasattr(msg, "provider_data")
+                    and msg.provider_data
+                    and "response_id" in msg.provider_data
+                ):
+                    previous_response_id = msg.provider_data["response_id"]
+                    break
+
         for message in messages:
             if message.role in ["user", "system"]:
                 message_dict: Dict[str, Any] = {

@@ -384,6 +404,15 @@ class OpenAIResponses(Model):
                     {"type": "function_call_output", "call_id": message.tool_call_id, "output": message.content}
                 )
             elif message.tool_calls is not None and len(message.tool_calls) > 0:
+                if self._using_reasoning_model():
+                    # Only include prior function_call items when we are NOT using
+                    # previous_response_id. When previous_response_id is present, the
+                    # Responses API already knows about earlier output items (including
+                    # reasoning/function_call), and re-sending them can trigger validation
+                    # errors (e.g., missing required reasoning item).
+                    if previous_response_id is not None:
+                        continue
+
                 for tool_call in message.tool_calls:
                     formatted_messages.append(
                         {
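The _using_reasoning_model() check gates the previous_response_id handling above to model ids starting with o3, o4-mini, or gpt-5. A minimal sketch of a run that would exercise that path; the import paths, model id, and prompt are assumptions for illustration, not taken from this diff.

from agno.agent import Agent
from agno.models.openai import OpenAIResponses

# An id starting with "o3", "o4-mini", or "gpt-5" triggers the reasoning-model branch,
# which stores responses and chains follow-up requests via previous_response_id.
agent = Agent(model=OpenAIResponses(id="o4-mini"))
agent.print_response("Summarize the latest run in one sentence.", stream=True)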
agno/run/response.py
CHANGED
@@ -17,6 +17,7 @@ class RunEvent(str, Enum):

     run_started = "RunStarted"
     run_response_content = "RunResponseContent"
+    run_intermediate_response_content = "RunIntermediateResponseContent"
     run_completed = "RunCompleted"
     run_error = "RunError"
     run_cancelled = "RunCancelled"

@@ -92,6 +93,13 @@ class RunResponseContentEvent(BaseAgentRunResponseEvent):
     extra_data: Optional[RunResponseExtraData] = None


+@dataclass
+class IntermediateRunResponseContentEvent(BaseAgentRunResponseEvent):
+    event: str = RunEvent.run_intermediate_response_content.value
+    content: Optional[Any] = None
+    content_type: str = "str"
+
+
 @dataclass
 class RunResponseCompletedEvent(BaseAgentRunResponseEvent):
     event: str = RunEvent.run_completed.value

@@ -207,6 +215,7 @@ class OutputModelResponseCompletedEvent(BaseAgentRunResponseEvent):
 RunResponseEvent = Union[
     RunResponseStartedEvent,
     RunResponseContentEvent,
+    IntermediateRunResponseContentEvent,
     RunResponseCompletedEvent,
     RunResponseErrorEvent,
     RunResponseCancelledEvent,

@@ -230,6 +239,7 @@ RunResponseEvent = Union[
 RUN_EVENT_TYPE_REGISTRY = {
     RunEvent.run_started.value: RunResponseStartedEvent,
     RunEvent.run_response_content.value: RunResponseContentEvent,
+    RunEvent.run_intermediate_response_content.value: IntermediateRunResponseContentEvent,
     RunEvent.run_completed.value: RunResponseCompletedEvent,
     RunEvent.run_error.value: RunResponseErrorEvent,
     RunEvent.run_cancelled.value: RunResponseCancelledEvent,
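The new event type slots into the existing streaming union and event registry, so consumers can match it like any other run event. A minimal sketch, assuming an Agent whose streamed run yields these event objects; the agent setup and prompt are placeholders.

from agno.run.response import IntermediateRunResponseContentEvent, RunResponseContentEvent

# `agent` is a placeholder agno Agent configured elsewhere.
for event in agent.run("Draft a short status update.", stream=True):
    if isinstance(event, IntermediateRunResponseContentEvent):
        print(f"[intermediate] {event.content}")
    elif isinstance(event, RunResponseContentEvent):
        print(event.content, end="")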
agno/run/team.py
CHANGED
@@ -17,6 +17,7 @@ class TeamRunEvent(str, Enum):

     run_started = "TeamRunStarted"
     run_response_content = "TeamRunResponseContent"
+    run_intermediate_response_content = "TeamRunIntermediateResponseContent"
     run_completed = "TeamRunCompleted"
     run_error = "TeamRunError"
     run_cancelled = "TeamRunCancelled"

@@ -94,6 +95,13 @@ class RunResponseContentEvent(BaseTeamRunResponseEvent):
     extra_data: Optional[RunResponseExtraData] = None


+@dataclass
+class IntermediateRunResponseContentEvent(BaseTeamRunResponseEvent):
+    event: str = TeamRunEvent.run_intermediate_response_content.value
+    content: Optional[Any] = None
+    content_type: str = "str"
+
+
 @dataclass
 class RunResponseCompletedEvent(BaseTeamRunResponseEvent):
     event: str = TeamRunEvent.run_completed.value

@@ -191,6 +199,7 @@ class OutputModelResponseCompletedEvent(BaseTeamRunResponseEvent):
 TeamRunResponseEvent = Union[
     RunResponseStartedEvent,
     RunResponseContentEvent,
+    IntermediateRunResponseContentEvent,
     RunResponseCompletedEvent,
     RunResponseErrorEvent,
     RunResponseCancelledEvent,

@@ -211,6 +220,7 @@ TeamRunResponseEvent = Union[
 TEAM_RUN_EVENT_TYPE_REGISTRY = {
     TeamRunEvent.run_started.value: RunResponseStartedEvent,
     TeamRunEvent.run_response_content.value: RunResponseContentEvent,
+    TeamRunEvent.run_intermediate_response_content.value: IntermediateRunResponseContentEvent,
     TeamRunEvent.run_completed.value: RunResponseCompletedEvent,
     TeamRunEvent.run_error.value: RunResponseErrorEvent,
     TeamRunEvent.run_cancelled.value: RunResponseCancelledEvent,