agno 1.7.9__py3-none-any.whl → 1.7.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +1 -1
- agno/document/reader/pdf_reader.py +69 -13
- agno/knowledge/agent.py +68 -72
- agno/knowledge/pdf.py +32 -8
- agno/knowledge/pdf_url.py +13 -5
- agno/models/openai/responses.py +30 -1
- agno/team/team.py +15 -7
- agno/tools/github.py +54 -18
- agno/vectordb/weaviate/weaviate.py +84 -18
- {agno-1.7.9.dist-info → agno-1.7.10.dist-info}/METADATA +2 -1
- {agno-1.7.9.dist-info → agno-1.7.10.dist-info}/RECORD +15 -15
- {agno-1.7.9.dist-info → agno-1.7.10.dist-info}/WHEEL +0 -0
- {agno-1.7.9.dist-info → agno-1.7.10.dist-info}/entry_points.txt +0 -0
- {agno-1.7.9.dist-info → agno-1.7.10.dist-info}/licenses/LICENSE +0 -0
- {agno-1.7.9.dist-info → agno-1.7.10.dist-info}/top_level.txt +0 -0
agno/agent/agent.py
CHANGED
@@ -6872,7 +6872,7 @@ class Agent:
             document_name = query.replace(" ", "_").replace("?", "").replace("!", "").replace(".", "")
             document_content = json.dumps({"query": query, "result": result})
             log_info(f"Adding document to knowledge base: {document_name}: {document_content}")
-            self.knowledge.
+            self.knowledge.load_document(
                 document=Document(
                     name=document_name,
                     content=document_content,
agno/document/reader/pdf_reader.py
CHANGED
@@ -7,7 +7,7 @@ from uuid import uuid4
 from agno.document.base import Document
 from agno.document.reader.base import Reader
 from agno.utils.http import async_fetch_with_retry, fetch_with_retry
-from agno.utils.log import log_info, logger
+from agno.utils.log import log_error, log_info, logger
 
 try:
     from pypdf import PdfReader as DocumentReader  # noqa: F401
@@ -177,6 +177,7 @@ class BasePDFReader(Reader):
         split_on_pages: bool = True,
         page_start_numbering_format: Optional[str] = None,
         page_end_numbering_format: Optional[str] = None,
+        password: Optional[str] = None,
         **kwargs,
     ):
         if page_start_numbering_format is None:
@@ -187,6 +188,7 @@ class BasePDFReader(Reader):
         self.split_on_pages = split_on_pages
         self.page_start_numbering_format = page_start_numbering_format
         self.page_end_numbering_format = page_end_numbering_format
+        self.password = password
 
         super().__init__(**kwargs)
 
@@ -196,6 +198,28 @@ class BasePDFReader(Reader):
             chunked_documents.extend(self.chunk_document(document))
         return chunked_documents
 
+    def _decrypt_pdf(self, doc_reader: DocumentReader, doc_name: str, password: Optional[str] = None) -> bool:
+        if not doc_reader.is_encrypted:
+            return True
+
+        # Use provided password or fall back to instance password
+        pdf_password = password or self.password
+        if not pdf_password:
+            logger.error(f"PDF {doc_name} is password protected but no password provided")
+            return False
+
+        try:
+            decrypted_pdf = doc_reader.decrypt(pdf_password)
+            if decrypted_pdf:
+                log_info(f"Successfully decrypted PDF {doc_name} with user password")
+                return True
+            else:
+                log_error(f"Failed to decrypt PDF {doc_name}: incorrect password")
+                return False
+        except Exception as e:
+            log_error(f"Error decrypting PDF {doc_name}: {e}")
+            return False
+
     def _create_documents(self, pdf_content: List[str], doc_name: str, use_uuid_for_id: bool, page_number_shift):
         if self.split_on_pages:
             shift = page_number_shift if page_number_shift is not None else 1
@@ -282,7 +306,7 @@ class BasePDFReader(Reader):
 class PDFReader(BasePDFReader):
     """Reader for PDF files"""
 
-    def read(self, pdf: Union[str, Path, IO[Any]]) -> List[Document]:
+    def read(self, pdf: Union[str, Path, IO[Any]], password: Optional[str] = None) -> List[Document]:
         try:
             if isinstance(pdf, str):
                 doc_name = pdf.split("/")[-1].split(".")[0].replace(" ", "_")
@@ -299,10 +323,14 @@ class PDFReader(BasePDFReader):
             logger.error(f"Error reading PDF: {e}")
             return []
 
+        # Handle PDF decryption
+        if not self._decrypt_pdf(pdf_reader, doc_name, password):
+            return []
+
         # Read and chunk.
         return self._pdf_reader_to_documents(pdf_reader, doc_name, use_uuid_for_id=True)
 
-    async def async_read(self, pdf: Union[str, Path, IO[Any]]) -> List[Document]:
+    async def async_read(self, pdf: Union[str, Path, IO[Any]], password: Optional[str] = None) -> List[Document]:
         try:
             if isinstance(pdf, str):
                 doc_name = pdf.split("/")[-1].split(".")[0].replace(" ", "_")
@@ -319,6 +347,10 @@ class PDFReader(BasePDFReader):
             logger.error(f"Error reading PDF: {e}")
             return []
 
+        # Handle PDF decryption
+        if not self._decrypt_pdf(pdf_reader, doc_name, password):
+            return []
+
         # Read and chunk.
         return await self._async_pdf_reader_to_documents(pdf_reader, doc_name, use_uuid_for_id=True)
 
@@ -326,11 +358,11 @@ class PDFReader(BasePDFReader):
 class PDFUrlReader(BasePDFReader):
     """Reader for PDF files from URL"""
 
-    def __init__(self, proxy: Optional[str] = None, **kwargs):
-        super().__init__(**kwargs)
+    def __init__(self, proxy: Optional[str] = None, password: Optional[str] = None, **kwargs):
+        super().__init__(password=password, **kwargs)
         self.proxy = proxy
 
-    def read(self, url: str) -> List[Document]:
+    def read(self, url: str, password: Optional[str] = None) -> List[Document]:
         if not url:
             raise ValueError("No url provided")
 
@@ -344,10 +376,14 @@ class PDFUrlReader(BasePDFReader):
         doc_name = url.split("/")[-1].split(".")[0].replace("/", "_").replace(" ", "_")
         pdf_reader = DocumentReader(BytesIO(response.content))
 
+        # Handle PDF decryption
+        if not self._decrypt_pdf(pdf_reader, doc_name, password):
+            return []
+
         # Read and chunk.
         return self._pdf_reader_to_documents(pdf_reader, doc_name, use_uuid_for_id=False)
 
-    async def async_read(self, url: str) -> List[Document]:
+    async def async_read(self, url: str, password: Optional[str] = None) -> List[Document]:
         if not url:
             raise ValueError("No url provided")
 
@@ -364,6 +400,10 @@ class PDFUrlReader(BasePDFReader):
         doc_name = url.split("/")[-1].split(".")[0].replace("/", "_").replace(" ", "_")
         pdf_reader = DocumentReader(BytesIO(response.content))
 
+        # Handle PDF decryption
+        if not self._decrypt_pdf(pdf_reader, doc_name, password):
+            return []
+
         # Read and chunk.
         return await self._async_pdf_reader_to_documents(pdf_reader, doc_name, use_uuid_for_id=False)
 
@@ -371,7 +411,7 @@ class PDFUrlReader(BasePDFReader):
 class PDFImageReader(BasePDFReader):
     """Reader for PDF files with text and images extraction"""
 
-    def read(self, pdf: Union[str, Path, IO[Any]]) -> List[Document]:
+    def read(self, pdf: Union[str, Path, IO[Any]], password: Optional[str] = None) -> List[Document]:
         if not pdf:
             raise ValueError("No pdf provided")
 
@@ -386,10 +426,14 @@ class PDFImageReader(BasePDFReader):
         log_info(f"Reading: {doc_name}")
         pdf_reader = DocumentReader(pdf)
 
+        # Handle PDF decryption
+        if not self._decrypt_pdf(pdf_reader, doc_name, password):
+            return []
+
         # Read and chunk.
         return self._pdf_reader_to_documents(pdf_reader, doc_name, read_images=True, use_uuid_for_id=False)
 
-    async def async_read(self, pdf: Union[str, Path, IO[Any]]) -> List[Document]:
+    async def async_read(self, pdf: Union[str, Path, IO[Any]], password: Optional[str] = None) -> List[Document]:
         if not pdf:
             raise ValueError("No pdf provided")
 
@@ -404,6 +448,10 @@ class PDFImageReader(BasePDFReader):
         log_info(f"Reading: {doc_name}")
         pdf_reader = DocumentReader(pdf)
 
+        # Handle PDF decryption
+        if not self._decrypt_pdf(pdf_reader, doc_name, password):
+            return []
+
         # Read and chunk.
         return await self._async_pdf_reader_to_documents(pdf_reader, doc_name, read_images=True, use_uuid_for_id=False)
 
@@ -411,11 +459,11 @@ class PDFImageReader(BasePDFReader):
 class PDFUrlImageReader(BasePDFReader):
     """Reader for PDF files from URL with text and images extraction"""
 
-    def __init__(self, proxy: Optional[str] = None, **kwargs):
-        super().__init__(**kwargs)
+    def __init__(self, proxy: Optional[str] = None, password: Optional[str] = None, **kwargs):
+        super().__init__(password=password, **kwargs)
         self.proxy = proxy
 
-    def read(self, url: str) -> List[Document]:
+    def read(self, url: str, password: Optional[str] = None) -> List[Document]:
         if not url:
             raise ValueError("No url provided")
 
@@ -430,10 +478,14 @@ class PDFUrlImageReader(BasePDFReader):
         doc_name = url.split("/")[-1].split(".")[0].replace(" ", "_")
         pdf_reader = DocumentReader(BytesIO(response.content))
 
+        # Handle PDF decryption
+        if not self._decrypt_pdf(pdf_reader, doc_name, password):
+            return []
+
         # Read and chunk.
         return self._pdf_reader_to_documents(pdf_reader, doc_name, read_images=True, use_uuid_for_id=False)
 
-    async def async_read(self, url: str) -> List[Document]:
+    async def async_read(self, url: str, password: Optional[str] = None) -> List[Document]:
         if not url:
             raise ValueError("No url provided")
 
@@ -451,5 +503,9 @@ class PDFUrlImageReader(BasePDFReader):
         doc_name = url.split("/")[-1].split(".")[0].replace(" ", "_")
         pdf_reader = DocumentReader(BytesIO(response.content))
 
+        # Handle PDF decryption
+        if not self._decrypt_pdf(pdf_reader, doc_name, password):
+            return []
+
         # Read and chunk.
         return await self._async_pdf_reader_to_documents(pdf_reader, doc_name, read_images=True, use_uuid_for_id=False)
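Taken together, these changes thread an optional password through every reader variant, with a per-call password taking precedence over one set at construction time via the new BasePDFReader argument. A minimal usage sketch (file paths and passwords are illustrative):

from agno.document.reader.pdf_reader import PDFReader

# Password set once on the reader; _decrypt_pdf falls back to it on every read()
reader = PDFReader(password="s3cret")  # illustrative password
docs = reader.read(pdf="reports/q1.pdf")  # illustrative path

# A per-call password overrides the instance-level one
docs = reader.read(pdf="reports/q2.pdf", password="0ther-s3cret")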
agno/knowledge/agent.py
CHANGED
@@ -50,6 +50,53 @@ class AgentKnowledge(BaseModel):
         """
         raise NotImplementedError
 
+    def _upsert_warning(self, upsert) -> None:
+        """Log a warning if upsert is not available"""
+        if upsert and self.vector_db is not None and not self.vector_db.upsert_available():
+            log_info(
+                f"Vector db '{self.vector_db.__class__.__module__}' does not support upsert. Falling back to insert."
+            )
+
+    def _load_init(self, recreate: bool, upsert: bool) -> None:
+        """Initial setup for loading knowledge base"""
+        if self.vector_db is None:
+            logger.warning("No vector db provided")
+            return
+
+        if recreate:
+            log_info("Dropping collection")
+            self.vector_db.drop()
+
+        if not self.vector_db.exists():
+            log_info("Creating collection")
+            self.vector_db.create()
+
+        self._upsert_warning(upsert)
+
+    async def _aload_init(self, recreate: bool, upsert: bool) -> None:
+        """Initial async setup for loading knowledge base"""
+        if self.vector_db is None:
+            logger.warning("No vector db provided")
+            return
+
+        if recreate:
+            log_info("Dropping collection")
+            try:
+                await self.vector_db.async_drop()
+            except NotImplementedError:
+                logger.warning("Vector db does not support async drop, falling back to sync drop")
+                self.vector_db.drop()
+
+        if not self.vector_db.exists():
+            log_info("Creating collection")
+            try:
+                await self.vector_db.async_create()
+            except NotImplementedError:
+                logger.warning("Vector db does not support async create, falling back to sync create")
+                self.vector_db.create()
+
+        self._upsert_warning(upsert)
+
     def search(
         self, query: str, num_documents: Optional[int] = None, filters: Optional[Dict[str, Any]] = None
     ) -> List[Document]:
@@ -80,7 +127,7 @@ class AgentKnowledge(BaseModel):
         try:
             return await self.vector_db.async_search(query=query, limit=_num_documents, filters=filters)
         except NotImplementedError:
-
+            log_info("Vector db does not support async search")
             return self.search(query=query, num_documents=_num_documents, filters=filters)
         except Exception as e:
             logger.error(f"Error searching for documents: {e}")
@@ -99,18 +146,10 @@ class AgentKnowledge(BaseModel):
             upsert (bool): If True, upserts documents to the vector db. Defaults to False.
             skip_existing (bool): If True, skips documents which already exist in the vector db when inserting. Defaults to True.
         """
+        self._load_init(recreate, upsert)
         if self.vector_db is None:
-            logger.warning("No vector db provided")
             return
 
-        if recreate:
-            log_info("Dropping collection")
-            self.vector_db.drop()
-
-        if not self.vector_db.exists():
-            log_info("Creating collection")
-            self.vector_db.create()
-
         log_info("Loading knowledge base")
         num_documents = 0
         for document_list in self.document_lists:
@@ -123,8 +162,7 @@ class AgentKnowledge(BaseModel):
 
             # Upsert documents if upsert is True and vector db supports upsert
            if upsert and self.vector_db.upsert_available():
-
-                self.vector_db.upsert(documents=[doc], filters=doc.meta_data)
+                self.vector_db.upsert(documents=documents_to_load, filters=doc.meta_data)
             # Insert documents
             else:
                 # Filter out documents which already exist in the vector db
@@ -133,11 +171,10 @@ class AgentKnowledge(BaseModel):
                 documents_to_load = self.filter_existing_documents(document_list)
 
                 if documents_to_load:
-
-                    self.vector_db.insert(documents=[doc], filters=doc.meta_data)
+                    self.vector_db.insert(documents=documents_to_load, filters=doc.meta_data)
 
             num_documents += len(documents_to_load)
-
+        log_info(f"Added {num_documents} documents to knowledge base")
 
     async def aload(
         self,
@@ -152,19 +189,10 @@ class AgentKnowledge(BaseModel):
             upsert (bool): If True, upserts documents to the vector db. Defaults to False.
             skip_existing (bool): If True, skips documents which already exist in the vector db when inserting. Defaults to True.
         """
-
+        await self._aload_init(recreate, upsert)
         if self.vector_db is None:
-            logger.warning("No vector db provided")
             return
 
-        if recreate:
-            log_info("Dropping collection")
-            await self.vector_db.async_drop()
-
-        if not await self.vector_db.async_exists():
-            log_info("Creating collection")
-            await self.vector_db.async_create()
-
         log_info("Loading knowledge base")
         num_documents = 0
         document_iterator = self.async_document_lists
@@ -177,8 +205,7 @@ class AgentKnowledge(BaseModel):
 
             # Upsert documents if upsert is True and vector db supports upsert
             if upsert and self.vector_db.upsert_available():
-
-                await self.vector_db.async_upsert(documents=[doc], filters=doc.meta_data)
+                await self.vector_db.async_upsert(documents=documents_to_load, filters=doc.meta_data)
             # Insert documents
             else:
                 # Filter out documents which already exist in the vector db
@@ -187,11 +214,10 @@ class AgentKnowledge(BaseModel):
                 documents_to_load = await self.async_filter_existing_documents(document_list)
 
                 if documents_to_load:
-
-                    await self.vector_db.async_insert(documents=[doc], filters=doc.meta_data)
+                    await self.vector_db.async_insert(documents=documents_to_load, filters=doc.meta_data)
 
             num_documents += len(documents_to_load)
-
+        log_info(f"Added {num_documents} documents to knowledge base")
 
     def load_documents(
         self,
@@ -208,15 +234,11 @@ class AgentKnowledge(BaseModel):
             skip_existing (bool): If True, skips documents which already exist in the vector db when inserting. Defaults to True.
             filters (Optional[Dict[str, Any]]): Filters to add to each row that can be used to limit results during querying. Defaults to None.
         """
-
-        log_info("Loading knowledge base")
+        self._load_init(recreate=False, upsert=upsert)
         if self.vector_db is None:
-            logger.warning("No vector db provided")
             return
-
-
-        self.vector_db.create()
-
+
+        log_info("Loading knowledge base")
         # Upsert documents if upsert is True
         if upsert and self.vector_db.upsert_available():
             self.vector_db.upsert(documents=documents, filters=filters)
@@ -251,17 +273,11 @@ class AgentKnowledge(BaseModel):
             skip_existing (bool): If True, skips documents which already exist in the vector db when inserting. Defaults to True.
             filters (Optional[Dict[str, Any]]): Filters to add to each row that can be used to limit results during querying. Defaults to None.
         """
-
+        await self._aload_init(recreate=False, upsert=upsert)
         if self.vector_db is None:
-            logger.warning("No vector db provided")
             return
 
-
-        try:
-            await self.vector_db.async_create()
-        except NotImplementedError:
-            logger.warning("Vector db does not support async create")
-            self.vector_db.create()
+        log_info("Loading knowledge base")
 
         # Upsert documents if upsert is True
         if upsert and self.vector_db.upsert_available():
@@ -302,7 +318,7 @@ class AgentKnowledge(BaseModel):
         else:
             log_info("No new documents to load")
 
-    def
+    def load_document(
         self,
         document: Document,
         upsert: bool = False,
@@ -414,8 +430,6 @@ class AgentKnowledge(BaseModel):
         Returns:
             List[Document]: Filtered list of documents that don't exist in the database
         """
-        from agno.utils.log import log_debug, log_info
-
        if not self.vector_db:
             log_debug("No vector database configured, skipping document filtering")
             return documents
@@ -556,20 +570,9 @@ class AgentKnowledge(BaseModel):
         self._track_metadata_structure(metadata)
 
         # 3. Prepare vector DB
+        self._load_init(recreate, upsert=False)
         if self.vector_db is None:
-            logger.warning("Cannot load file: No vector db provided.")
             return False
-
-        # Recreate collection if requested
-        if recreate:
-            # log_info(f"Recreating collection.")
-            self.vector_db.drop()
-
-        # Create collection if it doesn't exist
-        if not self.vector_db.exists():
-            # log_info(f"Collection does not exist. Creating.")
-            self.vector_db.create()
-
         return True
 
     async def aprepare_load(
@@ -604,20 +607,9 @@ class AgentKnowledge(BaseModel):
         self._track_metadata_structure(metadata)
 
         # 3. Prepare vector DB
+        await self._aload_init(recreate, upsert=False)
         if self.vector_db is None:
-            logger.warning("Cannot load file: No vector db provided.")
             return False
-
-        # Recreate collection if requested
-        if recreate:
-            log_info("Recreating collection.")
-            await self.vector_db.async_drop()
-
-        # Create collection if it doesn't exist
-        if not await self.vector_db.async_exists():
-            log_info("Collection does not exist. Creating.")
-            await self.vector_db.async_create()
-
         return True
 
     def process_documents(
@@ -642,6 +634,8 @@ class AgentKnowledge(BaseModel):
 
         log_info(f"Loading {len(documents)} documents from {source_info} with metadata: {metadata}")
 
+        self._upsert_warning(upsert)
+
         # Decide loading strategy: upsert or insert (with optional skip)
         if upsert and self.vector_db.upsert_available():  # type: ignore
             log_debug(f"Upserting {len(documents)} documents.")  # type: ignore
@@ -681,6 +675,8 @@ class AgentKnowledge(BaseModel):
             logger.warning(f"No documents were read from {source_info}")
             return
 
+        self._upsert_warning(upsert)
+
        log_info(f"Loading {len(documents)} documents from {source_info} with metadata: {metadata}")
 
         # Decide loading strategy: upsert or insert (with optional skip)
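The net effect of this refactor: the drop/create/warn preamble that was previously copy-pasted across load(), aload(), load_documents(), and the prepare_load() helpers now lives in _load_init()/_aload_init(), and the batch insert/upsert calls pass the full documents_to_load list instead of a single [doc]. Calling code is unchanged; a typical load still looks like this sketch (kb stands for any configured AgentKnowledge subclass):

# Recreate the collection, then upsert; if the vector db lacks upsert support,
# _upsert_warning() now logs the fallback to insert instead of staying silent.
kb.load(recreate=True, upsert=True, skip_existing=True)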
agno/knowledge/pdf.py
CHANGED
@@ -2,15 +2,22 @@ from pathlib import Path
 from typing import Any, AsyncIterator, Dict, Iterator, List, Optional, Union
 
 from pydantic import Field
+from typing_extensions import TypedDict
 
 from agno.document import Document
 from agno.document.reader.pdf_reader import PDFImageReader, PDFReader
 from agno.knowledge.agent import AgentKnowledge
-from agno.utils.log import log_info, logger
+from agno.utils.log import log_error, log_info, logger
+
+
+class PDFConfig(TypedDict, total=False):
+    path: str
+    password: Optional[str]
+    metadata: Optional[Dict[str, Any]]
 
 
 class PDFKnowledgeBase(AgentKnowledge):
-    path: Optional[Union[str, Path, List[
+    path: Optional[Union[str, Path, List[PDFConfig]]] = None
     formats: List[str] = [".pdf"]
     exclude_files: List[str] = Field(default_factory=list)
     reader: Union[PDFReader, PDFImageReader] = PDFReader()
@@ -24,19 +31,21 @@ class PDFKnowledgeBase(AgentKnowledge):
         if isinstance(self.path, list):
             for item in self.path:
                 if isinstance(item, dict) and "path" in item:
-                    # Handle path with metadata
                     file_path = item["path"]
                     config = item.get("metadata", {})
+                    file_password = item.get("password")
+                    if file_password is not None and not isinstance(file_password, str):
+                        file_password = None
+
                     _pdf_path = Path(file_path)  # type: ignore
                     if self._is_valid_pdf(_pdf_path):
-                        documents = self.reader.read(pdf=_pdf_path)
+                        documents = self.reader.read(pdf=_pdf_path, password=file_password)
                         if config:
                             for doc in documents:
                                 log_info(f"Adding metadata {config} to document: {doc.name}")
                                 doc.meta_data.update(config)  # type: ignore
                         yield documents
         else:
-            # Handle single path
             _pdf_path = Path(self.path)
             if _pdf_path.is_dir():
                 for _pdf in _pdf_path.glob("**/*.pdf"):
@@ -47,7 +56,19 @@ class PDFKnowledgeBase(AgentKnowledge):
 
     def _is_valid_pdf(self, path: Path) -> bool:
         """Helper to check if path is a valid PDF file."""
-
+        if not path.exists():
+            log_error(f"PDF file not found: {path}")
+            return False
+        if not path.is_file():
+            log_error(f"Path is not a file: {path}")
+            return False
+        if path.suffix != ".pdf":
+            log_error(f"File is not a PDF: {path}")
+            return False
+        if path.name in self.exclude_files:
+            log_error(f"PDF file excluded: {path}")
+            return False
+        return True
 
     @property
     async def async_document_lists(self) -> AsyncIterator[List[Document]]:
@@ -58,12 +79,15 @@ class PDFKnowledgeBase(AgentKnowledge):
         if isinstance(self.path, list):
             for item in self.path:
                 if isinstance(item, dict) and "path" in item:
-                    # Handle path with metadata
                     file_path = item["path"]
                     config = item.get("metadata", {})
+                    file_password = item.get("password")
+                    if file_password is not None and not isinstance(file_password, str):
+                        file_password = None
+
                     _pdf_path = Path(file_path)  # type: ignore
                     if self._is_valid_pdf(_pdf_path):
-                        documents = await self.reader.async_read(pdf=_pdf_path)
+                        documents = await self.reader.async_read(pdf=_pdf_path, password=file_password)
                         if config:
                             for doc in documents:
                                 log_info(f"Adding metadata {config} to document: {doc.name}")
agno/knowledge/pdf_url.py
CHANGED
@@ -19,18 +19,22 @@ class PDFUrlKnowledgeBase(AgentKnowledge):
 
         for item in self.urls:
             if isinstance(item, dict) and "url" in item:
-                # Handle URL with metadata
+                # Handle URL with metadata/password
                 url = item["url"]
                 config = item.get("metadata", {})
+                pdf_password = item.get("password")
+                if pdf_password is not None and not isinstance(pdf_password, str):
+                    pdf_password = None
+
                 if self._is_valid_url(url):  # type: ignore
-                    documents = self.reader.read(url=url)  # type: ignore
+                    documents = self.reader.read(url=url, password=pdf_password)  # type: ignore
                     if config:
                         for doc in documents:
                             log_info(f"Adding metadata {config} to document from URL: {url}")
                             doc.meta_data.update(config)  # type: ignore
                     yield documents
             else:
-                # Handle simple URL
+                # Handle simple URL - no password
                 if self._is_valid_url(item):  # type: ignore
                     yield self.reader.read(url=item)  # type: ignore
 
@@ -49,11 +53,15 @@ class PDFUrlKnowledgeBase(AgentKnowledge):
 
         for item in self.urls:
             if isinstance(item, dict) and "url" in item:
-                # Handle URL with metadata
+                # Handle URL with metadata/password
                 url = item["url"]
                 config = item.get("metadata", {})
+                pdf_password = item.get("password")
+                if pdf_password is not None and not isinstance(pdf_password, str):
+                    pdf_password = None
+
                 if self._is_valid_url(url):  # type: ignore
-                    documents = await self.reader.async_read(url=url)  # type: ignore
+                    documents = await self.reader.async_read(url=url, password=pdf_password)  # type: ignore
                     if config:
                         for doc in documents:
                             log_info(f"Adding metadata {config} to document from URL: {url}")
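The URL-based knowledge base mirrors the same dict convention; plain string URLs keep working and simply skip the password path. A sketch (URL and password are illustrative):

from agno.knowledge.pdf_url import PDFUrlKnowledgeBase

kb = PDFUrlKnowledgeBase(
    urls=[{"url": "https://example.com/locked.pdf", "password": "s3cret"}],
    vector_db=vector_db,  # any configured agno VectorDb instance
)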
agno/models/openai/responses.py
CHANGED
@@ -78,6 +78,10 @@ class OpenAIResponses(Model):
             }
         )
 
+    def _using_reasoning_model(self) -> bool:
+        """Return True if the contextual used model is a known reasoning model."""
+        return self.id.startswith("o3") or self.id.startswith("o4-mini") or self.id.startswith("gpt-5")
+
     def _get_client_params(self) -> Dict[str, Any]:
         """
         Get client parameters for API requests.
@@ -221,7 +225,7 @@ class OpenAIResponses(Model):
             request_params["tool_choice"] = tool_choice
 
         # Handle reasoning tools for o3 and o4-mini models
-        if
+        if self._using_reasoning_model() and messages is not None:
             request_params["store"] = True
 
             # Check if the last assistant message has a previous_response_id to continue from
@@ -352,6 +356,22 @@ class OpenAIResponses(Model):
             Dict[str, Any]: The formatted message.
         """
         formatted_messages: List[Dict[str, Any]] = []
+
+        if self._using_reasoning_model():
+            # Detect whether we're chaining via previous_response_id. If so, we should NOT
+            # re-send prior function_call items; the Responses API already has the state and
+            # expects only the corresponding function_call_output items.
+            previous_response_id: Optional[str] = None
+            for msg in reversed(messages):
+                if (
+                    msg.role == "assistant"
+                    and hasattr(msg, "provider_data")
+                    and msg.provider_data
+                    and "response_id" in msg.provider_data
+                ):
+                    previous_response_id = msg.provider_data["response_id"]
+                    break
+
         for message in messages:
             if message.role in ["user", "system"]:
                 message_dict: Dict[str, Any] = {
@@ -384,6 +404,15 @@ class OpenAIResponses(Model):
                     {"type": "function_call_output", "call_id": message.tool_call_id, "output": message.content}
                 )
             elif message.tool_calls is not None and len(message.tool_calls) > 0:
+                if self._using_reasoning_model():
+                    # Only include prior function_call items when we are NOT using
+                    # previous_response_id. When previous_response_id is present, the
+                    # Responses API already knows about earlier output items (including
+                    # reasoning/function_call), and re-sending them can trigger validation
+                    # errors (e.g., missing required reasoning item).
+                    if previous_response_id is not None:
+                        continue
+
                 for tool_call in message.tool_calls:
                     formatted_messages.append(
                         {
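In short: for o3, o4-mini, and gpt-5 family ids the Responses API conversation is chained server-side via previous_response_id, so prior function_call items must not be replayed. The new helper reduces to a prefix check on the model id; a standalone sketch of the same logic:

def using_reasoning_model(model_id: str) -> bool:
    # Mirrors OpenAIResponses._using_reasoning_model() above
    return model_id.startswith(("o3", "o4-mini", "gpt-5"))

assert using_reasoning_model("o4-mini") is True
assert using_reasoning_model("gpt-4o") is False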
agno/team/team.py
CHANGED
@@ -310,6 +310,7 @@ class Team:
         model: Optional[Model] = None,
         name: Optional[str] = None,
         team_id: Optional[str] = None,
+        role: Optional[str] = None,
         user_id: Optional[str] = None,
         session_id: Optional[str] = None,
         session_name: Optional[str] = None,
@@ -390,6 +391,7 @@ class Team:
 
         self.name = name
         self.team_id = team_id
+        self.role = role
 
         self.user_id = user_id
         self.session_id = session_id
@@ -5453,6 +5455,10 @@ class Team:
         if self.description is not None:
             system_message_content += f"<description>\n{self.description}\n</description>\n\n"
 
+        # 3.3.4 Then add the Team role if provided
+        if self.role is not None:
+            system_message_content += f"\n<your_role>\n{self.role}\n</your_role>\n\n"
+
         # 3.3.5 Then add instructions for the Agent
         if len(instructions) > 0:
             system_message_content += "<instructions>"
@@ -5645,7 +5651,7 @@ class Team:
         if isinstance(message, str):
             user_message_content = message
         else:
-            user_message_content = "\n".join(message)
+            user_message_content = "\n".join(str(message))
 
         # Add references to user message
         if (
@@ -7152,9 +7158,10 @@ class Team:
                 # If the team_session_state is already set, merge the team_session_state from the database with the current team_session_state
                 if self.team_session_state is not None and len(self.team_session_state) > 0:
                     # This updates team_session_state_from_db
-                    merge_dictionaries(
-
-
+                    merge_dictionaries(self.team_session_state, team_session_state_from_db)
+                else:
+                    # Update the current team_session_state
+                    self.team_session_state = team_session_state_from_db
 
             if "workflow_session_state" in session.session_data:
                 workflow_session_state_from_db = session.session_data.get("workflow_session_state")
@@ -7166,9 +7173,10 @@ class Team:
                 # If the workflow_session_state is already set, merge the workflow_session_state from the database with the current workflow_session_state
                 if self.workflow_session_state is not None and len(self.workflow_session_state) > 0:
                     # This updates workflow_session_state_from_db
-                    merge_dictionaries(
-
-
+                    merge_dictionaries(self.workflow_session_state, workflow_session_state_from_db)
+                else:
+                    # Update the current workflow_session_state
+                    self.workflow_session_state = workflow_session_state_from_db
 
             # Get the session_metrics from the database
             if "session_metrics" in session.session_data:
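The new role parameter matters mainly for nested teams: when set, it is rendered into the system message inside <your_role> tags, letting a Team describe its own function when it serves as a member of a larger team. A construction sketch (names and members are illustrative):

from agno.team.team import Team

research_team = Team(
    name="Research Team",
    role="Collect and summarize sources for the writing team",
    members=[],  # member agents omitted for brevity
)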
agno/tools/github.py
CHANGED
@@ -458,35 +458,71 @@ class GithubTools(Toolkit):
             logger.error(f"Error creating issue: {e}")
             return json.dumps({"error": str(e)})
 
-    def list_issues(self, repo_name: str, state: str = "open",
-        """List issues for a repository.
+    def list_issues(self, repo_name: str, state: str = "open", page: int = 1, per_page: int = 20) -> str:
+        """List issues for a repository with pagination.
 
         Args:
             repo_name (str): The full name of the repository (e.g., 'owner/repo').
             state (str, optional): The state of issues to list ('open', 'closed', 'all'). Defaults to 'open'.
-
+            page (int, optional): Page number of results to return, counting from 1. Defaults to 1.
+            per_page (int, optional): Number of results per page. Defaults to 20.
         Returns:
-            A JSON-formatted string containing a list of issues.
+            A JSON-formatted string containing a list of issues with pagination metadata.
         """
-        log_debug(f"Listing issues for repository: {repo_name} with state: {state}")
+        log_debug(f"Listing issues for repository: {repo_name} with state: {state}, page: {page}, per_page: {per_page}")
         try:
             repo = self.g.get_repo(repo_name)
+
             issues = repo.get_issues(state=state)
+
             # Filter out pull requests after fetching issues
-
-
+            total_issues = 0
+            all_issues = []
+            for issue in issues:
+                if not issue.pull_request:
+                    all_issues.append(issue)
+                    total_issues += 1
+
+            # Calculate pagination metadata
+            total_pages = (total_issues + per_page - 1) // per_page
+
+            # Validate page number
+            if page < 1:
+                page = 1
+            elif page > total_pages and total_pages > 0:
+                page = total_pages
+
+            # Get the specified page of results
             issue_list = []
-
-
-
-
-
-
-
-
-
-
-
+            page_start = (page - 1) * per_page
+            page_end = page_start + per_page
+
+            for i in range(page_start, min(page_end, total_issues)):
+                if i < len(all_issues):
+                    issue = all_issues[i]
+                    issue_info = {
+                        "number": issue.number,
+                        "title": issue.title,
+                        "user": issue.user.login,
+                        "created_at": issue.created_at.isoformat(),
+                        "state": issue.state,
+                        "url": issue.html_url,
+                    }
+                    issue_list.append(issue_info)
+
+            meta = {
+                "current_page": page,
+                "per_page": per_page,
+                "total_items": total_issues,
+                "total_pages": total_pages
+            }
+
+            response = {
+                "data": issue_list,
+                "meta": meta
+            }
+
+            return json.dumps(response, indent=2)
         except GithubException as e:
             logger.error(f"Error listing issues: {e}")
             return json.dumps({"error": str(e)})
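Note that pagination here is client-side: the tool still fetches all matching issues, filters out pull requests, then slices one page and wraps it in a data/meta envelope. A call sketch (the repository name is illustrative, and GithubTools is assumed to pick up its GitHub token from the environment):

import json
from agno.tools.github import GithubTools

gh = GithubTools()  # assumes a GitHub access token is configured in the environment
result = json.loads(gh.list_issues(repo_name="owner/repo", state="open", page=2, per_page=10))
print(result["meta"])  # e.g. {"current_page": 2, "per_page": 10, "total_items": ..., "total_pages": ...}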
agno/vectordb/weaviate/weaviate.py
CHANGED
@@ -2,7 +2,7 @@ import json
 import uuid
 from hashlib import md5
 from os import getenv
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Tuple
 
 try:
     from warnings import filterwarnings
@@ -73,6 +73,13 @@ class Weaviate(VectorDb):
         self.reranker: Optional[Reranker] = reranker
         self.hybrid_search_alpha = hybrid_search_alpha
 
+    @staticmethod
+    def _get_doc_uuid(document: Document) -> Tuple[uuid.UUID, str]:
+        cleaned_content = document.content.replace("\x00", "\ufffd")
+        content_hash = md5(cleaned_content.encode()).hexdigest()
+        doc_uuid = uuid.UUID(hex=content_hash[:32])
+        return doc_uuid, cleaned_content
+
     def get_client(self) -> weaviate.WeaviateClient:
         """Initialize and return a Weaviate client instance.
 
@@ -118,7 +125,7 @@ class Weaviate(VectorDb):
             await self.async_client.connect()  # type: ignore
 
         if not await self.async_client.is_ready():  # type: ignore
-            raise
+            raise ConnectionError("Weaviate async client is not ready")
 
         return self.async_client  # type: ignore
 
@@ -155,6 +162,54 @@ class Weaviate(VectorDb):
         finally:
             await client.close()
 
+    def doc_content_changed(self, document: Document, check_existing: Optional[bool] = True) -> Optional[bool]:
+        """
+        Check if the content of the document has changed by comparing its UUID.
+
+        Args:
+            document (Document): Document to check
+
+        Returns:
+            bool: True if the document content has changed, False otherwise. None on wrong input.
+            check_existing (bool): If True, check if the document exists before checking if the content changed.
+        """
+        if not document or not document.content:
+            logger.warning("Invalid document: Missing content.")
+            return None
+
+        if check_existing and document.name and not self.name_exists(document.name):
+            logger.warning(f"A document by this name does not exist: {document.name}")
+            return None
+
+        doc_uuid, _ = self._get_doc_uuid(document)
+
+        collection = self.get_client().collections.get(self.collection)
+        existing_doc = collection.query.fetch_object_by_id(doc_uuid)
+
+        if not existing_doc:
+            return True
+        else:
+            return False
+
+    def doc_delete(self, name: str) -> None:
+        """
+        Delete all documents from Weaviate with a specific 'name' property.
+
+        Args:
+            name (str): Document name to delete.
+        """
+        collection = self.get_client().collections.get(self.collection)
+        filter_expr = Filter.by_property("name").equal(name)
+
+        result = collection.data.delete_many(where=filter_expr)
+
+        log_debug(f"Deleted document by name: '{name}' - {result.successful} documents deleted.")
+        if result.failed > 0:
+            logger.warning(
+                f"Failed to delete (some chunks of) document with name: '{name}' - "
+                f"Failed {result.failed} out of {result.matches} times. {result.successful} successful deletions."
+            )
+
     def doc_exists(self, document: Document) -> bool:
         """
         Validate if the document exists using consistent UUID generation.
@@ -169,9 +224,7 @@ class Weaviate(VectorDb):
             logger.warning("Invalid document: Missing content.")
             return False  # Early exit for invalid input
 
-
-        content_hash = md5(cleaned_content.encode()).hexdigest()
-        doc_uuid = uuid.UUID(hex=content_hash[:32])
+        doc_uuid, _ = self._get_doc_uuid(document)
 
         collection = self.get_client().collections.get(self.collection)
         return collection.data.exists(doc_uuid)
@@ -190,9 +243,7 @@ class Weaviate(VectorDb):
             logger.warning("Invalid document: Missing content.")
             return False  # Early exit for invalid input
 
-
-        content_hash = md5(cleaned_content.encode()).hexdigest()
-        doc_uuid = uuid.UUID(hex=content_hash[:32])
+        doc_uuid, _ = self._get_doc_uuid(document)
 
         client = await self.get_async_client()
         try:
@@ -256,9 +307,7 @@ class Weaviate(VectorDb):
                 logger.error(f"Document embedding is None: {document.name}")
                 continue
 
-            cleaned_content =
-            content_hash = md5(cleaned_content.encode()).hexdigest()
-            doc_uuid = uuid.UUID(hex=content_hash[:32])
+            doc_uuid, cleaned_content = self._get_doc_uuid(document)
 
             # Merge filters with metadata
             meta_data = document.meta_data or {}
@@ -305,9 +354,7 @@ class Weaviate(VectorDb):
                 continue
 
             # Clean content and generate UUID
-            cleaned_content =
-            content_hash = md5(cleaned_content.encode()).hexdigest()
-            doc_uuid = uuid.UUID(hex=content_hash[:32])
+            doc_uuid, cleaned_content = self._get_doc_uuid(document)
 
             # Serialize meta_data to JSON string
             meta_data_str = json.dumps(document.meta_data) if document.meta_data else None
@@ -338,7 +385,28 @@ class Weaviate(VectorDb):
             filters (Optional[Dict[str, Any]]): Filters to apply while upserting
         """
         log_debug(f"Upserting {len(documents)} documents into Weaviate.")
-
+
+        _docs_to_insert = []
+        for document in documents:
+            assert document.name is not None, "Document name must be set for upsert operation."
+
+            if self.name_exists(document.name):
+                if self.doc_content_changed(document, check_existing=False):
+                    log_debug(
+                        f"Document already exists, but content changed. Document will be deleted and added again: {document.name}"
+                    )
+
+                    is_first_or_only_chunk = ("chunk" in document.meta_data and document.meta_data["chunk"] == 1) or (
+                        "chunk" not in document.meta_data
+                    )
+                    if is_first_or_only_chunk:
+                        self.doc_delete(document.name)
+                    _docs_to_insert.append(document)
+                else:
+                    log_debug(f"Document skipped, content is unchanged: {document.name}")
+            else:
+                _docs_to_insert.append(document)
+        self.insert(_docs_to_insert)
 
     async def async_upsert(self, documents: List[Document], filters: Optional[Dict[str, Any]] = None) -> None:
         """
@@ -365,9 +433,7 @@ class Weaviate(VectorDb):
             logger.error(f"Document embedding is None: {document.name}")
             continue
 
-            cleaned_content =
-            content_hash = md5(cleaned_content.encode()).hexdigest()
-            doc_uuid = uuid.UUID(hex=content_hash[:32])
+            doc_uuid, cleaned_content = self._get_doc_uuid(document)
 
             # Serialize meta_data to JSON string
             meta_data_str = json.dumps(document.meta_data) if document.meta_data else None
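The deduplication above hinges on deterministic ids: a document's Weaviate UUID is derived from the MD5 of its cleaned content, so identical content always maps to the same object id and changed content maps to a new one. The mapping uses only the standard library, as this standalone sketch shows (the content string is illustrative):

import uuid
from hashlib import md5

content = "example document text"  # illustrative content
cleaned = content.replace("\x00", "\ufffd")  # replace NUL bytes, as _get_doc_uuid does
doc_uuid = uuid.UUID(hex=md5(cleaned.encode()).hexdigest()[:32])
print(doc_uuid)  # same content always yields the same UUID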
{agno-1.7.9.dist-info → agno-1.7.10.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: agno
-Version: 1.7.
+Version: 1.7.10
 Summary: Agno: a lightweight library for building Multi-Agent Systems
 Author-email: Ashpreet Bedi <ashpreet@agno.com>
 License: Copyright (c) Agno, Inc.
@@ -400,6 +400,7 @@ License-File: LICENSE
 Requires-Dist: docstring-parser
 Requires-Dist: gitpython
 Requires-Dist: httpx
+Requires-Dist: packaging
 Requires-Dist: pydantic-settings
 Requires-Dist: pydantic
 Requires-Dist: python-dotenv
{agno-1.7.9.dist-info → agno-1.7.10.dist-info}/RECORD
CHANGED
@@ -5,7 +5,7 @@ agno/exceptions.py,sha256=HWuuNFS5J0l1RYJsdUrSx51M22aFEoh9ltoeonXBoBw,2891
 agno/media.py,sha256=lXJuylmhuIEWThKZkQ9pUZPp8Kms7EdT4N_U4YN9I00,12656
 agno/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agno/agent/__init__.py,sha256=Ai6GVyw-0rkA2eYAfoEQIvbi_mrWQUxuPFaFbSDJYCQ,1306
-agno/agent/agent.py,sha256=
+agno/agent/agent.py,sha256=9qp01o6lX72-fflCY74J7XIeFpz4jm7mnvpvF6m7n_0,381409
 agno/agent/metrics.py,sha256=Lf7JYgPPdqRCyPfCDVUjnmUZ1SkWXrJClL80aW2ffEw,4379
 agno/api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 agno/api/agent.py,sha256=J-Y4HI-J0Bu6r9gxRYCM3U7SnVBGwLIouDy806KSIIw,2821
@@ -92,7 +92,7 @@ agno/document/reader/docx_reader.py,sha256=fNSZNzBlROQow7nagouEfN8E4KgVp3hTcSj3d
 agno/document/reader/firecrawl_reader.py,sha256=4CFACPA22t0gI1YgYL1mZLZQN_T6LrLj8V9mAmU2In8,5748
 agno/document/reader/json_reader.py,sha256=TrE14YAPkEd3q1e1dFf1ZX-GJPlXadsbeCzNh6EGpgg,2189
 agno/document/reader/markdown_reader.py,sha256=SX0Zydj_0AmBr8Grk8z9jDiNmebU7rnPg2aAJoFEHAk,3546
-agno/document/reader/pdf_reader.py,sha256=
+agno/document/reader/pdf_reader.py,sha256=CGh2aYSJb7dAu-blSDz8ZkjiyU3U5-F_Foag7Eebnko,19584
 agno/document/reader/text_reader.py,sha256=jtCuhWHkC5QNPmRF3DyXcXVKHM1jnqqxXVxHKZIpkfQ,3315
 agno/document/reader/url_reader.py,sha256=dQmuO1NkJg9A9m2YeiVMuR7GikLl_K1FudE0UO62Um4,1988
 agno/document/reader/website_reader.py,sha256=Ei-FIsdw0FWhGOBVge24JcNMfiAHL8nUA1tawrfBZPM,17307
@@ -138,7 +138,7 @@ agno/infra/db_app.py,sha256=W_XeDK0FhIkc--kfB0Jd34MJmgOfFQeT9M2y5840vlA,1794
 agno/infra/resource.py,sha256=yPRZI4uprQtaugLD6x8_WAmFTPNPy5rlLvT1-bNIa5c,8491
 agno/infra/resources.py,sha256=FKmDaDFup4cDiSA_y8eZRxpXEAmlEPxWzXKcY8xqF5w,1681
 agno/knowledge/__init__.py,sha256=H1opQqY6oTPYJiLAiaIHtPuaVCry5GAhPoyT1ojdJwg,85
-agno/knowledge/agent.py,sha256=
+agno/knowledge/agent.py,sha256=udSFrUyHAwUA4RWT6iLR_hzYKh4-TnbKkQg_4xfksJk,29773
 agno/knowledge/arxiv.py,sha256=0q1teC40wmau50ZTmdbvewCNxgxF_92QtfhJ87Evagk,1140
 agno/knowledge/combined.py,sha256=6aHPfbjdKSfkE0D-vt8CocU3WOVAQaPWhuNjKBVLrv4,1262
 agno/knowledge/csv.py,sha256=uUusY3Gf2HrYDtAOxoXsWfw11z95NEE9x71YNDqoK6g,5732
@@ -151,9 +151,9 @@ agno/knowledge/langchain.py,sha256=nHk4ohORSRwEY00DcdgXSwMewcv-caKMG24wghnUXaM,2
 agno/knowledge/light_rag.py,sha256=8grl8nRirgqUyaoExNNJ1MwEiqqyRXdtM7hQw7w5z3s,11516
 agno/knowledge/llamaindex.py,sha256=Lclu1rA0XMf4Y1xuupwUrtPvTi75gj9XvJ-so6oWxfw,2332
 agno/knowledge/markdown.py,sha256=4DS3nb25ZP-Gld-H0TMijUIsqq2D8E2BVgfZzXKI0A8,5639
-agno/knowledge/pdf.py,sha256=
+agno/knowledge/pdf.py,sha256=ioV5szNDIVR1kzG3gZPwrM9FJprLSY0YzSP3VC9xpHo,6366
 agno/knowledge/pdf_bytes.py,sha256=VmLuWO-UCJQ9CCXuWfYB0nG3W90_69g1xCoFxiKbaEM,1399
-agno/knowledge/pdf_url.py,sha256=
+agno/knowledge/pdf_url.py,sha256=gXskDyRbWqRK1mtBH6OfujlCy7OTQLOZY1LlqGfh84g,6119
 agno/knowledge/text.py,sha256=SHVnvEF6Qa2yksuXY0tpdEvP8Y4cbFneKzyUi1V3tNw,5620
 agno/knowledge/url.py,sha256=OTy-4BFP32Hg8N3IRvb-7LaoAlKAW61iewhkAASDL84,1488
 agno/knowledge/website.py,sha256=W_vnFKAYteuNwnoNVnESEw55q-o3C9JHpfDyDxcnZNA,6556
@@ -252,7 +252,7 @@ agno/models/ollama/tools.py,sha256=PLYT9VSCGSwKAHNDEgOtyKg0HuUlYUxzGzvhoK19Vr0,1
 agno/models/openai/__init__.py,sha256=OssVgQRpsriU6aJZ3lIp_jFuqvX6y78L4Fd3uTlmI3E,225
 agno/models/openai/chat.py,sha256=PQ8GfVNZxM17rvLtY5UqeOPo4faGmX-1H_hx8BSLphI,30033
 agno/models/openai/like.py,sha256=wmw9PfAVqluBs4MMY73dgjelKn1yl5JDKyCRvaNFjFw,745
-agno/models/openai/responses.py,sha256=
+agno/models/openai/responses.py,sha256=CxNS39S2O6eiXCY540ls2pJDu5m8XmYWvT_jvyEhDTQ,37951
 agno/models/openrouter/__init__.py,sha256=ZpZhNyy_EGSXp58uC9e2iyjnxBctql7GaY8rUG-599I,90
 agno/models/openrouter/openrouter.py,sha256=Ng-_ztpq_lghGI3tM94nsC8minKhiZ6d265c6IYXtg4,869
 agno/models/perplexity/__init__.py,sha256=JNmOElDLwcZ9_Lk5owkEdgwmAhaH3YJ-VJqOI8rgp5c,90
@@ -328,7 +328,7 @@ agno/storage/workflow/mongodb.py,sha256=x-0Jl2WovupTfwuVNOSndE9-7V4U7BBIjejtJ1Wa
 agno/storage/workflow/postgres.py,sha256=66bvx6eT7PtFvd4EtTCfI2smynAyvpjvAPYtPo-PCNg,91
 agno/storage/workflow/sqlite.py,sha256=PLqEA1YC8AtIklINr6wy8lzK6KABEqvlJW-nz5KacWM,85
 agno/team/__init__.py,sha256=OSkwJhm4uSoOwpHLeDdcH4q2R_BmfS-7a9_aPxB-Skw,967
-agno/team/team.py,sha256=
+agno/team/team.py,sha256=bWK4aaNyipoN0BbqaKkqWWocH8yflXQFLJo3eESTvo8,377734
 agno/tools/__init__.py,sha256=jNll2sELhPPbqm5nPeT4_uyzRO2_KRTW-8Or60kioS0,210
 agno/tools/agentql.py,sha256=w6FlCfhuS0cc2BHa9K6dZjqO1ycA66fSZbR_nvXiVSo,3813
 agno/tools/airflow.py,sha256=2ZCwx65w_tSXm4xEzZQR_teOiXJlnEgIqU9AgQTQemI,2493
@@ -368,7 +368,7 @@ agno/tools/financial_datasets.py,sha256=rvGSjz7gRdouB0PRJGngRSlO0wOwQE7cnrI8YaNI
 agno/tools/firecrawl.py,sha256=0jeJlupLXCJTf2tDckEXlCV6EpLeDEO_G27-w4EWDL0,4985
 agno/tools/function.py,sha256=VWgQAjIqxJl7MJrrMmMr7tNNvz3L3m7jXaUoIqxdlKY,36119
 agno/tools/giphy.py,sha256=HKzTHEmiUdrkJsguG0qkpnsmXPikbrQyCoAHvAzI6XU,2422
-agno/tools/github.py,sha256=
+agno/tools/github.py,sha256=GMKuULX_9bzJY3PVocJsUVJITMR7g0hsjxY4gquL7wg,73271
 agno/tools/gmail.py,sha256=p7zKW2NLuRIYsZZ6ru4WMdwMPQpoBt4JbuCwfMI3YTs,28845
 agno/tools/google_bigquery.py,sha256=i93QJLJCH39sZDMPWYmie_A_ElBxVC6D36TVDcSaplo,4423
 agno/tools/google_maps.py,sha256=iZa6FgjNID_G6T96ZXUOe5JnXIWn0c3AgzFU7O6S2WE,10123
@@ -532,7 +532,7 @@ agno/vectordb/upstashdb/__init__.py,sha256=set3Sx1F3ZCw0--0AeC036EAS0cC1xKsvQUK5
 agno/vectordb/upstashdb/upstashdb.py,sha256=PNV_Wt0LTszwt-jzn2E87y_6aulCr8BaeA2YlJrkTtQ,13088
 agno/vectordb/weaviate/__init__.py,sha256=FIoFJgqSmGuFgpvmsg8EjAn8FDAhuqAXed7fjaW4exY,182
 agno/vectordb/weaviate/index.py,sha256=y4XYPRZFksMfrrF85B4hn5AtmXM4SH--4CyLo27EHgM,253
-agno/vectordb/weaviate/weaviate.py,sha256=
+agno/vectordb/weaviate/weaviate.py,sha256=ZOHCHIuenpBcSx--jIA8l8nvUzZ-DzER2ISSfdGHtSc,33253
 agno/workflow/__init__.py,sha256=jPTHCWpHZbfR34-KqIX-SLA7tO7VdZqgZ8C2I0Nhbps,407
 agno/workflow/workflow.py,sha256=iHWjowKjw_gKE-JTigHseY9DDL1rpqQrP6ihBUO9d-A,35227
 agno/workflow/v2/__init__.py,sha256=tACFZfj7OqbmjOtyZ8nllYnsal3w8xN2SHrIXXmA6Ys,566
@@ -550,9 +550,9 @@ agno/workspace/enums.py,sha256=MxF1CUMXBaZMTKLEfiR-7kEhTki2Gfz6W7u49RdYYaE,123
 agno/workspace/helpers.py,sha256=Mp-VlRsPVhW10CfDWYVhc9ANLk9RjNurDfCgXmycZCg,2066
 agno/workspace/operator.py,sha256=CNLwVR45eE5dSRjto2o0c9NgCi2xD-JZR5uLt9kfIt8,30758
 agno/workspace/settings.py,sha256=bcyHHN7lH1LPSMt4i_20XpTjZLoNXdzwyW-G9nHYV40,5703
-agno-1.7.
-agno-1.7.
-agno-1.7.
-agno-1.7.
-agno-1.7.
-agno-1.7.
+agno-1.7.10.dist-info/licenses/LICENSE,sha256=m2rfTWFUfIwCaQqgT2WeBjuKzMKEJRwnaiofg9n8MsQ,16751
+agno-1.7.10.dist-info/METADATA,sha256=39SoZig58R20Lust76guW14N2t8HpMHYRa-zAgXLVIU,44303
+agno-1.7.10.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+agno-1.7.10.dist-info/entry_points.txt,sha256=Be-iPnPVabMohESsuUdV5w6IAYEIlpc2emJZbyNnfGI,88
+agno-1.7.10.dist-info/top_level.txt,sha256=MKyeuVesTyOKIXUhc-d_tPa2Hrh0oTA4LM0izowpx70,5
+agno-1.7.10.dist-info/RECORD,,
The remaining dist-info files (WHEEL, entry_points.txt, licenses/LICENSE, top_level.txt) are unchanged between 1.7.9 and 1.7.10; only their directory name moves with the version rename.