agno-2.2.13-py3-none-any.whl → agno-2.3.1-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions as they appear in their public registries.
Files changed (92)
  1. agno/agent/agent.py +197 -110
  2. agno/api/api.py +2 -0
  3. agno/db/base.py +26 -0
  4. agno/db/dynamo/dynamo.py +8 -0
  5. agno/db/dynamo/schemas.py +1 -0
  6. agno/db/firestore/firestore.py +8 -0
  7. agno/db/firestore/schemas.py +1 -0
  8. agno/db/gcs_json/gcs_json_db.py +8 -0
  9. agno/db/in_memory/in_memory_db.py +8 -1
  10. agno/db/json/json_db.py +8 -0
  11. agno/db/migrations/manager.py +199 -0
  12. agno/db/migrations/versions/__init__.py +0 -0
  13. agno/db/migrations/versions/v2_3_0.py +938 -0
  14. agno/db/mongo/async_mongo.py +16 -6
  15. agno/db/mongo/mongo.py +11 -0
  16. agno/db/mongo/schemas.py +3 -0
  17. agno/db/mongo/utils.py +17 -0
  18. agno/db/mysql/mysql.py +76 -3
  19. agno/db/mysql/schemas.py +20 -10
  20. agno/db/postgres/async_postgres.py +99 -25
  21. agno/db/postgres/postgres.py +75 -6
  22. agno/db/postgres/schemas.py +30 -20
  23. agno/db/redis/redis.py +15 -2
  24. agno/db/redis/schemas.py +4 -0
  25. agno/db/schemas/memory.py +13 -0
  26. agno/db/singlestore/schemas.py +11 -0
  27. agno/db/singlestore/singlestore.py +79 -5
  28. agno/db/sqlite/async_sqlite.py +97 -19
  29. agno/db/sqlite/schemas.py +10 -0
  30. agno/db/sqlite/sqlite.py +79 -2
  31. agno/db/surrealdb/surrealdb.py +8 -0
  32. agno/knowledge/chunking/semantic.py +7 -2
  33. agno/knowledge/embedder/nebius.py +1 -1
  34. agno/knowledge/knowledge.py +57 -86
  35. agno/knowledge/reader/csv_reader.py +7 -9
  36. agno/knowledge/reader/docx_reader.py +5 -5
  37. agno/knowledge/reader/field_labeled_csv_reader.py +16 -18
  38. agno/knowledge/reader/json_reader.py +5 -4
  39. agno/knowledge/reader/markdown_reader.py +8 -8
  40. agno/knowledge/reader/pdf_reader.py +11 -11
  41. agno/knowledge/reader/pptx_reader.py +5 -5
  42. agno/knowledge/reader/s3_reader.py +3 -3
  43. agno/knowledge/reader/text_reader.py +8 -8
  44. agno/knowledge/reader/web_search_reader.py +1 -48
  45. agno/knowledge/reader/website_reader.py +10 -10
  46. agno/models/anthropic/claude.py +319 -28
  47. agno/models/aws/claude.py +32 -0
  48. agno/models/azure/openai_chat.py +19 -10
  49. agno/models/base.py +612 -545
  50. agno/models/cerebras/cerebras.py +8 -11
  51. agno/models/cohere/chat.py +27 -1
  52. agno/models/google/gemini.py +39 -7
  53. agno/models/groq/groq.py +25 -11
  54. agno/models/meta/llama.py +20 -9
  55. agno/models/meta/llama_openai.py +3 -19
  56. agno/models/nebius/nebius.py +4 -4
  57. agno/models/openai/chat.py +30 -14
  58. agno/models/openai/responses.py +10 -13
  59. agno/models/response.py +1 -0
  60. agno/models/vertexai/claude.py +26 -0
  61. agno/os/app.py +8 -19
  62. agno/os/router.py +54 -0
  63. agno/os/routers/knowledge/knowledge.py +2 -2
  64. agno/os/schema.py +2 -2
  65. agno/session/agent.py +57 -92
  66. agno/session/summary.py +1 -1
  67. agno/session/team.py +62 -112
  68. agno/session/workflow.py +353 -57
  69. agno/team/team.py +227 -125
  70. agno/tools/models/nebius.py +5 -5
  71. agno/tools/models_labs.py +20 -10
  72. agno/tools/nano_banana.py +151 -0
  73. agno/tools/yfinance.py +12 -11
  74. agno/utils/http.py +111 -0
  75. agno/utils/media.py +11 -0
  76. agno/utils/models/claude.py +8 -0
  77. agno/utils/print_response/agent.py +33 -12
  78. agno/utils/print_response/team.py +22 -12
  79. agno/vectordb/couchbase/couchbase.py +6 -2
  80. agno/workflow/condition.py +13 -0
  81. agno/workflow/loop.py +13 -0
  82. agno/workflow/parallel.py +13 -0
  83. agno/workflow/router.py +13 -0
  84. agno/workflow/step.py +120 -20
  85. agno/workflow/steps.py +13 -0
  86. agno/workflow/workflow.py +76 -63
  87. {agno-2.2.13.dist-info → agno-2.3.1.dist-info}/METADATA +6 -2
  88. {agno-2.2.13.dist-info → agno-2.3.1.dist-info}/RECORD +91 -88
  89. agno/tools/googlesearch.py +0 -98
  90. {agno-2.2.13.dist-info → agno-2.3.1.dist-info}/WHEEL +0 -0
  91. {agno-2.2.13.dist-info → agno-2.3.1.dist-info}/licenses/LICENSE +0 -0
  92. {agno-2.2.13.dist-info → agno-2.3.1.dist-info}/top_level.txt +0 -0
--- a/agno/knowledge/reader/s3_reader.py
+++ b/agno/knowledge/reader/s3_reader.py
@@ -10,7 +10,7 @@ from agno.knowledge.reader.base import Reader
  from agno.knowledge.reader.pdf_reader import PDFReader
  from agno.knowledge.reader.text_reader import TextReader
  from agno.knowledge.types import ContentType
- from agno.utils.log import log_info, logger
+ from agno.utils.log import log_debug, log_error

  try:
  from agno.aws.resource.s3.object import S3Object # type: ignore
@@ -51,7 +51,7 @@ class S3Reader(Reader):

  def read(self, name: Optional[str], s3_object: S3Object) -> List[Document]:
  try:
- log_info(f"Reading S3 file: {s3_object.uri}")
+ log_debug(f"Reading S3 file: {s3_object.uri}")

  # Read PDF files
  if s3_object.uri.endswith(".pdf"):
@@ -80,7 +80,7 @@ class S3Reader(Reader):
  return documents

  except Exception as e:
- logger.error(f"Error reading: {s3_object.uri}: {e}")
+ log_error(f"Error reading: {s3_object.uri}: {e}")

  return []

--- a/agno/knowledge/reader/text_reader.py
+++ b/agno/knowledge/reader/text_reader.py
@@ -8,7 +8,7 @@ from agno.knowledge.chunking.strategy import ChunkingStrategy, ChunkingStrategyT
  from agno.knowledge.document.base import Document
  from agno.knowledge.reader.base import Reader
  from agno.knowledge.types import ContentType
- from agno.utils.log import log_info, logger
+ from agno.utils.log import log_debug, log_error, log_warning


  class TextReader(Reader):
@@ -37,12 +37,12 @@ class TextReader(Reader):
  if isinstance(file, Path):
  if not file.exists():
  raise FileNotFoundError(f"Could not find file: {file}")
- log_info(f"Reading: {file}")
+ log_debug(f"Reading: {file}")
  file_name = name or file.stem
  file_contents = file.read_text(self.encoding or "utf-8")
  else:
  file_name = name or file.name.split(".")[0]
- log_info(f"Reading uploaded file: {file_name}")
+ log_debug(f"Reading uploaded file: {file_name}")
  file.seek(0)
  file_contents = file.read().decode(self.encoding or "utf-8")

@@ -60,7 +60,7 @@ class TextReader(Reader):
  return chunked_documents
  return documents
  except Exception as e:
- logger.error(f"Error reading: {file}: {e}")
+ log_error(f"Error reading: {file}: {e}")
  return []

  async def async_read(self, file: Union[Path, IO[Any]], name: Optional[str] = None) -> List[Document]:
@@ -69,7 +69,7 @@ class TextReader(Reader):
  if not file.exists():
  raise FileNotFoundError(f"Could not find file: {file}")

- log_info(f"Reading asynchronously: {file}")
+ log_debug(f"Reading asynchronously: {file}")
  file_name = name or file.stem

  try:
@@ -78,10 +78,10 @@ class TextReader(Reader):
  async with aiofiles.open(file, "r", encoding=self.encoding or "utf-8") as f:
  file_contents = await f.read()
  except ImportError:
- logger.warning("aiofiles not installed, using synchronous file I/O")
+ log_warning("aiofiles not installed, using synchronous file I/O")
  file_contents = file.read_text(self.encoding or "utf-8")
  else:
- log_info(f"Reading uploaded file asynchronously: {file.name}")
+ log_debug(f"Reading uploaded file asynchronously: {file.name}")
  file_name = name or file.name.split(".")[0]
  file.seek(0)
  file_contents = file.read().decode(self.encoding or "utf-8")
@@ -96,7 +96,7 @@ class TextReader(Reader):
  return await self._async_chunk_document(document)
  return [document]
  except Exception as e:
- logger.error(f"Error reading asynchronously: {file}: {e}")
+ log_error(f"Error reading asynchronously: {file}: {e}")
  return []

  async def _async_chunk_document(self, document: Document) -> List[Document]:
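Every s3_reader.py and text_reader.py hunk above is the same mechanical change: the readers drop the shared `logger` object and `log_info` in favor of the level-named helpers in `agno.utils.log`, demoting routine per-file read messages from info to debug level. A minimal sketch of the new call style follows; `read_file` is a hypothetical helper for illustration, and only the `agno.utils.log` imports are taken from the diff.

    from pathlib import Path
    from typing import Optional

    from agno.utils.log import log_debug, log_error

    def read_file(path: Path) -> Optional[str]:
        log_debug(f"Reading: {path}")  # 2.2.x used log_info(...) here
        try:
            return path.read_text(encoding="utf-8")
        except Exception as e:
            log_error(f"Error reading: {path}: {e}")  # 2.2.x used logger.error(...)
            return None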
--- a/agno/knowledge/reader/web_search_reader.py
+++ b/agno/knowledge/reader/web_search_reader.py
@@ -37,7 +37,7 @@ class WebSearchReader(Reader):
  user_agent: str = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"

  # Search engine configuration
- search_engine: Literal["duckduckgo", "google"] = "duckduckgo"
+ search_engine: Literal["duckduckgo"] = "duckduckgo"
  search_delay: float = 3.0 # Delay between search requests
  max_search_retries: int = 2 # Retries for search operations

@@ -121,57 +121,10 @@ class WebSearchReader(Reader):
  return []
  return []

- def _perform_google_search(self, query: str) -> List[Dict[str, str]]:
- """Perform web search using Google (requires googlesearch-python)"""
- log_debug(f"Performing Google search for: {query}")
-
- try:
- from googlesearch import search
- except ImportError:
- logger.error("Google search requires 'googlesearch-python'. Install with: pip install googlesearch-python")
- return []
-
- for attempt in range(self.max_search_retries):
- try:
- self._respect_rate_limits()
-
- results = []
- # Use the basic search function without unsupported parameters
- # The googlesearch-python library's search function only accepts basic parameters
- search_results = search(query)
-
- # Convert iterator to list and limit results
- result_list = list(search_results)[: self.max_results]
-
- for result in result_list:
- # The search function returns URLs as strings
- results.append(
- {
- "title": "", # Google search doesn't provide titles directly
- "url": result,
- "description": "", # Google search doesn't provide descriptions directly
- }
- )
-
- log_debug(f"Found {len(results)} Google search results")
- return results
-
- except Exception as e:
- logger.warning(f"Google search attempt {attempt + 1} failed: {e}")
- if attempt < self.max_search_retries - 1:
- time.sleep(self.search_delay)
- else:
- logger.error(f"All Google search attempts failed: {e}")
- return []
-
- return []
-
  def _perform_web_search(self, query: str) -> List[Dict[str, str]]:
  """Perform web search using the configured search engine"""
  if self.search_engine == "duckduckgo":
  return self._perform_duckduckgo_search(query)
- elif self.search_engine == "google":
- return self._perform_google_search(query)
  else:
  logger.error(f"Unsupported search engine: {self.search_engine}")
  return []
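With `_perform_google_search` removed (and the standalone `agno/tools/googlesearch.py` deleted, per the file list), `search_engine` is narrowed to the single literal `"duckduckgo"`: `"google"` no longer type-checks, and any other value falls through to the `Unsupported search engine` error branch at runtime. A hedged sketch of the surviving configuration, assuming the class attributes shown in the hunk can be set on an instance:

    from agno.knowledge.reader.web_search_reader import WebSearchReader

    reader = WebSearchReader()
    reader.search_engine = "duckduckgo"  # only remaining Literal value in 2.3.x
    reader.search_delay = 3.0            # delay between search requests, in seconds
    reader.max_search_retries = 2        # retries for search operations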
--- a/agno/knowledge/reader/website_reader.py
+++ b/agno/knowledge/reader/website_reader.py
@@ -12,7 +12,7 @@ from agno.knowledge.chunking.strategy import ChunkingStrategy, ChunkingStrategyT
  from agno.knowledge.document.base import Document
  from agno.knowledge.reader.base import Reader
  from agno.knowledge.types import ContentType
- from agno.utils.log import log_debug, logger
+ from agno.utils.log import log_debug, log_error, log_warning

  try:
  from bs4 import BeautifulSoup, Tag # noqa: F401
@@ -229,21 +229,21 @@ class WebsiteReader(Reader):
  # Log HTTP status errors but continue crawling other pages
  # Skip redirect errors (3xx) as they should be handled by follow_redirects
  if e.response.status_code >= 300 and e.response.status_code < 400:
- logger.debug(f"Redirect encountered for {current_url}, skipping: {e}")
+ log_debug(f"Redirect encountered for {current_url}, skipping: {e}")
  else:
- logger.warning(f"HTTP status error while crawling {current_url}: {e}")
+ log_warning(f"HTTP status error while crawling {current_url}: {e}")
  # For the initial URL, we should raise the error only if it's not a redirect
  if current_url == url and not crawler_result and not (300 <= e.response.status_code < 400):
  raise
  except httpx.RequestError as e:
  # Log request errors but continue crawling other pages
- logger.warning(f"Request error while crawling {current_url}: {e}")
+ log_warning(f"Request error while crawling {current_url}: {e}")
  # For the initial URL, we should raise the error
  if current_url == url and not crawler_result:
  raise
  except Exception as e:
  # Log other exceptions but continue crawling other pages
- logger.warning(f"Failed to crawl {current_url}: {e}")
+ log_warning(f"Failed to crawl {current_url}: {e}")
  # For the initial URL, we should raise the error
  if current_url == url and not crawler_result:
  # Wrap non-HTTP exceptions in a RequestError
@@ -332,19 +332,19 @@ class WebsiteReader(Reader):

  except httpx.HTTPStatusError as e:
  # Log HTTP status errors but continue crawling other pages
- logger.warning(f"HTTP status error while crawling asynchronously {current_url}: {e}")
+ log_warning(f"HTTP status error while crawling asynchronously {current_url}: {e}")
  # For the initial URL, we should raise the error
  if current_url == url and not crawler_result:
  raise
  except httpx.RequestError as e:
  # Log request errors but continue crawling other pages
- logger.warning(f"Request error while crawling asynchronously {current_url}: {e}")
+ log_warning(f"Request error while crawling asynchronously {current_url}: {e}")
  # For the initial URL, we should raise the error
  if current_url == url and not crawler_result:
  raise
  except Exception as e:
  # Log other exceptions but continue crawling other pages
- logger.warning(f"Failed to crawl asynchronously {current_url}: {e}")
+ log_warning(f"Failed to crawl asynchronously {current_url}: {e}")
  # For the initial URL, we should raise the error
  if current_url == url and not crawler_result:
  # Wrap non-HTTP exceptions in a RequestError
@@ -398,7 +398,7 @@ class WebsiteReader(Reader):
  )
  return documents
  except (httpx.HTTPStatusError, httpx.RequestError) as e:
- logger.error(f"Error reading website {url}: {e}")
+ log_error(f"Error reading website {url}: {e}")
  raise

  async def async_read(self, url: str, name: Optional[str] = None) -> List[Document]:
@@ -451,5 +451,5 @@ class WebsiteReader(Reader):

  return documents
  except (httpx.HTTPStatusError, httpx.RequestError) as e:
- logger.error(f"Error reading website asynchronously {url}: {e}")
+ log_error(f"Error reading website asynchronously {url}: {e}")
  raise
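The WebsiteReader hunks swap the same logger calls without touching the crawl policy they sit in: 3xx responses are skipped quietly on the sync path, other failures are logged as warnings and the crawl continues, and an exception is re-raised only when the initial URL itself fails before any page has been collected. A standalone sketch of that policy; `fetch_page`, `start_url`, and `have_results` are illustrative names, and only `httpx` and the log helpers come from the diff.

    from typing import Optional

    import httpx

    from agno.utils.log import log_debug, log_warning

    def fetch_page(current_url: str, start_url: str, have_results: bool) -> Optional[str]:
        try:
            response = httpx.get(current_url, follow_redirects=True)
            response.raise_for_status()
            return response.text
        except httpx.HTTPStatusError as e:
            if 300 <= e.response.status_code < 400:
                # Redirects are expected to be handled by follow_redirects; skip quietly
                log_debug(f"Redirect encountered for {current_url}, skipping: {e}")
            else:
                log_warning(f"HTTP status error while crawling {current_url}: {e}")
                # Only a failure on the very first URL, with nothing crawled yet, aborts
                if current_url == start_url and not have_results:
                    raise
            return None
        except httpx.RequestError as e:
            log_warning(f"Request error while crawling {current_url}: {e}")
            if current_url == start_url and not have_results:
                raise
            return None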