agno 2.0.4__py3-none-any.whl → 2.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. agno/agent/agent.py +127 -102
  2. agno/db/dynamo/dynamo.py +9 -7
  3. agno/db/firestore/firestore.py +7 -4
  4. agno/db/gcs_json/gcs_json_db.py +6 -4
  5. agno/db/json/json_db.py +10 -6
  6. agno/db/migrations/v1_to_v2.py +191 -23
  7. agno/db/mongo/mongo.py +67 -6
  8. agno/db/mysql/mysql.py +7 -6
  9. agno/db/mysql/schemas.py +27 -27
  10. agno/db/postgres/postgres.py +7 -6
  11. agno/db/redis/redis.py +3 -3
  12. agno/db/singlestore/singlestore.py +4 -4
  13. agno/db/sqlite/sqlite.py +7 -6
  14. agno/db/utils.py +0 -14
  15. agno/integrations/discord/client.py +1 -0
  16. agno/knowledge/embedder/openai.py +19 -11
  17. agno/knowledge/knowledge.py +11 -10
  18. agno/knowledge/reader/reader_factory.py +7 -3
  19. agno/knowledge/reader/web_search_reader.py +12 -6
  20. agno/knowledge/reader/website_reader.py +33 -16
  21. agno/media.py +70 -0
  22. agno/models/aimlapi/aimlapi.py +2 -2
  23. agno/models/base.py +31 -4
  24. agno/models/cerebras/cerebras_openai.py +2 -2
  25. agno/models/deepinfra/deepinfra.py +2 -2
  26. agno/models/deepseek/deepseek.py +2 -2
  27. agno/models/fireworks/fireworks.py +2 -2
  28. agno/models/internlm/internlm.py +2 -2
  29. agno/models/langdb/langdb.py +4 -4
  30. agno/models/litellm/litellm_openai.py +2 -2
  31. agno/models/message.py +135 -0
  32. agno/models/meta/llama_openai.py +2 -2
  33. agno/models/nebius/nebius.py +2 -2
  34. agno/models/nexus/__init__.py +3 -0
  35. agno/models/nexus/nexus.py +25 -0
  36. agno/models/nvidia/nvidia.py +2 -2
  37. agno/models/openai/responses.py +6 -0
  38. agno/models/openrouter/openrouter.py +2 -2
  39. agno/models/perplexity/perplexity.py +2 -2
  40. agno/models/portkey/portkey.py +3 -3
  41. agno/models/response.py +2 -1
  42. agno/models/sambanova/sambanova.py +2 -2
  43. agno/models/together/together.py +2 -2
  44. agno/models/vercel/v0.py +2 -2
  45. agno/models/xai/xai.py +2 -2
  46. agno/os/app.py +162 -42
  47. agno/os/interfaces/agui/utils.py +98 -134
  48. agno/os/router.py +3 -1
  49. agno/os/routers/health.py +0 -1
  50. agno/os/routers/home.py +52 -0
  51. agno/os/routers/knowledge/knowledge.py +2 -2
  52. agno/os/schema.py +21 -0
  53. agno/os/utils.py +1 -9
  54. agno/run/agent.py +19 -3
  55. agno/run/team.py +18 -3
  56. agno/run/workflow.py +10 -0
  57. agno/team/team.py +70 -45
  58. agno/tools/duckduckgo.py +15 -11
  59. agno/tools/e2b.py +14 -7
  60. agno/tools/file_generation.py +350 -0
  61. agno/tools/function.py +2 -0
  62. agno/tools/googlesearch.py +1 -1
  63. agno/utils/gemini.py +24 -4
  64. agno/utils/string.py +32 -0
  65. agno/utils/tools.py +1 -1
  66. agno/vectordb/chroma/chromadb.py +66 -25
  67. agno/vectordb/lancedb/lance_db.py +15 -4
  68. agno/vectordb/milvus/milvus.py +6 -0
  69. agno/workflow/step.py +4 -3
  70. agno/workflow/workflow.py +4 -0
  71. {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/METADATA +9 -5
  72. {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/RECORD +75 -72
  73. agno/knowledge/reader/url_reader.py +0 -128
  74. {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/WHEEL +0 -0
  75. {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/licenses/LICENSE +0 -0
  76. {agno-2.0.4.dist-info → agno-2.0.6.dist-info}/top_level.txt +0 -0
agno/vectordb/chroma/chromadb.py CHANGED
@@ -1,4 +1,5 @@
  import asyncio
+ import json
  from hashlib import md5
  from typing import Any, Dict, List, Mapping, Optional, Union, cast

@@ -60,6 +61,44 @@ class ChromaDb(VectorDb):
          # Chroma client kwargs
          self.kwargs = kwargs

+     def _flatten_metadata(self, metadata: Dict[str, Any]) -> Dict[str, Union[str, int, float, bool]]:
+         """
+         Flatten nested metadata to ChromaDB-compatible format.
+
+         Args:
+             metadata: Dictionary that may contain nested structures
+
+         Returns:
+             Flattened dictionary with only primitive values
+         """
+         flattened: Dict[str, Any] = {}
+
+         def _flatten_recursive(obj: Any, prefix: str = "") -> None:
+             if isinstance(obj, dict):
+                 if len(obj) == 0:
+                     # Handle empty dictionaries by converting to JSON string
+                     flattened[prefix] = json.dumps(obj)
+                 else:
+                     for key, value in obj.items():
+                         new_key = f"{prefix}.{key}" if prefix else key
+                         _flatten_recursive(value, new_key)
+             elif isinstance(obj, (list, tuple)):
+                 # Convert lists/tuples to JSON strings
+                 flattened[prefix] = json.dumps(obj)
+             elif isinstance(obj, (str, int, float, bool)) or obj is None:
+                 if obj is not None:  # ChromaDB doesn't accept None values
+                     flattened[prefix] = obj
+             else:
+                 # Convert other complex types to JSON strings
+                 try:
+                     flattened[prefix] = json.dumps(obj)
+                 except (TypeError, ValueError):
+                     # If it can't be serialized, convert to string
+                     flattened[prefix] = str(obj)
+
+         _flatten_recursive(metadata)
+         return flattened
+
      @property
      def client(self) -> ClientAPI:
          if self._client is None:
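As an illustration of the new helper's behaviour: _flatten_metadata dot-joins nested keys, JSON-encodes lists and empty dicts, and drops None values, so only the primitive types ChromaDB accepts remain. A minimal sketch with a hypothetical metadata dict (not taken from the package):

import json

nested = {
    "source": {"url": "https://example.com", "tags": ["news", "ai"]},
    "page": 3,
    "extra": {},
    "skip": None,
}

# Per the hunk above, ChromaDb._flatten_metadata(nested) should produce:
expected = {
    "source.url": "https://example.com",
    "source.tags": json.dumps(["news", "ai"]),  # lists become JSON strings
    "page": 3,
    "extra": json.dumps({}),  # empty dicts become "{}"
    # "skip" is dropped: ChromaDB does not accept None values
}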
@@ -147,11 +186,14 @@ class ChromaDb(VectorDb):

              metadata["content_hash"] = content_hash

+             # Flatten metadata for ChromaDB compatibility
+             flattened_metadata = self._flatten_metadata(metadata)
+
              docs_embeddings.append(document.embedding)
              docs.append(cleaned_content)
              ids.append(doc_id)
-             docs_metadata.append(metadata)
-             log_debug(f"Prepared document: {document.id} | {document.name} | {metadata}")
+             docs_metadata.append(flattened_metadata)
+             log_debug(f"Prepared document: {document.id} | {document.name} | {flattened_metadata}")

          if self._collection is None:
              logger.warning("Collection does not exist")
@@ -196,11 +238,14 @@ class ChromaDb(VectorDb):

              metadata["content_hash"] = content_hash

+             # Flatten metadata for ChromaDB compatibility
+             flattened_metadata = self._flatten_metadata(metadata)
+
              docs_embeddings.append(document.embedding)
              docs.append(cleaned_content)
              ids.append(doc_id)
-             docs_metadata.append(metadata)
-             log_debug(f"Prepared document: {document.id} | {document.name} | {metadata}")
+             docs_metadata.append(flattened_metadata)
+             log_debug(f"Prepared document: {document.id} | {document.name} | {flattened_metadata}")

          if self._collection is None:
              logger.warning("Collection does not exist")
@@ -262,11 +307,14 @@ class ChromaDb(VectorDb):

              metadata["content_hash"] = content_hash

+             # Flatten metadata for ChromaDB compatibility
+             flattened_metadata = self._flatten_metadata(metadata)
+
              docs_embeddings.append(document.embedding)
              docs.append(cleaned_content)
              ids.append(doc_id)
-             docs_metadata.append(metadata)
-             log_debug(f"Upserted document: {document.id} | {document.name} | {metadata}")
+             docs_metadata.append(flattened_metadata)
+             log_debug(f"Upserted document: {document.id} | {document.name} | {flattened_metadata}")

          if self._collection is None:
              logger.warning("Collection does not exist")
@@ -313,11 +361,14 @@ class ChromaDb(VectorDb):

              metadata["content_hash"] = content_hash

+             # Flatten metadata for ChromaDB compatibility
+             flattened_metadata = self._flatten_metadata(metadata)
+
              docs_embeddings.append(document.embedding)
              docs.append(cleaned_content)
              ids.append(doc_id)
-             docs_metadata.append(metadata)
-             log_debug(f"Upserted document: {document.id} | {document.name} | {metadata}")
+             docs_metadata.append(flattened_metadata)
+             log_debug(f"Upserted document: {document.id} | {document.name} | {flattened_metadata}")

          if self._collection is None:
              logger.warning("Collection does not exist")
@@ -747,6 +798,9 @@ class ChromaDb(VectorDb):
              logger.debug(f"No documents found with content_id: {content_id}")
              return

+         # Flatten the new metadata first
+         flattened_new_metadata = self._flatten_metadata(metadata)
+
          # Merge metadata for each document
          updated_metadatas = []
          for i, current_meta in enumerate(current_metadatas or []):
@@ -754,26 +808,13 @@
                  meta_dict: Dict[str, Any] = {}
              else:
                  meta_dict = dict(current_meta)  # Convert Mapping to dict
-             updated_meta: Dict[str, Any] = meta_dict.copy()
-             updated_meta.update(metadata)
-
-             if "filters" not in updated_meta:
-                 updated_meta["filters"] = {}
-             if isinstance(updated_meta["filters"], dict):
-                 updated_meta["filters"].update(metadata)
-             else:
-                 updated_meta["filters"] = metadata
-             updated_metadatas.append(updated_meta)

-         # Update the documents
-         # Filter out None values from metadata as ChromaDB doesn't accept them
-         cleaned_metadatas = []
-         for meta in updated_metadatas:
-             cleaned_meta = {k: v for k, v in meta.items() if v is not None}
-             cleaned_metadatas.append(cleaned_meta)
+             # Update with flattened metadata
+             meta_dict.update(flattened_new_metadata)
+             updated_metadatas.append(meta_dict)

          # Convert to the expected type for ChromaDB
-         chroma_metadatas = cast(List[Mapping[str, Union[str, int, float, bool]]], cleaned_metadatas)
+         chroma_metadatas = cast(List[Mapping[str, Union[str, int, float, bool]]], updated_metadatas)
          collection.update(ids=ids, metadatas=chroma_metadatas)  # type: ignore
          logger.debug(f"Updated metadata for {len(ids)} documents with content_id: {content_id}")

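The metadata-update path above is also simpler than before: instead of copying the incoming metadata under an extra "filters" key and then stripping None values, each document's stored metadata is now merged with the flattened update directly. A rough before/after sketch with hypothetical values:

# Hypothetical stored metadata and incoming update:
current_meta = {"content_hash": "abc123"}
update = {"user": {"id": 7}}

# Old behaviour (removed lines): the nested update was also duplicated under "filters",
# e.g. {"content_hash": "abc123", "user": {"id": 7}, "filters": {"user": {"id": 7}}},
# which ChromaDB rejects because metadata values must be str, int, float or bool.

# New behaviour (added lines): the update is flattened first, then merged in place:
merged = dict(current_meta)
merged.update({"user.id": 7})  # result of self._flatten_metadata(update)
assert merged == {"content_hash": "abc123", "user.id": 7}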
agno/vectordb/lancedb/lance_db.py CHANGED
@@ -950,17 +950,28 @@ class LanceDb(VectorDb):
              logger.error("Table not initialized")
              return

-         # Search for documents with the given content_id
-         query_filter = f"payload->>'content_id' = '{content_id}'"
-         results = self.table.search().where(query_filter).to_pandas()
+         # Get all documents and filter in Python (LanceDB doesn't support JSON operators)
+         total_count = self.table.count_rows()
+         results = self.table.search().select(["id", "payload"]).limit(total_count).to_pandas()

          if results.empty:
+             logger.debug("No documents found")
+             return
+
+         # Find matching documents with the given content_id
+         matching_rows = []
+         for _, row in results.iterrows():
+             payload = json.loads(row["payload"])
+             if payload.get("content_id") == content_id:
+                 matching_rows.append(row)
+
+         if not matching_rows:
              logger.debug(f"No documents found with content_id: {content_id}")
              return

          # Update each matching document
          updated_count = 0
-         for _, row in results.iterrows():
+         for row in matching_rows:
              row_id = row["id"]
              current_payload = json.loads(row["payload"])

agno/vectordb/milvus/milvus.py CHANGED
@@ -423,6 +423,9 @@ class Milvus(VectorDb):
          else:
              for document in documents:
                  document.embed(embedder=self.embedder)
+                 if not document.embedding:
+                     log_debug(f"Skipping document without embedding: {document.name} ({document.meta_data})")
+                     continue
                  cleaned_content = document.content.replace("\x00", "\ufffd")
                  doc_id = md5(cleaned_content.encode()).hexdigest()

@@ -465,6 +468,9 @@

          async def process_document(document):
              document.embed(embedder=self.embedder)
+             if not document.embedding:
+                 log_debug(f"Skipping document without embedding: {document.name} ({document.meta_data})")
+                 return None
              cleaned_content = document.content.replace("\x00", "\ufffd")
              doc_id = md5(cleaned_content.encode()).hexdigest()

agno/workflow/step.py CHANGED
@@ -499,7 +499,7 @@ class Step:
                  if store_executor_outputs and workflow_run_response is not None:
                      self._store_executor_response(workflow_run_response, active_executor_run_response)  # type: ignore

-                 final_response = self._process_step_output(active_executor_run_response)  # type: ignore
+                 final_response = active_executor_run_response  # type: ignore

              else:
                  raise ValueError(f"Unsupported executor type: {self._executor_type}")
@@ -513,6 +513,7 @@ class Step:
              use_workflow_logger()

              # Yield the step output
+             final_response = self._process_step_output(final_response)
              yield final_response

              # Emit StepCompletedEvent
@@ -865,7 +866,6 @@ class Step:

                  active_executor_run_response = None
                  async for event in response_stream:
-                     log_debug(f"Received async event from agent: {type(event).__name__}")
                      if isinstance(event, RunOutput) or isinstance(event, TeamRunOutput):
                          active_executor_run_response = event
                          break
@@ -877,7 +877,7 @@
                  if store_executor_outputs and workflow_run_response is not None:
                      self._store_executor_response(workflow_run_response, active_executor_run_response)  # type: ignore

-                 final_response = self._process_step_output(active_executor_run_response)  # type: ignore
+                 final_response = active_executor_run_response  # type: ignore
              else:
                  raise ValueError(f"Unsupported executor type: {self._executor_type}")

@@ -889,6 +889,7 @@
              use_workflow_logger()

              # Yield the final response
+             final_response = self._process_step_output(final_response)
              yield final_response

              if stream_intermediate_steps and workflow_run_response:
agno/workflow/workflow.py CHANGED
@@ -1708,6 +1708,7 @@ class Workflow:
          # Create workflow run response with PENDING status
          workflow_run_response = WorkflowRunOutput(
              run_id=run_id,
+             input=input,
              session_id=session_id,
              workflow_id=self.id,
              workflow_name=self.name,
@@ -1798,6 +1799,7 @@ class Workflow:
          # Create workflow run response with PENDING status
          workflow_run_response = WorkflowRunOutput(
              run_id=run_id,
+             input=input,
              session_id=session_id,
              workflow_id=self.id,
              workflow_name=self.name,
@@ -1971,6 +1973,7 @@ class Workflow:
          # Create workflow run response that will be updated by reference
          workflow_run_response = WorkflowRunOutput(
              run_id=run_id,
+             input=input,
              session_id=session_id,
              workflow_id=self.id,
              workflow_name=self.name,
@@ -2139,6 +2142,7 @@ class Workflow:
          # Create workflow run response that will be updated by reference
          workflow_run_response = WorkflowRunOutput(
              run_id=run_id,
+             input=input,
              session_id=session_id,
              workflow_id=self.id,
              workflow_name=self.name,
{agno-2.0.4.dist-info → agno-2.0.6.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: agno
- Version: 2.0.4
+ Version: 2.0.6
  Summary: Agno: a lightweight library for building Multi-Agent Systems
  Author-email: Ashpreet Bedi <ashpreet@agno.com>
  Project-URL: homepage, https://agno.com
@@ -161,6 +161,8 @@ Requires-Dist: opencv-python; extra == "opencv"
  Provides-Extra: psycopg
  Requires-Dist: psycopg-binary; extra == "psycopg"
  Requires-Dist: psycopg; extra == "psycopg"
+ Provides-Extra: reportlab
+ Requires-Dist: reportlab; extra == "reportlab"
  Provides-Extra: todoist
  Requires-Dist: todoist-api-python; extra == "todoist"
  Provides-Extra: valyu
@@ -306,6 +308,7 @@ Requires-Dist: agno[mem0]; extra == "tools"
  Requires-Dist: agno[memori]; extra == "tools"
  Requires-Dist: agno[google_bigquery]; extra == "tools"
  Requires-Dist: agno[psycopg]; extra == "tools"
+ Requires-Dist: agno[reportlab]; extra == "tools"
  Requires-Dist: agno[trafilatura]; extra == "tools"
  Requires-Dist: agno[neo4j]; extra == "tools"
  Provides-Extra: storage
@@ -366,6 +369,7 @@ Dynamic: license-file
  <div align="center">
  <a href="https://docs.agno.com">📚 Documentation</a> &nbsp;|&nbsp;
  <a href="https://docs.agno.com/examples/introduction">💡 Examples</a> &nbsp;|&nbsp;
+ <a href="https://www.agno.com/?utm_source=github&utm_medium=readme&utm_campaign=agno-github&utm_content=header">🏠 Website</a> &nbsp;|&nbsp;
  <a href="https://github.com/agno-agi/agno/stargazers">🌟 Star Us</a>
  </div>

@@ -406,14 +410,14 @@ If you're new to Agno, follow our [quickstart](https://docs.agno.com/introductio

  After that, checkout the [examples gallery](https://docs.agno.com/examples/introduction) and build real-world applications with Agno.

- ## Documentation, Community & More examples
+ ## Documentation, Community & More Examples

  - Docs: <a href="https://docs.agno.com" target="_blank" rel="noopener noreferrer">docs.agno.com</a>
  - Cookbook: <a href="https://github.com/agno-agi/agno/tree/main/cookbook" target="_blank" rel="noopener noreferrer">Cookbook</a>
  - Community forum: <a href="https://community.agno.com/" target="_blank" rel="noopener noreferrer">community.agno.com</a>
  - Discord: <a href="https://discord.gg/4MtYHHrgA8" target="_blank" rel="noopener noreferrer">discord</a>

- ## Setup your coding agent to use Agno
+ ## Setup Your Coding Agent to Use Agno

  For LLMs and AI assistants to understand and navigate Agno's documentation, we provide an [llms.txt](https://docs.agno.com/llms.txt) or [llms-full.txt](https://docs.agno.com/llms-full.txt) file.

@@ -441,7 +445,7 @@ At Agno, we're obsessed with performance. Why? because even simple AI workflows

  While an Agent's run-time is bottlenecked by inference, we must do everything possible to minimize execution time, reduce memory usage, and parallelize tool calls. These numbers may seem trivial at first, but our experience shows that they add up even at a reasonably small scale.

- ### Instantiation time
+ ### Instantiation Time

  Let's measure the time it takes for an Agent with 1 tool to start up. We'll run the evaluation 1000 times to get a baseline measurement.

@@ -469,7 +473,7 @@ Agno is on the left, notice how it finishes before LangGraph gets 1/2 way throug

  https://github.com/user-attachments/assets/ba466d45-75dd-45ac-917b-0a56c5742e23

- ### Memory usage
+ ### Memory Usage

  To measure memory usage, we use the `tracemalloc` library. We first calculate a baseline memory usage by running an empty function, then run the Agent 1000x times and calculate the difference. This gives a (reasonably) isolated measurement of the memory usage of the Agent.

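The README excerpt above describes the memory benchmark only in prose. A minimal sketch of that kind of tracemalloc measurement follows; the Agent construction is a hypothetical stand-in, not the benchmark script shipped with the package:

import tracemalloc

from agno.agent import Agent


def baseline_fn():
    pass


# Baseline: memory attributed to running an empty function
tracemalloc.start()
baseline_fn()
baseline_bytes, _ = tracemalloc.get_traced_memory()
tracemalloc.stop()

# Measurement: instantiate the Agent 1000x and compare against the baseline
tracemalloc.start()
agents = [Agent() for _ in range(1000)]  # hypothetical; the published benchmark uses an Agent with one tool
measured_bytes, _ = tracemalloc.get_traced_memory()
tracemalloc.stop()

print(f"Approximate memory per Agent: {(measured_bytes - baseline_bytes) / 1000:.0f} bytes")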