agno 2.1.2__py3-none-any.whl → 2.3.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +5540 -2273
- agno/api/api.py +2 -0
- agno/api/os.py +1 -1
- agno/compression/__init__.py +3 -0
- agno/compression/manager.py +247 -0
- agno/culture/__init__.py +3 -0
- agno/culture/manager.py +956 -0
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/base.py +689 -6
- agno/db/dynamo/dynamo.py +933 -37
- agno/db/dynamo/schemas.py +174 -10
- agno/db/dynamo/utils.py +63 -4
- agno/db/firestore/firestore.py +831 -9
- agno/db/firestore/schemas.py +51 -0
- agno/db/firestore/utils.py +102 -4
- agno/db/gcs_json/gcs_json_db.py +660 -12
- agno/db/gcs_json/utils.py +60 -26
- agno/db/in_memory/in_memory_db.py +287 -14
- agno/db/in_memory/utils.py +60 -2
- agno/db/json/json_db.py +590 -14
- agno/db/json/utils.py +60 -26
- agno/db/migrations/manager.py +199 -0
- agno/db/migrations/v1_to_v2.py +43 -13
- agno/db/migrations/versions/__init__.py +0 -0
- agno/db/migrations/versions/v2_3_0.py +938 -0
- agno/db/mongo/__init__.py +15 -1
- agno/db/mongo/async_mongo.py +2760 -0
- agno/db/mongo/mongo.py +879 -11
- agno/db/mongo/schemas.py +42 -0
- agno/db/mongo/utils.py +80 -8
- agno/db/mysql/__init__.py +2 -1
- agno/db/mysql/async_mysql.py +2912 -0
- agno/db/mysql/mysql.py +946 -68
- agno/db/mysql/schemas.py +72 -10
- agno/db/mysql/utils.py +198 -7
- agno/db/postgres/__init__.py +2 -1
- agno/db/postgres/async_postgres.py +2579 -0
- agno/db/postgres/postgres.py +942 -57
- agno/db/postgres/schemas.py +81 -18
- agno/db/postgres/utils.py +164 -2
- agno/db/redis/redis.py +671 -7
- agno/db/redis/schemas.py +50 -0
- agno/db/redis/utils.py +65 -7
- agno/db/schemas/__init__.py +2 -1
- agno/db/schemas/culture.py +120 -0
- agno/db/schemas/evals.py +1 -0
- agno/db/schemas/memory.py +17 -2
- agno/db/singlestore/schemas.py +63 -0
- agno/db/singlestore/singlestore.py +949 -83
- agno/db/singlestore/utils.py +60 -2
- agno/db/sqlite/__init__.py +2 -1
- agno/db/sqlite/async_sqlite.py +2911 -0
- agno/db/sqlite/schemas.py +62 -0
- agno/db/sqlite/sqlite.py +965 -46
- agno/db/sqlite/utils.py +169 -8
- agno/db/surrealdb/__init__.py +3 -0
- agno/db/surrealdb/metrics.py +292 -0
- agno/db/surrealdb/models.py +334 -0
- agno/db/surrealdb/queries.py +71 -0
- agno/db/surrealdb/surrealdb.py +1908 -0
- agno/db/surrealdb/utils.py +147 -0
- agno/db/utils.py +2 -0
- agno/eval/__init__.py +10 -0
- agno/eval/accuracy.py +75 -55
- agno/eval/agent_as_judge.py +861 -0
- agno/eval/base.py +29 -0
- agno/eval/performance.py +16 -7
- agno/eval/reliability.py +28 -16
- agno/eval/utils.py +35 -17
- agno/exceptions.py +27 -2
- agno/filters.py +354 -0
- agno/guardrails/prompt_injection.py +1 -0
- agno/hooks/__init__.py +3 -0
- agno/hooks/decorator.py +164 -0
- agno/integrations/discord/client.py +1 -1
- agno/knowledge/chunking/agentic.py +13 -10
- agno/knowledge/chunking/fixed.py +4 -1
- agno/knowledge/chunking/semantic.py +9 -4
- agno/knowledge/chunking/strategy.py +59 -15
- agno/knowledge/embedder/fastembed.py +1 -1
- agno/knowledge/embedder/nebius.py +1 -1
- agno/knowledge/embedder/ollama.py +8 -0
- agno/knowledge/embedder/openai.py +8 -8
- agno/knowledge/embedder/sentence_transformer.py +6 -2
- agno/knowledge/embedder/vllm.py +262 -0
- agno/knowledge/knowledge.py +1618 -318
- agno/knowledge/reader/base.py +6 -2
- agno/knowledge/reader/csv_reader.py +8 -10
- agno/knowledge/reader/docx_reader.py +5 -6
- agno/knowledge/reader/field_labeled_csv_reader.py +16 -20
- agno/knowledge/reader/json_reader.py +5 -4
- agno/knowledge/reader/markdown_reader.py +8 -8
- agno/knowledge/reader/pdf_reader.py +17 -19
- agno/knowledge/reader/pptx_reader.py +101 -0
- agno/knowledge/reader/reader_factory.py +32 -3
- agno/knowledge/reader/s3_reader.py +3 -3
- agno/knowledge/reader/tavily_reader.py +193 -0
- agno/knowledge/reader/text_reader.py +22 -10
- agno/knowledge/reader/web_search_reader.py +1 -48
- agno/knowledge/reader/website_reader.py +10 -10
- agno/knowledge/reader/wikipedia_reader.py +33 -1
- agno/knowledge/types.py +1 -0
- agno/knowledge/utils.py +72 -7
- agno/media.py +22 -6
- agno/memory/__init__.py +14 -1
- agno/memory/manager.py +544 -83
- agno/memory/strategies/__init__.py +15 -0
- agno/memory/strategies/base.py +66 -0
- agno/memory/strategies/summarize.py +196 -0
- agno/memory/strategies/types.py +37 -0
- agno/models/aimlapi/aimlapi.py +17 -0
- agno/models/anthropic/claude.py +515 -40
- agno/models/aws/bedrock.py +102 -21
- agno/models/aws/claude.py +131 -274
- agno/models/azure/ai_foundry.py +41 -19
- agno/models/azure/openai_chat.py +39 -8
- agno/models/base.py +1249 -525
- agno/models/cerebras/cerebras.py +91 -21
- agno/models/cerebras/cerebras_openai.py +21 -2
- agno/models/cohere/chat.py +40 -6
- agno/models/cometapi/cometapi.py +18 -1
- agno/models/dashscope/dashscope.py +2 -3
- agno/models/deepinfra/deepinfra.py +18 -1
- agno/models/deepseek/deepseek.py +69 -3
- agno/models/fireworks/fireworks.py +18 -1
- agno/models/google/gemini.py +877 -80
- agno/models/google/utils.py +22 -0
- agno/models/groq/groq.py +51 -18
- agno/models/huggingface/huggingface.py +17 -6
- agno/models/ibm/watsonx.py +16 -6
- agno/models/internlm/internlm.py +18 -1
- agno/models/langdb/langdb.py +13 -1
- agno/models/litellm/chat.py +44 -9
- agno/models/litellm/litellm_openai.py +18 -1
- agno/models/message.py +28 -5
- agno/models/meta/llama.py +47 -14
- agno/models/meta/llama_openai.py +22 -17
- agno/models/mistral/mistral.py +8 -4
- agno/models/nebius/nebius.py +6 -7
- agno/models/nvidia/nvidia.py +20 -3
- agno/models/ollama/chat.py +24 -8
- agno/models/openai/chat.py +104 -29
- agno/models/openai/responses.py +101 -81
- agno/models/openrouter/openrouter.py +60 -3
- agno/models/perplexity/perplexity.py +17 -1
- agno/models/portkey/portkey.py +7 -6
- agno/models/requesty/requesty.py +24 -4
- agno/models/response.py +73 -2
- agno/models/sambanova/sambanova.py +20 -3
- agno/models/siliconflow/siliconflow.py +19 -2
- agno/models/together/together.py +20 -3
- agno/models/utils.py +254 -8
- agno/models/vercel/v0.py +20 -3
- agno/models/vertexai/__init__.py +0 -0
- agno/models/vertexai/claude.py +190 -0
- agno/models/vllm/vllm.py +19 -14
- agno/models/xai/xai.py +19 -2
- agno/os/app.py +549 -152
- agno/os/auth.py +190 -3
- agno/os/config.py +23 -0
- agno/os/interfaces/a2a/router.py +8 -11
- agno/os/interfaces/a2a/utils.py +1 -1
- agno/os/interfaces/agui/router.py +18 -3
- agno/os/interfaces/agui/utils.py +152 -39
- agno/os/interfaces/slack/router.py +55 -37
- agno/os/interfaces/slack/slack.py +9 -1
- agno/os/interfaces/whatsapp/router.py +0 -1
- agno/os/interfaces/whatsapp/security.py +3 -1
- agno/os/mcp.py +110 -52
- agno/os/middleware/__init__.py +2 -0
- agno/os/middleware/jwt.py +676 -112
- agno/os/router.py +40 -1478
- agno/os/routers/agents/__init__.py +3 -0
- agno/os/routers/agents/router.py +599 -0
- agno/os/routers/agents/schema.py +261 -0
- agno/os/routers/evals/evals.py +96 -39
- agno/os/routers/evals/schemas.py +65 -33
- agno/os/routers/evals/utils.py +80 -10
- agno/os/routers/health.py +10 -4
- agno/os/routers/knowledge/knowledge.py +196 -38
- agno/os/routers/knowledge/schemas.py +82 -22
- agno/os/routers/memory/memory.py +279 -52
- agno/os/routers/memory/schemas.py +46 -17
- agno/os/routers/metrics/metrics.py +20 -8
- agno/os/routers/metrics/schemas.py +16 -16
- agno/os/routers/session/session.py +462 -34
- agno/os/routers/teams/__init__.py +3 -0
- agno/os/routers/teams/router.py +512 -0
- agno/os/routers/teams/schema.py +257 -0
- agno/os/routers/traces/__init__.py +3 -0
- agno/os/routers/traces/schemas.py +414 -0
- agno/os/routers/traces/traces.py +499 -0
- agno/os/routers/workflows/__init__.py +3 -0
- agno/os/routers/workflows/router.py +624 -0
- agno/os/routers/workflows/schema.py +75 -0
- agno/os/schema.py +256 -693
- agno/os/scopes.py +469 -0
- agno/os/utils.py +514 -36
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/openai.py +5 -0
- agno/reasoning/vertexai.py +76 -0
- agno/run/__init__.py +6 -0
- agno/run/agent.py +155 -32
- agno/run/base.py +55 -3
- agno/run/requirement.py +181 -0
- agno/run/team.py +125 -38
- agno/run/workflow.py +72 -18
- agno/session/agent.py +102 -89
- agno/session/summary.py +56 -15
- agno/session/team.py +164 -90
- agno/session/workflow.py +405 -40
- agno/table.py +10 -0
- agno/team/team.py +3974 -1903
- agno/tools/dalle.py +2 -4
- agno/tools/eleven_labs.py +23 -25
- agno/tools/exa.py +21 -16
- agno/tools/file.py +153 -23
- agno/tools/file_generation.py +16 -10
- agno/tools/firecrawl.py +15 -7
- agno/tools/function.py +193 -38
- agno/tools/gmail.py +238 -14
- agno/tools/google_drive.py +271 -0
- agno/tools/googlecalendar.py +36 -8
- agno/tools/googlesheets.py +20 -5
- agno/tools/jira.py +20 -0
- agno/tools/mcp/__init__.py +10 -0
- agno/tools/mcp/mcp.py +331 -0
- agno/tools/mcp/multi_mcp.py +347 -0
- agno/tools/mcp/params.py +24 -0
- agno/tools/mcp_toolbox.py +3 -3
- agno/tools/models/nebius.py +5 -5
- agno/tools/models_labs.py +20 -10
- agno/tools/nano_banana.py +151 -0
- agno/tools/notion.py +204 -0
- agno/tools/parallel.py +314 -0
- agno/tools/postgres.py +76 -36
- agno/tools/redshift.py +406 -0
- agno/tools/scrapegraph.py +1 -1
- agno/tools/shopify.py +1519 -0
- agno/tools/slack.py +18 -3
- agno/tools/spotify.py +919 -0
- agno/tools/tavily.py +146 -0
- agno/tools/toolkit.py +25 -0
- agno/tools/workflow.py +8 -1
- agno/tools/yfinance.py +12 -11
- agno/tracing/__init__.py +12 -0
- agno/tracing/exporter.py +157 -0
- agno/tracing/schemas.py +276 -0
- agno/tracing/setup.py +111 -0
- agno/utils/agent.py +938 -0
- agno/utils/cryptography.py +22 -0
- agno/utils/dttm.py +33 -0
- agno/utils/events.py +151 -3
- agno/utils/gemini.py +15 -5
- agno/utils/hooks.py +118 -4
- agno/utils/http.py +113 -2
- agno/utils/knowledge.py +12 -5
- agno/utils/log.py +1 -0
- agno/utils/mcp.py +92 -2
- agno/utils/media.py +187 -1
- agno/utils/merge_dict.py +3 -3
- agno/utils/message.py +60 -0
- agno/utils/models/ai_foundry.py +9 -2
- agno/utils/models/claude.py +49 -14
- agno/utils/models/cohere.py +9 -2
- agno/utils/models/llama.py +9 -2
- agno/utils/models/mistral.py +4 -2
- agno/utils/print_response/agent.py +109 -16
- agno/utils/print_response/team.py +223 -30
- agno/utils/print_response/workflow.py +251 -34
- agno/utils/streamlit.py +1 -1
- agno/utils/team.py +98 -9
- agno/utils/tokens.py +657 -0
- agno/vectordb/base.py +39 -7
- agno/vectordb/cassandra/cassandra.py +21 -5
- agno/vectordb/chroma/chromadb.py +43 -12
- agno/vectordb/clickhouse/clickhousedb.py +21 -5
- agno/vectordb/couchbase/couchbase.py +29 -5
- agno/vectordb/lancedb/lance_db.py +92 -181
- agno/vectordb/langchaindb/langchaindb.py +24 -4
- agno/vectordb/lightrag/lightrag.py +17 -3
- agno/vectordb/llamaindex/llamaindexdb.py +25 -5
- agno/vectordb/milvus/milvus.py +50 -37
- agno/vectordb/mongodb/__init__.py +7 -1
- agno/vectordb/mongodb/mongodb.py +36 -30
- agno/vectordb/pgvector/pgvector.py +201 -77
- agno/vectordb/pineconedb/pineconedb.py +41 -23
- agno/vectordb/qdrant/qdrant.py +67 -54
- agno/vectordb/redis/__init__.py +9 -0
- agno/vectordb/redis/redisdb.py +682 -0
- agno/vectordb/singlestore/singlestore.py +50 -29
- agno/vectordb/surrealdb/surrealdb.py +31 -41
- agno/vectordb/upstashdb/upstashdb.py +34 -6
- agno/vectordb/weaviate/weaviate.py +53 -14
- agno/workflow/__init__.py +2 -0
- agno/workflow/agent.py +299 -0
- agno/workflow/condition.py +120 -18
- agno/workflow/loop.py +77 -10
- agno/workflow/parallel.py +231 -143
- agno/workflow/router.py +118 -17
- agno/workflow/step.py +609 -170
- agno/workflow/steps.py +73 -6
- agno/workflow/types.py +96 -21
- agno/workflow/workflow.py +2039 -262
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/METADATA +201 -66
- agno-2.3.13.dist-info/RECORD +613 -0
- agno/tools/googlesearch.py +0 -98
- agno/tools/mcp.py +0 -679
- agno/tools/memori.py +0 -339
- agno-2.1.2.dist-info/RECORD +0 -543
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/WHEEL +0 -0
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,193 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
from dataclasses import dataclass
|
|
3
|
+
from typing import Dict, List, Literal, Optional
|
|
4
|
+
|
|
5
|
+
from agno.knowledge.chunking.semantic import SemanticChunking
|
|
6
|
+
from agno.knowledge.chunking.strategy import ChunkingStrategy, ChunkingStrategyType
|
|
7
|
+
from agno.knowledge.document.base import Document
|
|
8
|
+
from agno.knowledge.reader.base import Reader
|
|
9
|
+
from agno.knowledge.types import ContentType
|
|
10
|
+
from agno.utils.log import log_debug, logger
|
|
11
|
+
|
|
12
|
+
try:
|
|
13
|
+
from tavily import TavilyClient # type: ignore[attr-defined]
|
|
14
|
+
except ImportError:
|
|
15
|
+
raise ImportError(
|
|
16
|
+
"The `tavily-python` package is not installed. Please install it via `pip install tavily-python`."
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass
class TavilyReader(Reader):
    """Reader that extracts web page content from URLs via Tavily's Extract API.

    Attributes:
        api_key: Tavily API key. When None, TavilyClient falls back to the
            TAVILY_API_KEY environment variable.
        params: Extra keyword arguments forwarded to the extract API call
            (these override the defaults built in `_extract`).
        extract_format: Output format - "markdown" or "text".
        extract_depth: Extraction depth - "basic" (1 credit/5 URLs) or
            "advanced" (2 credits/5 URLs).
    """

    api_key: Optional[str] = None
    params: Optional[Dict] = None
    extract_format: Literal["markdown", "text"] = "markdown"
    extract_depth: Literal["basic", "advanced"] = "basic"

    def __init__(
        self,
        api_key: Optional[str] = None,
        params: Optional[Dict] = None,
        extract_format: Literal["markdown", "text"] = "markdown",
        extract_depth: Literal["basic", "advanced"] = "basic",
        chunk: bool = True,
        chunk_size: int = 5000,
        chunking_strategy: Optional[ChunkingStrategy] = SemanticChunking(),
        name: Optional[str] = None,
        description: Optional[str] = None,
    ) -> None:
        """
        Initialize TavilyReader for extracting content from URLs using Tavily's Extract API.

        Args:
            api_key: Tavily API key (or use TAVILY_API_KEY env var)
            params: Additional parameters to pass to the extract API
            extract_format: Output format - "markdown" or "text"
            extract_depth: Extraction depth - "basic" (1 credit/5 URLs) or "advanced" (2 credits/5 URLs)
            chunk: Whether to chunk the extracted content
            chunk_size: Size of chunks when chunking is enabled
            chunking_strategy: Strategy to use for chunking
            name: Name of the reader
            description: Description of the reader
        """
        # NOTE(review): the SemanticChunking() default above is evaluated once at
        # class-definition time, so every reader created without an explicit
        # strategy shares ONE chunker instance. Left as-is because switching the
        # default to None would change behavior for callers passing None
        # explicitly — confirm SemanticChunking is stateless, or refactor with a
        # sentinel.
        # Initialize base Reader (handles chunk_size / strategy)
        super().__init__(
            chunk=chunk, chunk_size=chunk_size, chunking_strategy=chunking_strategy, name=name, description=description
        )

        # Tavily-specific attributes
        self.api_key = api_key
        self.params = params or {}
        self.extract_format = extract_format
        self.extract_depth = extract_depth

    @classmethod
    def get_supported_chunking_strategies(cls) -> List[ChunkingStrategyType]:
        """Get the list of supported chunking strategies for Tavily readers."""
        # Fixed: classmethods take `cls`, not `self`.
        return [
            ChunkingStrategyType.SEMANTIC_CHUNKER,
            ChunkingStrategyType.FIXED_SIZE_CHUNKER,
            ChunkingStrategyType.AGENTIC_CHUNKER,
            ChunkingStrategyType.DOCUMENT_CHUNKER,
            ChunkingStrategyType.RECURSIVE_CHUNKER,
        ]

    @classmethod
    def get_supported_content_types(cls) -> List[ContentType]:
        """Get the content types this reader accepts (URLs only)."""
        return [ContentType.URL]

    def _extract(self, url: str, name: Optional[str] = None) -> List[Document]:
        """
        Internal method to extract content from a URL using Tavily's Extract API.

        Args:
            url: The URL to extract content from
            name: Optional name for the document (defaults to URL)

        Returns:
            A list of documents containing the extracted content. On any
            failure an empty-content Document is returned so callers never
            see an exception from this method.
        """
        log_debug(f"Extracting content from: {url}")

        client = TavilyClient(api_key=self.api_key)

        # Prepare extract parameters.
        # Fixed: the tavily-python kwarg is `extract_depth` (the old key
        # "depth" was not a recognized parameter), and `extract_format` was
        # previously stored but never sent — it is now forwarded as `format`.
        extract_params = {
            "urls": [url],
            "extract_depth": self.extract_depth,
            "format": self.extract_format,
        }

        # Add optional params if provided (user params take precedence)
        if self.params:
            extract_params.update(self.params)

        try:
            # Call Tavily Extract API
            response = client.extract(**extract_params)

            # Extract content from response
            if not response or "results" not in response:
                logger.warning(f"No results received for URL: {url}")
                return [Document(name=name or url, id=url, content="")]

            results = response.get("results", [])
            if not results:
                logger.warning(f"Empty results for URL: {url}")
                return [Document(name=name or url, id=url, content="")]

            # Get the first result (since we're extracting a single URL)
            result = results[0]

            # Check if extraction failed
            if "failed_reason" in result:
                logger.warning(f"Extraction failed for {url}: {result['failed_reason']}")
                return [Document(name=name or url, id=url, content="")]

            # Get raw content
            content = result.get("raw_content", "")

            if content is None:
                content = ""
                logger.warning(f"No content received for URL: {url}")

            # Debug logging
            log_debug(f"Received content type: {type(content)}")
            log_debug(f"Content length: {len(content) if content else 0}")

            # Create documents — chunk only when enabled AND there is content
            documents = []
            if self.chunk and content:
                documents.extend(self.chunk_document(Document(name=name or url, id=url, content=content)))
            else:
                documents.append(Document(name=name or url, id=url, content=content))
            return documents

        except Exception as e:
            logger.error(f"Error extracting content from {url}: {e}")
            return [Document(name=name or url, id=url, content="")]

    async def _async_extract(self, url: str, name: Optional[str] = None) -> List[Document]:
        """
        Internal async method to extract content from a URL.

        Args:
            url: The URL to extract content from
            name: Optional name for the document

        Returns:
            A list of documents containing the extracted content
        """
        log_debug(f"Async extracting content from: {url}")

        # TavilyClient is synchronous; run it in a worker thread so the event
        # loop is not blocked.
        return await asyncio.to_thread(self._extract, url, name)

    def read(self, url: str, name: Optional[str] = None) -> List[Document]:
        """
        Reads content from a URL using Tavily Extract API.

        This is the public API method that users should call.

        Args:
            url: The URL to extract content from
            name: Optional name for the document

        Returns:
            A list of documents containing the extracted content
        """
        return self._extract(url, name)

    async def async_read(self, url: str, name: Optional[str] = None) -> List[Document]:
        """
        Asynchronously reads content from a URL using Tavily Extract API.

        This is the public API method that users should call for async operations.

        Args:
            url: The URL to extract content from
            name: Optional name for the document

        Returns:
            A list of documents containing the extracted content
        """
        return await self._async_extract(url, name)
|
@@ -8,7 +8,7 @@ from agno.knowledge.chunking.strategy import ChunkingStrategy, ChunkingStrategyT
|
|
|
8
8
|
from agno.knowledge.document.base import Document
|
|
9
9
|
from agno.knowledge.reader.base import Reader
|
|
10
10
|
from agno.knowledge.types import ContentType
|
|
11
|
-
from agno.utils.log import
|
|
11
|
+
from agno.utils.log import log_debug, log_error, log_warning
|
|
12
12
|
|
|
13
13
|
|
|
14
14
|
class TextReader(Reader):
|
|
@@ -37,12 +37,18 @@ class TextReader(Reader):
|
|
|
37
37
|
if isinstance(file, Path):
|
|
38
38
|
if not file.exists():
|
|
39
39
|
raise FileNotFoundError(f"Could not find file: {file}")
|
|
40
|
-
|
|
40
|
+
log_debug(f"Reading: {file}")
|
|
41
41
|
file_name = name or file.stem
|
|
42
42
|
file_contents = file.read_text(self.encoding or "utf-8")
|
|
43
43
|
else:
|
|
44
|
-
|
|
45
|
-
|
|
44
|
+
# Handle BytesIO and other file-like objects that may not have a name attribute
|
|
45
|
+
if name:
|
|
46
|
+
file_name = name
|
|
47
|
+
elif hasattr(file, "name") and file.name is not None:
|
|
48
|
+
file_name = file.name.split(".")[0]
|
|
49
|
+
else:
|
|
50
|
+
file_name = "text_file"
|
|
51
|
+
log_debug(f"Reading uploaded file: {file_name}")
|
|
46
52
|
file.seek(0)
|
|
47
53
|
file_contents = file.read().decode(self.encoding or "utf-8")
|
|
48
54
|
|
|
@@ -60,7 +66,7 @@ class TextReader(Reader):
|
|
|
60
66
|
return chunked_documents
|
|
61
67
|
return documents
|
|
62
68
|
except Exception as e:
|
|
63
|
-
|
|
69
|
+
log_error(f"Error reading: {file}: {e}")
|
|
64
70
|
return []
|
|
65
71
|
|
|
66
72
|
async def async_read(self, file: Union[Path, IO[Any]], name: Optional[str] = None) -> List[Document]:
|
|
@@ -69,7 +75,7 @@ class TextReader(Reader):
|
|
|
69
75
|
if not file.exists():
|
|
70
76
|
raise FileNotFoundError(f"Could not find file: {file}")
|
|
71
77
|
|
|
72
|
-
|
|
78
|
+
log_debug(f"Reading asynchronously: {file}")
|
|
73
79
|
file_name = name or file.stem
|
|
74
80
|
|
|
75
81
|
try:
|
|
@@ -78,11 +84,17 @@ class TextReader(Reader):
|
|
|
78
84
|
async with aiofiles.open(file, "r", encoding=self.encoding or "utf-8") as f:
|
|
79
85
|
file_contents = await f.read()
|
|
80
86
|
except ImportError:
|
|
81
|
-
|
|
87
|
+
log_warning("aiofiles not installed, using synchronous file I/O")
|
|
82
88
|
file_contents = file.read_text(self.encoding or "utf-8")
|
|
83
89
|
else:
|
|
84
|
-
|
|
85
|
-
|
|
90
|
+
# Handle BytesIO and other file-like objects that may not have a name attribute
|
|
91
|
+
if name:
|
|
92
|
+
file_name = name
|
|
93
|
+
elif hasattr(file, "name") and file.name is not None:
|
|
94
|
+
file_name = file.name.split(".")[0]
|
|
95
|
+
else:
|
|
96
|
+
file_name = "text_file"
|
|
97
|
+
log_debug(f"Reading uploaded file asynchronously: {file_name}")
|
|
86
98
|
file.seek(0)
|
|
87
99
|
file_contents = file.read().decode(self.encoding or "utf-8")
|
|
88
100
|
|
|
@@ -96,7 +108,7 @@ class TextReader(Reader):
|
|
|
96
108
|
return await self._async_chunk_document(document)
|
|
97
109
|
return [document]
|
|
98
110
|
except Exception as e:
|
|
99
|
-
|
|
111
|
+
log_error(f"Error reading asynchronously: {file}: {e}")
|
|
100
112
|
return []
|
|
101
113
|
|
|
102
114
|
async def _async_chunk_document(self, document: Document) -> List[Document]:
|
|
@@ -37,7 +37,7 @@ class WebSearchReader(Reader):
|
|
|
37
37
|
user_agent: str = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
|
|
38
38
|
|
|
39
39
|
# Search engine configuration
|
|
40
|
-
search_engine: Literal["duckduckgo"
|
|
40
|
+
search_engine: Literal["duckduckgo"] = "duckduckgo"
|
|
41
41
|
search_delay: float = 3.0 # Delay between search requests
|
|
42
42
|
max_search_retries: int = 2 # Retries for search operations
|
|
43
43
|
|
|
@@ -121,57 +121,10 @@ class WebSearchReader(Reader):
|
|
|
121
121
|
return []
|
|
122
122
|
return []
|
|
123
123
|
|
|
124
|
-
def _perform_google_search(self, query: str) -> List[Dict[str, str]]:
|
|
125
|
-
"""Perform web search using Google (requires googlesearch-python)"""
|
|
126
|
-
log_debug(f"Performing Google search for: {query}")
|
|
127
|
-
|
|
128
|
-
try:
|
|
129
|
-
from googlesearch import search
|
|
130
|
-
except ImportError:
|
|
131
|
-
logger.error("Google search requires 'googlesearch-python'. Install with: pip install googlesearch-python")
|
|
132
|
-
return []
|
|
133
|
-
|
|
134
|
-
for attempt in range(self.max_search_retries):
|
|
135
|
-
try:
|
|
136
|
-
self._respect_rate_limits()
|
|
137
|
-
|
|
138
|
-
results = []
|
|
139
|
-
# Use the basic search function without unsupported parameters
|
|
140
|
-
# The googlesearch-python library's search function only accepts basic parameters
|
|
141
|
-
search_results = search(query)
|
|
142
|
-
|
|
143
|
-
# Convert iterator to list and limit results
|
|
144
|
-
result_list = list(search_results)[: self.max_results]
|
|
145
|
-
|
|
146
|
-
for result in result_list:
|
|
147
|
-
# The search function returns URLs as strings
|
|
148
|
-
results.append(
|
|
149
|
-
{
|
|
150
|
-
"title": "", # Google search doesn't provide titles directly
|
|
151
|
-
"url": result,
|
|
152
|
-
"description": "", # Google search doesn't provide descriptions directly
|
|
153
|
-
}
|
|
154
|
-
)
|
|
155
|
-
|
|
156
|
-
log_debug(f"Found {len(results)} Google search results")
|
|
157
|
-
return results
|
|
158
|
-
|
|
159
|
-
except Exception as e:
|
|
160
|
-
logger.warning(f"Google search attempt {attempt + 1} failed: {e}")
|
|
161
|
-
if attempt < self.max_search_retries - 1:
|
|
162
|
-
time.sleep(self.search_delay)
|
|
163
|
-
else:
|
|
164
|
-
logger.error(f"All Google search attempts failed: {e}")
|
|
165
|
-
return []
|
|
166
|
-
|
|
167
|
-
return []
|
|
168
|
-
|
|
169
124
|
def _perform_web_search(self, query: str) -> List[Dict[str, str]]:
|
|
170
125
|
"""Perform web search using the configured search engine"""
|
|
171
126
|
if self.search_engine == "duckduckgo":
|
|
172
127
|
return self._perform_duckduckgo_search(query)
|
|
173
|
-
elif self.search_engine == "google":
|
|
174
|
-
return self._perform_google_search(query)
|
|
175
128
|
else:
|
|
176
129
|
logger.error(f"Unsupported search engine: {self.search_engine}")
|
|
177
130
|
return []
|
|
@@ -12,7 +12,7 @@ from agno.knowledge.chunking.strategy import ChunkingStrategy, ChunkingStrategyT
|
|
|
12
12
|
from agno.knowledge.document.base import Document
|
|
13
13
|
from agno.knowledge.reader.base import Reader
|
|
14
14
|
from agno.knowledge.types import ContentType
|
|
15
|
-
from agno.utils.log import log_debug,
|
|
15
|
+
from agno.utils.log import log_debug, log_error, log_warning
|
|
16
16
|
|
|
17
17
|
try:
|
|
18
18
|
from bs4 import BeautifulSoup, Tag # noqa: F401
|
|
@@ -229,21 +229,21 @@ class WebsiteReader(Reader):
|
|
|
229
229
|
# Log HTTP status errors but continue crawling other pages
|
|
230
230
|
# Skip redirect errors (3xx) as they should be handled by follow_redirects
|
|
231
231
|
if e.response.status_code >= 300 and e.response.status_code < 400:
|
|
232
|
-
|
|
232
|
+
log_debug(f"Redirect encountered for {current_url}, skipping: {e}")
|
|
233
233
|
else:
|
|
234
|
-
|
|
234
|
+
log_warning(f"HTTP status error while crawling {current_url}: {e}")
|
|
235
235
|
# For the initial URL, we should raise the error only if it's not a redirect
|
|
236
236
|
if current_url == url and not crawler_result and not (300 <= e.response.status_code < 400):
|
|
237
237
|
raise
|
|
238
238
|
except httpx.RequestError as e:
|
|
239
239
|
# Log request errors but continue crawling other pages
|
|
240
|
-
|
|
240
|
+
log_warning(f"Request error while crawling {current_url}: {e}")
|
|
241
241
|
# For the initial URL, we should raise the error
|
|
242
242
|
if current_url == url and not crawler_result:
|
|
243
243
|
raise
|
|
244
244
|
except Exception as e:
|
|
245
245
|
# Log other exceptions but continue crawling other pages
|
|
246
|
-
|
|
246
|
+
log_warning(f"Failed to crawl {current_url}: {e}")
|
|
247
247
|
# For the initial URL, we should raise the error
|
|
248
248
|
if current_url == url and not crawler_result:
|
|
249
249
|
# Wrap non-HTTP exceptions in a RequestError
|
|
@@ -332,19 +332,19 @@ class WebsiteReader(Reader):
|
|
|
332
332
|
|
|
333
333
|
except httpx.HTTPStatusError as e:
|
|
334
334
|
# Log HTTP status errors but continue crawling other pages
|
|
335
|
-
|
|
335
|
+
log_warning(f"HTTP status error while crawling asynchronously {current_url}: {e}")
|
|
336
336
|
# For the initial URL, we should raise the error
|
|
337
337
|
if current_url == url and not crawler_result:
|
|
338
338
|
raise
|
|
339
339
|
except httpx.RequestError as e:
|
|
340
340
|
# Log request errors but continue crawling other pages
|
|
341
|
-
|
|
341
|
+
log_warning(f"Request error while crawling asynchronously {current_url}: {e}")
|
|
342
342
|
# For the initial URL, we should raise the error
|
|
343
343
|
if current_url == url and not crawler_result:
|
|
344
344
|
raise
|
|
345
345
|
except Exception as e:
|
|
346
346
|
# Log other exceptions but continue crawling other pages
|
|
347
|
-
|
|
347
|
+
log_warning(f"Failed to crawl asynchronously {current_url}: {e}")
|
|
348
348
|
# For the initial URL, we should raise the error
|
|
349
349
|
if current_url == url and not crawler_result:
|
|
350
350
|
# Wrap non-HTTP exceptions in a RequestError
|
|
@@ -398,7 +398,7 @@ class WebsiteReader(Reader):
|
|
|
398
398
|
)
|
|
399
399
|
return documents
|
|
400
400
|
except (httpx.HTTPStatusError, httpx.RequestError) as e:
|
|
401
|
-
|
|
401
|
+
log_error(f"Error reading website {url}: {e}")
|
|
402
402
|
raise
|
|
403
403
|
|
|
404
404
|
async def async_read(self, url: str, name: Optional[str] = None) -> List[Document]:
|
|
@@ -451,5 +451,5 @@ class WebsiteReader(Reader):
|
|
|
451
451
|
|
|
452
452
|
return documents
|
|
453
453
|
except (httpx.HTTPStatusError, httpx.RequestError) as e:
|
|
454
|
-
|
|
454
|
+
log_error(f"Error reading website asynchronously {url}: {e}")
|
|
455
455
|
raise
|
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
import asyncio
|
|
1
2
|
from typing import List, Optional
|
|
2
3
|
|
|
3
4
|
from agno.knowledge.chunking.fixed import FixedSizeChunking
|
|
@@ -45,7 +46,38 @@ class WikipediaReader(Reader):
|
|
|
45
46
|
|
|
46
47
|
except wikipedia.exceptions.PageError:
|
|
47
48
|
summary = None
|
|
48
|
-
log_info("
|
|
49
|
+
log_info("Wikipedia Error: Page not found.")
|
|
50
|
+
|
|
51
|
+
# Only create Document if we successfully got a summary
|
|
52
|
+
if summary:
|
|
53
|
+
return [
|
|
54
|
+
Document(
|
|
55
|
+
name=topic,
|
|
56
|
+
meta_data={"topic": topic},
|
|
57
|
+
content=summary,
|
|
58
|
+
)
|
|
59
|
+
]
|
|
60
|
+
return []
|
|
61
|
+
|
|
62
|
+
async def async_read(self, topic: str) -> List[Document]:
|
|
63
|
+
"""
|
|
64
|
+
Asynchronously read content from Wikipedia.
|
|
65
|
+
|
|
66
|
+
Args:
|
|
67
|
+
topic: The Wikipedia topic to read
|
|
68
|
+
|
|
69
|
+
Returns:
|
|
70
|
+
A list of documents containing the Wikipedia summary
|
|
71
|
+
"""
|
|
72
|
+
log_debug(f"Async reading Wikipedia topic: {topic}")
|
|
73
|
+
summary = None
|
|
74
|
+
try:
|
|
75
|
+
# Run the synchronous wikipedia API call in a thread pool
|
|
76
|
+
summary = await asyncio.to_thread(wikipedia.summary, topic, auto_suggest=self.auto_suggest)
|
|
77
|
+
|
|
78
|
+
except wikipedia.exceptions.PageError:
|
|
79
|
+
summary = None
|
|
80
|
+
log_info("Wikipedia Error: Page not found.")
|
|
49
81
|
|
|
50
82
|
# Only create Document if we successfully got a summary
|
|
51
83
|
if summary:
|
agno/knowledge/types.py
CHANGED
agno/knowledge/utils.py
CHANGED
|
@@ -1,5 +1,6 @@
|
|
|
1
|
-
from typing import Dict, List
|
|
1
|
+
from typing import Any, Dict, List, Optional
|
|
2
2
|
|
|
3
|
+
from agno.knowledge.reader.base import Reader
|
|
3
4
|
from agno.knowledge.reader.reader_factory import ReaderFactory
|
|
4
5
|
from agno.knowledge.types import ContentType
|
|
5
6
|
from agno.utils.log import log_debug
|
|
@@ -75,8 +76,33 @@ def get_reader_info(reader_key: str) -> Dict:
|
|
|
75
76
|
raise ValueError(f"Unknown reader: {reader_key}. Error: {str(e)}")
|
|
76
77
|
|
|
77
78
|
|
|
78
|
-
def
|
|
79
|
-
"""Get information about
|
|
79
|
+
def get_reader_info_from_instance(reader: Reader, reader_id: str) -> Dict:
|
|
80
|
+
"""Get information about a reader instance."""
|
|
81
|
+
try:
|
|
82
|
+
reader_class = reader.__class__
|
|
83
|
+
supported_strategies = reader_class.get_supported_chunking_strategies()
|
|
84
|
+
supported_content_types = reader_class.get_supported_content_types()
|
|
85
|
+
|
|
86
|
+
return {
|
|
87
|
+
"id": reader_id,
|
|
88
|
+
"name": getattr(reader, "name", reader_class.__name__),
|
|
89
|
+
"description": getattr(reader, "description", f"Custom {reader_class.__name__}"),
|
|
90
|
+
"chunking_strategies": [strategy.value for strategy in supported_strategies],
|
|
91
|
+
"content_types": [ct.value for ct in supported_content_types],
|
|
92
|
+
}
|
|
93
|
+
except Exception as e:
|
|
94
|
+
raise ValueError(f"Failed to get info for reader '{reader_id}': {str(e)}")
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def get_all_readers_info(knowledge_instance: Optional[Any] = None) -> List[Dict]:
|
|
98
|
+
"""Get information about all available readers, including custom readers from a Knowledge instance.
|
|
99
|
+
|
|
100
|
+
Args:
|
|
101
|
+
knowledge_instance: Optional Knowledge instance to include custom readers from.
|
|
102
|
+
|
|
103
|
+
Returns:
|
|
104
|
+
List of reader info dictionaries.
|
|
105
|
+
"""
|
|
80
106
|
readers_info = []
|
|
81
107
|
keys = ReaderFactory.get_all_reader_keys()
|
|
82
108
|
for key in keys:
|
|
@@ -88,18 +114,35 @@ def get_all_readers_info() -> List[Dict]:
|
|
|
88
114
|
# Log the error but don't fail the entire request
|
|
89
115
|
log_debug(f"Skipping reader '{key}': {e}")
|
|
90
116
|
continue
|
|
117
|
+
|
|
118
|
+
# Add custom readers from knowledge instance if provided
|
|
119
|
+
if knowledge_instance is not None:
|
|
120
|
+
custom_readers = knowledge_instance.get_readers()
|
|
121
|
+
if isinstance(custom_readers, dict):
|
|
122
|
+
for reader_id, reader in custom_readers.items():
|
|
123
|
+
try:
|
|
124
|
+
reader_info = get_reader_info_from_instance(reader, reader_id)
|
|
125
|
+
# Only add if not already present (custom readers take precedence)
|
|
126
|
+
if not any(r["id"] == reader_id for r in readers_info):
|
|
127
|
+
readers_info.append(reader_info)
|
|
128
|
+
except ValueError as e:
|
|
129
|
+
log_debug(f"Skipping custom reader '{reader_id}': {e}")
|
|
130
|
+
continue
|
|
131
|
+
|
|
91
132
|
return readers_info
|
|
92
133
|
|
|
93
134
|
|
|
94
|
-
def get_content_types_to_readers_mapping() -> Dict[str, List[str]]:
|
|
135
|
+
def get_content_types_to_readers_mapping(knowledge_instance: Optional[Any] = None) -> Dict[str, List[str]]:
|
|
95
136
|
"""Get mapping of content types to list of reader IDs that support them.
|
|
96
137
|
|
|
138
|
+
Args:
|
|
139
|
+
knowledge_instance: Optional Knowledge instance to include custom readers from.
|
|
140
|
+
|
|
97
141
|
Returns:
|
|
98
142
|
Dictionary mapping content type strings (ContentType enum values) to list of reader IDs.
|
|
99
143
|
"""
|
|
100
144
|
content_type_mapping: Dict[str, List[str]] = {}
|
|
101
|
-
readers_info = get_all_readers_info()
|
|
102
|
-
|
|
145
|
+
readers_info = get_all_readers_info(knowledge_instance)
|
|
103
146
|
for reader_info in readers_info:
|
|
104
147
|
reader_id = reader_info["id"]
|
|
105
148
|
content_types = reader_info.get("content_types", [])
|
|
@@ -107,7 +150,9 @@ def get_content_types_to_readers_mapping() -> Dict[str, List[str]]:
|
|
|
107
150
|
for content_type in content_types:
|
|
108
151
|
if content_type not in content_type_mapping:
|
|
109
152
|
content_type_mapping[content_type] = []
|
|
110
|
-
|
|
153
|
+
# Avoid duplicates
|
|
154
|
+
if reader_id not in content_type_mapping[content_type]:
|
|
155
|
+
content_type_mapping[content_type].append(reader_id)
|
|
111
156
|
|
|
112
157
|
return content_type_mapping
|
|
113
158
|
|
|
@@ -129,12 +174,32 @@ def get_chunker_info(chunker_key: str) -> Dict:
|
|
|
129
174
|
class_name = chunker_class.__name__
|
|
130
175
|
docstring = chunker_class.__doc__ or f"{class_name} chunking strategy"
|
|
131
176
|
|
|
177
|
+
# Check class __init__ signature for chunk_size and overlap parameters
|
|
178
|
+
metadata = {}
|
|
179
|
+
import inspect
|
|
180
|
+
|
|
181
|
+
try:
|
|
182
|
+
sig = inspect.signature(chunker_class.__init__)
|
|
183
|
+
param_names = set(sig.parameters.keys())
|
|
184
|
+
|
|
185
|
+
# If class has chunk_size or max_chunk_size parameter, set default chunk_size
|
|
186
|
+
if "chunk_size" in param_names or "max_chunk_size" in param_names:
|
|
187
|
+
metadata["chunk_size"] = 5000
|
|
188
|
+
|
|
189
|
+
# If class has overlap parameter, set default overlap
|
|
190
|
+
if "overlap" in param_names:
|
|
191
|
+
metadata["chunk_overlap"] = 0
|
|
192
|
+
except Exception:
|
|
193
|
+
# If we can't inspect, skip metadata
|
|
194
|
+
pass
|
|
195
|
+
|
|
132
196
|
return {
|
|
133
197
|
"key": chunker_key,
|
|
134
198
|
"class_name": class_name,
|
|
135
199
|
"name": chunker_key,
|
|
136
200
|
"description": docstring.strip(),
|
|
137
201
|
"strategy_type": strategy_type.value,
|
|
202
|
+
"metadata": metadata,
|
|
138
203
|
}
|
|
139
204
|
except ValueError:
|
|
140
205
|
raise ValueError(f"Unknown chunker key: {chunker_key}")
|
agno/media.py
CHANGED
|
@@ -4,6 +4,8 @@ from uuid import uuid4
|
|
|
4
4
|
|
|
5
5
|
from pydantic import BaseModel, field_validator, model_validator
|
|
6
6
|
|
|
7
|
+
from agno.utils.log import log_error
|
|
8
|
+
|
|
7
9
|
|
|
8
10
|
class Image(BaseModel):
|
|
9
11
|
"""Unified Image class for all use cases (input, output, artifacts)"""
|
|
@@ -395,10 +397,20 @@ class File(BaseModel):
|
|
|
395
397
|
name: Optional[str] = None,
|
|
396
398
|
format: Optional[str] = None,
|
|
397
399
|
) -> "File":
|
|
398
|
-
"""Create File from base64 encoded content
|
|
400
|
+
"""Create File from base64 encoded content or plain text.
|
|
401
|
+
|
|
402
|
+
Handles both base64-encoded binary content and plain text content
|
|
403
|
+
(which is stored as UTF-8 strings for text/* MIME types).
|
|
404
|
+
"""
|
|
399
405
|
import base64
|
|
400
406
|
|
|
401
|
-
|
|
407
|
+
try:
|
|
408
|
+
content_bytes = base64.b64decode(base64_content)
|
|
409
|
+
except Exception:
|
|
410
|
+
# If not valid base64, it might be plain text content (text/csv, text/plain, etc.)
|
|
411
|
+
# which is stored as UTF-8 strings, not base64
|
|
412
|
+
content_bytes = base64_content.encode("utf-8")
|
|
413
|
+
|
|
402
414
|
return cls(
|
|
403
415
|
content=content_bytes,
|
|
404
416
|
id=id,
|
|
@@ -413,10 +425,14 @@ class File(BaseModel):
|
|
|
413
425
|
import httpx
|
|
414
426
|
|
|
415
427
|
if self.url:
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
428
|
+
try:
|
|
429
|
+
response = httpx.get(self.url)
|
|
430
|
+
content = response.content
|
|
431
|
+
mime_type = response.headers.get("Content-Type", "").split(";")[0]
|
|
432
|
+
return content, mime_type
|
|
433
|
+
except Exception:
|
|
434
|
+
log_error(f"Failed to download file from {self.url}")
|
|
435
|
+
return None
|
|
420
436
|
else:
|
|
421
437
|
return None
|
|
422
438
|
|
agno/memory/__init__.py
CHANGED
|
@@ -1,3 +1,16 @@
|
|
|
1
1
|
from agno.memory.manager import MemoryManager, UserMemory
|
|
2
|
+
from agno.memory.strategies import (
|
|
3
|
+
MemoryOptimizationStrategy,
|
|
4
|
+
MemoryOptimizationStrategyFactory,
|
|
5
|
+
MemoryOptimizationStrategyType,
|
|
6
|
+
SummarizeStrategy,
|
|
7
|
+
)
|
|
2
8
|
|
|
3
|
-
__all__ = [
|
|
9
|
+
__all__ = [
|
|
10
|
+
"MemoryManager",
|
|
11
|
+
"UserMemory",
|
|
12
|
+
"MemoryOptimizationStrategy",
|
|
13
|
+
"MemoryOptimizationStrategyType",
|
|
14
|
+
"MemoryOptimizationStrategyFactory",
|
|
15
|
+
"SummarizeStrategy",
|
|
16
|
+
]
|