agno 2.0.0rc2__py3-none-any.whl → 2.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +6009 -2874
- agno/api/api.py +2 -0
- agno/api/os.py +1 -1
- agno/culture/__init__.py +3 -0
- agno/culture/manager.py +956 -0
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/base.py +385 -6
- agno/db/dynamo/dynamo.py +388 -81
- agno/db/dynamo/schemas.py +47 -10
- agno/db/dynamo/utils.py +63 -4
- agno/db/firestore/firestore.py +435 -64
- agno/db/firestore/schemas.py +11 -0
- agno/db/firestore/utils.py +102 -4
- agno/db/gcs_json/gcs_json_db.py +384 -42
- agno/db/gcs_json/utils.py +60 -26
- agno/db/in_memory/in_memory_db.py +351 -66
- agno/db/in_memory/utils.py +60 -2
- agno/db/json/json_db.py +339 -48
- agno/db/json/utils.py +60 -26
- agno/db/migrations/manager.py +199 -0
- agno/db/migrations/v1_to_v2.py +510 -37
- agno/db/migrations/versions/__init__.py +0 -0
- agno/db/migrations/versions/v2_3_0.py +938 -0
- agno/db/mongo/__init__.py +15 -1
- agno/db/mongo/async_mongo.py +2036 -0
- agno/db/mongo/mongo.py +653 -76
- agno/db/mongo/schemas.py +13 -0
- agno/db/mongo/utils.py +80 -8
- agno/db/mysql/mysql.py +687 -25
- agno/db/mysql/schemas.py +61 -37
- agno/db/mysql/utils.py +60 -2
- agno/db/postgres/__init__.py +2 -1
- agno/db/postgres/async_postgres.py +2001 -0
- agno/db/postgres/postgres.py +676 -57
- agno/db/postgres/schemas.py +43 -18
- agno/db/postgres/utils.py +164 -2
- agno/db/redis/redis.py +344 -38
- agno/db/redis/schemas.py +18 -0
- agno/db/redis/utils.py +60 -2
- agno/db/schemas/__init__.py +2 -1
- agno/db/schemas/culture.py +120 -0
- agno/db/schemas/memory.py +13 -0
- agno/db/singlestore/schemas.py +26 -1
- agno/db/singlestore/singlestore.py +687 -53
- agno/db/singlestore/utils.py +60 -2
- agno/db/sqlite/__init__.py +2 -1
- agno/db/sqlite/async_sqlite.py +2371 -0
- agno/db/sqlite/schemas.py +24 -0
- agno/db/sqlite/sqlite.py +774 -85
- agno/db/sqlite/utils.py +168 -5
- agno/db/surrealdb/__init__.py +3 -0
- agno/db/surrealdb/metrics.py +292 -0
- agno/db/surrealdb/models.py +309 -0
- agno/db/surrealdb/queries.py +71 -0
- agno/db/surrealdb/surrealdb.py +1361 -0
- agno/db/surrealdb/utils.py +147 -0
- agno/db/utils.py +50 -22
- agno/eval/accuracy.py +50 -43
- agno/eval/performance.py +6 -3
- agno/eval/reliability.py +6 -3
- agno/eval/utils.py +33 -16
- agno/exceptions.py +68 -1
- agno/filters.py +354 -0
- agno/guardrails/__init__.py +6 -0
- agno/guardrails/base.py +19 -0
- agno/guardrails/openai.py +144 -0
- agno/guardrails/pii.py +94 -0
- agno/guardrails/prompt_injection.py +52 -0
- agno/integrations/discord/client.py +1 -0
- agno/knowledge/chunking/agentic.py +13 -10
- agno/knowledge/chunking/fixed.py +1 -1
- agno/knowledge/chunking/semantic.py +40 -8
- agno/knowledge/chunking/strategy.py +59 -15
- agno/knowledge/embedder/aws_bedrock.py +9 -4
- agno/knowledge/embedder/azure_openai.py +54 -0
- agno/knowledge/embedder/base.py +2 -0
- agno/knowledge/embedder/cohere.py +184 -5
- agno/knowledge/embedder/fastembed.py +1 -1
- agno/knowledge/embedder/google.py +79 -1
- agno/knowledge/embedder/huggingface.py +9 -4
- agno/knowledge/embedder/jina.py +63 -0
- agno/knowledge/embedder/mistral.py +78 -11
- agno/knowledge/embedder/nebius.py +1 -1
- agno/knowledge/embedder/ollama.py +13 -0
- agno/knowledge/embedder/openai.py +37 -65
- agno/knowledge/embedder/sentence_transformer.py +8 -4
- agno/knowledge/embedder/vllm.py +262 -0
- agno/knowledge/embedder/voyageai.py +69 -16
- agno/knowledge/knowledge.py +595 -187
- agno/knowledge/reader/base.py +9 -2
- agno/knowledge/reader/csv_reader.py +8 -10
- agno/knowledge/reader/docx_reader.py +5 -6
- agno/knowledge/reader/field_labeled_csv_reader.py +290 -0
- agno/knowledge/reader/json_reader.py +6 -5
- agno/knowledge/reader/markdown_reader.py +13 -13
- agno/knowledge/reader/pdf_reader.py +43 -68
- agno/knowledge/reader/pptx_reader.py +101 -0
- agno/knowledge/reader/reader_factory.py +51 -6
- agno/knowledge/reader/s3_reader.py +3 -15
- agno/knowledge/reader/tavily_reader.py +194 -0
- agno/knowledge/reader/text_reader.py +13 -13
- agno/knowledge/reader/web_search_reader.py +2 -43
- agno/knowledge/reader/website_reader.py +43 -25
- agno/knowledge/reranker/__init__.py +3 -0
- agno/knowledge/types.py +9 -0
- agno/knowledge/utils.py +20 -0
- agno/media.py +339 -266
- agno/memory/manager.py +336 -82
- agno/models/aimlapi/aimlapi.py +2 -2
- agno/models/anthropic/claude.py +183 -37
- agno/models/aws/bedrock.py +52 -112
- agno/models/aws/claude.py +33 -1
- agno/models/azure/ai_foundry.py +33 -15
- agno/models/azure/openai_chat.py +25 -8
- agno/models/base.py +1011 -566
- agno/models/cerebras/cerebras.py +19 -13
- agno/models/cerebras/cerebras_openai.py +8 -5
- agno/models/cohere/chat.py +27 -1
- agno/models/cometapi/__init__.py +5 -0
- agno/models/cometapi/cometapi.py +57 -0
- agno/models/dashscope/dashscope.py +1 -0
- agno/models/deepinfra/deepinfra.py +2 -2
- agno/models/deepseek/deepseek.py +2 -2
- agno/models/fireworks/fireworks.py +2 -2
- agno/models/google/gemini.py +110 -37
- agno/models/groq/groq.py +28 -11
- agno/models/huggingface/huggingface.py +2 -1
- agno/models/internlm/internlm.py +2 -2
- agno/models/langdb/langdb.py +4 -4
- agno/models/litellm/chat.py +18 -1
- agno/models/litellm/litellm_openai.py +2 -2
- agno/models/llama_cpp/__init__.py +5 -0
- agno/models/llama_cpp/llama_cpp.py +22 -0
- agno/models/message.py +143 -4
- agno/models/meta/llama.py +27 -10
- agno/models/meta/llama_openai.py +5 -17
- agno/models/nebius/nebius.py +6 -6
- agno/models/nexus/__init__.py +3 -0
- agno/models/nexus/nexus.py +22 -0
- agno/models/nvidia/nvidia.py +2 -2
- agno/models/ollama/chat.py +60 -6
- agno/models/openai/chat.py +102 -43
- agno/models/openai/responses.py +103 -106
- agno/models/openrouter/openrouter.py +41 -3
- agno/models/perplexity/perplexity.py +4 -5
- agno/models/portkey/portkey.py +3 -3
- agno/models/requesty/__init__.py +5 -0
- agno/models/requesty/requesty.py +52 -0
- agno/models/response.py +81 -5
- agno/models/sambanova/sambanova.py +2 -2
- agno/models/siliconflow/__init__.py +5 -0
- agno/models/siliconflow/siliconflow.py +25 -0
- agno/models/together/together.py +2 -2
- agno/models/utils.py +254 -8
- agno/models/vercel/v0.py +2 -2
- agno/models/vertexai/__init__.py +0 -0
- agno/models/vertexai/claude.py +96 -0
- agno/models/vllm/vllm.py +1 -0
- agno/models/xai/xai.py +3 -2
- agno/os/app.py +543 -175
- agno/os/auth.py +24 -14
- agno/os/config.py +1 -0
- agno/os/interfaces/__init__.py +1 -0
- agno/os/interfaces/a2a/__init__.py +3 -0
- agno/os/interfaces/a2a/a2a.py +42 -0
- agno/os/interfaces/a2a/router.py +250 -0
- agno/os/interfaces/a2a/utils.py +924 -0
- agno/os/interfaces/agui/agui.py +23 -7
- agno/os/interfaces/agui/router.py +27 -3
- agno/os/interfaces/agui/utils.py +242 -142
- agno/os/interfaces/base.py +6 -2
- agno/os/interfaces/slack/router.py +81 -23
- agno/os/interfaces/slack/slack.py +29 -14
- agno/os/interfaces/whatsapp/router.py +11 -4
- agno/os/interfaces/whatsapp/whatsapp.py +14 -7
- agno/os/mcp.py +111 -54
- agno/os/middleware/__init__.py +7 -0
- agno/os/middleware/jwt.py +233 -0
- agno/os/router.py +556 -139
- agno/os/routers/evals/evals.py +71 -34
- agno/os/routers/evals/schemas.py +31 -31
- agno/os/routers/evals/utils.py +6 -5
- agno/os/routers/health.py +31 -0
- agno/os/routers/home.py +52 -0
- agno/os/routers/knowledge/knowledge.py +185 -38
- agno/os/routers/knowledge/schemas.py +82 -22
- agno/os/routers/memory/memory.py +158 -53
- agno/os/routers/memory/schemas.py +20 -16
- agno/os/routers/metrics/metrics.py +20 -8
- agno/os/routers/metrics/schemas.py +16 -16
- agno/os/routers/session/session.py +499 -38
- agno/os/schema.py +308 -198
- agno/os/utils.py +401 -41
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/azure_ai_foundry.py +2 -2
- agno/reasoning/deepseek.py +2 -2
- agno/reasoning/default.py +3 -1
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/groq.py +2 -2
- agno/reasoning/ollama.py +2 -2
- agno/reasoning/openai.py +7 -2
- agno/reasoning/vertexai.py +76 -0
- agno/run/__init__.py +6 -0
- agno/run/agent.py +266 -112
- agno/run/base.py +53 -24
- agno/run/team.py +252 -111
- agno/run/workflow.py +156 -45
- agno/session/agent.py +105 -89
- agno/session/summary.py +65 -25
- agno/session/team.py +176 -96
- agno/session/workflow.py +406 -40
- agno/team/team.py +3854 -1692
- agno/tools/brightdata.py +3 -3
- agno/tools/cartesia.py +3 -5
- agno/tools/dalle.py +9 -8
- agno/tools/decorator.py +4 -2
- agno/tools/desi_vocal.py +2 -2
- agno/tools/duckduckgo.py +15 -11
- agno/tools/e2b.py +20 -13
- agno/tools/eleven_labs.py +26 -28
- agno/tools/exa.py +21 -16
- agno/tools/fal.py +4 -4
- agno/tools/file.py +153 -23
- agno/tools/file_generation.py +350 -0
- agno/tools/firecrawl.py +4 -4
- agno/tools/function.py +257 -37
- agno/tools/giphy.py +2 -2
- agno/tools/gmail.py +238 -14
- agno/tools/google_drive.py +270 -0
- agno/tools/googlecalendar.py +36 -8
- agno/tools/googlesheets.py +20 -5
- agno/tools/jira.py +20 -0
- agno/tools/knowledge.py +3 -3
- agno/tools/lumalab.py +3 -3
- agno/tools/mcp/__init__.py +10 -0
- agno/tools/mcp/mcp.py +331 -0
- agno/tools/mcp/multi_mcp.py +347 -0
- agno/tools/mcp/params.py +24 -0
- agno/tools/mcp_toolbox.py +284 -0
- agno/tools/mem0.py +11 -17
- agno/tools/memori.py +1 -53
- agno/tools/memory.py +419 -0
- agno/tools/models/azure_openai.py +2 -2
- agno/tools/models/gemini.py +3 -3
- agno/tools/models/groq.py +3 -5
- agno/tools/models/nebius.py +7 -7
- agno/tools/models_labs.py +25 -15
- agno/tools/notion.py +204 -0
- agno/tools/openai.py +4 -9
- agno/tools/opencv.py +3 -3
- agno/tools/parallel.py +314 -0
- agno/tools/replicate.py +7 -7
- agno/tools/scrapegraph.py +58 -31
- agno/tools/searxng.py +2 -2
- agno/tools/serper.py +2 -2
- agno/tools/slack.py +18 -3
- agno/tools/spider.py +2 -2
- agno/tools/tavily.py +146 -0
- agno/tools/whatsapp.py +1 -1
- agno/tools/workflow.py +278 -0
- agno/tools/yfinance.py +12 -11
- agno/utils/agent.py +820 -0
- agno/utils/audio.py +27 -0
- agno/utils/common.py +90 -1
- agno/utils/events.py +222 -7
- agno/utils/gemini.py +181 -23
- agno/utils/hooks.py +57 -0
- agno/utils/http.py +111 -0
- agno/utils/knowledge.py +12 -5
- agno/utils/log.py +1 -0
- agno/utils/mcp.py +95 -5
- agno/utils/media.py +188 -10
- agno/utils/merge_dict.py +22 -1
- agno/utils/message.py +60 -0
- agno/utils/models/claude.py +40 -11
- agno/utils/models/cohere.py +1 -1
- agno/utils/models/watsonx.py +1 -1
- agno/utils/openai.py +1 -1
- agno/utils/print_response/agent.py +105 -21
- agno/utils/print_response/team.py +103 -38
- agno/utils/print_response/workflow.py +251 -34
- agno/utils/reasoning.py +22 -1
- agno/utils/serialize.py +32 -0
- agno/utils/streamlit.py +16 -10
- agno/utils/string.py +41 -0
- agno/utils/team.py +98 -9
- agno/utils/tools.py +1 -1
- agno/vectordb/base.py +23 -4
- agno/vectordb/cassandra/cassandra.py +65 -9
- agno/vectordb/chroma/chromadb.py +182 -38
- agno/vectordb/clickhouse/clickhousedb.py +64 -11
- agno/vectordb/couchbase/couchbase.py +105 -10
- agno/vectordb/lancedb/lance_db.py +183 -135
- agno/vectordb/langchaindb/langchaindb.py +25 -7
- agno/vectordb/lightrag/lightrag.py +17 -3
- agno/vectordb/llamaindex/__init__.py +3 -0
- agno/vectordb/llamaindex/llamaindexdb.py +46 -7
- agno/vectordb/milvus/milvus.py +126 -9
- agno/vectordb/mongodb/__init__.py +7 -1
- agno/vectordb/mongodb/mongodb.py +112 -7
- agno/vectordb/pgvector/pgvector.py +142 -21
- agno/vectordb/pineconedb/pineconedb.py +80 -8
- agno/vectordb/qdrant/qdrant.py +125 -39
- agno/vectordb/redis/__init__.py +9 -0
- agno/vectordb/redis/redisdb.py +694 -0
- agno/vectordb/singlestore/singlestore.py +111 -25
- agno/vectordb/surrealdb/surrealdb.py +31 -5
- agno/vectordb/upstashdb/upstashdb.py +76 -8
- agno/vectordb/weaviate/weaviate.py +86 -15
- agno/workflow/__init__.py +2 -0
- agno/workflow/agent.py +299 -0
- agno/workflow/condition.py +112 -18
- agno/workflow/loop.py +69 -10
- agno/workflow/parallel.py +266 -118
- agno/workflow/router.py +110 -17
- agno/workflow/step.py +645 -136
- agno/workflow/steps.py +65 -6
- agno/workflow/types.py +71 -33
- agno/workflow/workflow.py +2113 -300
- agno-2.3.0.dist-info/METADATA +618 -0
- agno-2.3.0.dist-info/RECORD +577 -0
- agno-2.3.0.dist-info/licenses/LICENSE +201 -0
- agno/knowledge/reader/url_reader.py +0 -128
- agno/tools/googlesearch.py +0 -98
- agno/tools/mcp.py +0 -610
- agno/utils/models/aws_claude.py +0 -170
- agno-2.0.0rc2.dist-info/METADATA +0 -355
- agno-2.0.0rc2.dist-info/RECORD +0 -515
- agno-2.0.0rc2.dist-info/licenses/LICENSE +0 -375
- {agno-2.0.0rc2.dist-info → agno-2.3.0.dist-info}/WHEEL +0 -0
- {agno-2.0.0rc2.dist-info → agno-2.3.0.dist-info}/top_level.txt +0 -0
agno/tools/parallel.py
ADDED
```diff
@@ -0,0 +1,314 @@
+import json
+from os import getenv
+from typing import Any, Dict, List, Optional
+
+from agno.tools import Toolkit
+from agno.utils.log import log_error
+
+try:
+    from parallel import Parallel as ParallelClient
+except ImportError:
+    raise ImportError("`parallel-web` not installed. Please install using `pip install parallel-web`")
+
+
+class CustomJSONEncoder(json.JSONEncoder):
+    """Custom JSON encoder that handles non-serializable types by converting them to strings."""
+
+    def default(self, obj):
+        try:
+            return super().default(obj)
+        except TypeError:
+            return str(obj)
+
+
+class ParallelTools(Toolkit):
+    """
+    ParallelTools provides access to Parallel's web search and extraction APIs.
+
+    Parallel offers powerful APIs optimized for AI agents:
+    - Search API: AI-optimized web search that returns relevant excerpts tailored for LLMs
+    - Extract API: Extract content from specific URLs in clean markdown format, handling JavaScript-heavy pages and PDFs
+
+    Args:
+        api_key (Optional[str]): Parallel API key. If not provided, will use PARALLEL_API_KEY environment variable.
+        enable_search (bool): Enable Search API functionality. Default is True.
+        enable_extract (bool): Enable Extract API functionality. Default is True.
+        all (bool): Enable all tools. Overrides individual flags when True. Default is False.
+        max_results (int): Default maximum number of results for search operations. Default is 10.
+        max_chars_per_result (int): Default maximum characters per result for search operations. Default is 10000.
+        beta_version (str): Beta API version header. Default is "search-extract-2025-10-10".
+        mode (Optional[str]): Default search mode. Options: "one-shot" or "agentic". Default is None.
+        include_domains (Optional[List[str]]): Default domains to restrict results to. Default is None.
+        exclude_domains (Optional[List[str]]): Default domains to exclude from results. Default is None.
+        max_age_seconds (Optional[int]): Default cache age threshold (minimum 600). Default is None.
+        timeout_seconds (Optional[float]): Default timeout for content retrieval. Default is None.
+        disable_cache_fallback (Optional[bool]): Default cache fallback behavior. Default is None.
+    """
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        enable_search: bool = True,
+        enable_extract: bool = True,
+        all: bool = False,
+        max_results: int = 10,
+        max_chars_per_result: int = 10000,
+        beta_version: str = "search-extract-2025-10-10",
+        mode: Optional[str] = None,
+        include_domains: Optional[List[str]] = None,
+        exclude_domains: Optional[List[str]] = None,
+        max_age_seconds: Optional[int] = None,
+        timeout_seconds: Optional[float] = None,
+        disable_cache_fallback: Optional[bool] = None,
+        **kwargs,
+    ):
+        self.api_key: Optional[str] = api_key or getenv("PARALLEL_API_KEY")
+        if not self.api_key:
+            log_error("PARALLEL_API_KEY not set. Please set the PARALLEL_API_KEY environment variable.")
+
+        self.max_results = max_results
+        self.max_chars_per_result = max_chars_per_result
+        self.beta_version = beta_version
+        self.mode = mode
+        self.include_domains = include_domains
+        self.exclude_domains = exclude_domains
+        self.max_age_seconds = max_age_seconds
+        self.timeout_seconds = timeout_seconds
+        self.disable_cache_fallback = disable_cache_fallback
+
+        self.parallel_client = ParallelClient(
+            api_key=self.api_key, default_headers={"parallel-beta": self.beta_version}
+        )
+
+        tools: List[Any] = []
+        if all or enable_search:
+            tools.append(self.parallel_search)
+        if all or enable_extract:
+            tools.append(self.parallel_extract)
+
+        super().__init__(name="parallel_tools", tools=tools, **kwargs)
+
+    def parallel_search(
+        self,
+        objective: Optional[str] = None,
+        search_queries: Optional[List[str]] = None,
+        max_results: Optional[int] = None,
+        max_chars_per_result: Optional[int] = None,
+    ) -> str:
+        """Use this function to search the web using Parallel's Search API with a natural language objective.
+        You must provide at least one of objective or search_queries.
+
+        Args:
+            objective (Optional[str]): Natural-language description of what the web search is trying to find.
+            search_queries (Optional[List[str]]): Traditional keyword queries with optional search operators.
+            max_results (Optional[int]): Upper bound on results returned. Overrides constructor default.
+            max_chars_per_result (Optional[int]): Upper bound on total characters per url for excerpts.
+
+        Returns:
+            str: A JSON formatted string containing the search results with URLs, titles, publish dates, and relevant excerpts.
+        """
+        try:
+            if not objective and not search_queries:
+                return json.dumps({"error": "Please provide at least one of: objective or search_queries"}, indent=2)
+
+            # Use instance defaults if not provided
+            final_max_results = max_results if max_results is not None else self.max_results
+
+            search_params: Dict[str, Any] = {
+                "max_results": final_max_results,
+            }
+
+            # Add objective if provided
+            if objective:
+                search_params["objective"] = objective
+
+            # Add search_queries if provided
+            if search_queries:
+                search_params["search_queries"] = search_queries
+
+            # Add mode from constructor default
+            if self.mode:
+                search_params["mode"] = self.mode
+
+            # Add excerpts configuration
+            excerpts_config: Dict[str, Any] = {}
+            final_max_chars = max_chars_per_result if max_chars_per_result is not None else self.max_chars_per_result
+            if final_max_chars is not None:
+                excerpts_config["max_chars_per_result"] = final_max_chars
+
+            if excerpts_config:
+                search_params["excerpts"] = excerpts_config
+
+            # Add source_policy from constructor defaults
+            source_policy: Dict[str, Any] = {}
+            if self.include_domains:
+                source_policy["include_domains"] = self.include_domains
+            if self.exclude_domains:
+                source_policy["exclude_domains"] = self.exclude_domains
+
+            if source_policy:
+                search_params["source_policy"] = source_policy
+
+            # Add fetch_policy from constructor defaults
+            fetch_policy: Dict[str, Any] = {}
+            if self.max_age_seconds is not None:
+                fetch_policy["max_age_seconds"] = self.max_age_seconds
+            if self.timeout_seconds is not None:
+                fetch_policy["timeout_seconds"] = self.timeout_seconds
+            if self.disable_cache_fallback is not None:
+                fetch_policy["disable_cache_fallback"] = self.disable_cache_fallback
+
+            if fetch_policy:
+                search_params["fetch_policy"] = fetch_policy
+
+            search_result = self.parallel_client.beta.search(**search_params)
+
+            # Use model_dump() if available, otherwise convert to dict
+            try:
+                if hasattr(search_result, "model_dump"):
+                    return json.dumps(search_result.model_dump(), cls=CustomJSONEncoder)
+            except Exception:
+                pass
+
+            # Manually format the results
+            formatted_results: Dict[str, Any] = {
+                "search_id": getattr(search_result, "search_id", ""),
+                "results": [],
+            }
+
+            if hasattr(search_result, "results") and search_result.results:
+                results_list: List[Dict[str, Any]] = []
+                for result in search_result.results:
+                    formatted_result: Dict[str, Any] = {
+                        "title": getattr(result, "title", ""),
+                        "url": getattr(result, "url", ""),
+                        "publish_date": getattr(result, "publish_date", ""),
+                        "excerpt": getattr(result, "excerpt", ""),
+                    }
+                    results_list.append(formatted_result)
+                formatted_results["results"] = results_list
+
+            if hasattr(search_result, "warnings"):
+                formatted_results["warnings"] = search_result.warnings
+
+            if hasattr(search_result, "usage"):
+                formatted_results["usage"] = search_result.usage
+
+            return json.dumps(formatted_results, cls=CustomJSONEncoder, indent=2)
+
+        except Exception as e:
+            log_error(f"Error searching Parallel for objective '{objective}': {e}")
+            return json.dumps({"error": f"Search failed: {str(e)}"}, indent=2)
+
+    def parallel_extract(
+        self,
+        urls: List[str],
+        objective: Optional[str] = None,
+        search_queries: Optional[List[str]] = None,
+        excerpts: bool = True,
+        max_chars_per_excerpt: Optional[int] = None,
+        full_content: bool = False,
+        max_chars_for_full_content: Optional[int] = None,
+    ) -> str:
+        """Use this function to extract content from specific URLs using Parallel's Extract API.
+
+        Args:
+            urls (List[str]): List of public URLs to extract content from.
+            objective (Optional[str]): Search focus to guide content extraction.
+            search_queries (Optional[List[str]]): Keywords for targeting relevant content.
+            excerpts (bool): Include relevant text snippets.
+            max_chars_per_excerpt (Optional[int]): Upper bound on total characters per url. Only used when excerpts is True.
+            full_content (bool): Include complete page text.
+            max_chars_for_full_content (Optional[int]): Limit on characters per url. Only used when full_content is True.
+
+        Returns:
+            str: A JSON formatted string containing extracted content with titles, publish dates, excerpts and/or full content.
+        """
+        try:
+            if not urls:
+                return json.dumps({"error": "Please provide at least one URL to extract"}, indent=2)
+
+            extract_params: Dict[str, Any] = {
+                "urls": urls,
+            }
+
+            # Add objective if provided
+            if objective:
+                extract_params["objective"] = objective
+
+            # Add search_queries if provided
+            if search_queries:
+                extract_params["search_queries"] = search_queries
+
+            # Add excerpts configuration
+            if excerpts and max_chars_per_excerpt is not None:
+                extract_params["excerpts"] = {"max_chars_per_result": max_chars_per_excerpt}
+            else:
+                extract_params["excerpts"] = excerpts
+
+            # Add full_content configuration
+            if full_content and max_chars_for_full_content is not None:
+                extract_params["full_content"] = {"max_chars_per_result": max_chars_for_full_content}
+            else:
+                extract_params["full_content"] = full_content
+
+            # Add fetch_policy from constructor defaults
+            fetch_policy: Dict[str, Any] = {}
+            if self.max_age_seconds is not None:
+                fetch_policy["max_age_seconds"] = self.max_age_seconds
+            if self.timeout_seconds is not None:
+                fetch_policy["timeout_seconds"] = self.timeout_seconds
+            if self.disable_cache_fallback is not None:
+                fetch_policy["disable_cache_fallback"] = self.disable_cache_fallback
+
+            if fetch_policy:
+                extract_params["fetch_policy"] = fetch_policy
+
+            extract_result = self.parallel_client.beta.extract(**extract_params)
+
+            # Use model_dump() if available, otherwise convert to dict
+            try:
+                if hasattr(extract_result, "model_dump"):
+                    return json.dumps(extract_result.model_dump(), cls=CustomJSONEncoder)
+            except Exception:
+                pass
+
+            # Manually format the results
+            formatted_results: Dict[str, Any] = {
+                "extract_id": getattr(extract_result, "extract_id", ""),
+                "results": [],
+                "errors": [],
+            }
+
+            if hasattr(extract_result, "results") and extract_result.results:
+                results_list: List[Dict[str, Any]] = []
+                for result in extract_result.results:
+                    formatted_result: Dict[str, Any] = {
+                        "url": getattr(result, "url", ""),
+                        "title": getattr(result, "title", ""),
+                        "publish_date": getattr(result, "publish_date", ""),
+                    }
+
+                    if excerpts and hasattr(result, "excerpts"):
+                        formatted_result["excerpts"] = result.excerpts
+
+                    if full_content and hasattr(result, "full_content"):
+                        formatted_result["full_content"] = result.full_content
+
+                    results_list.append(formatted_result)
+                formatted_results["results"] = results_list
+
+            if hasattr(extract_result, "errors") and extract_result.errors:
+                formatted_results["errors"] = extract_result.errors
+
+            if hasattr(extract_result, "warnings"):
+                formatted_results["warnings"] = extract_result.warnings
+
+            if hasattr(extract_result, "usage"):
+                formatted_results["usage"] = extract_result.usage
+
+            return json.dumps(formatted_results, cls=CustomJSONEncoder, indent=2)
+
+        except Exception as e:
+            log_error(f"Error extracting from Parallel: {e}")
+            return json.dumps({"error": f"Extract failed: {str(e)}"}, indent=2)
```
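For orientation, a minimal usage sketch of the new toolkit. It assumes PARALLEL_API_KEY is set and that the Agent falls back to its default model:

```python
from agno.agent import Agent
from agno.tools.parallel import ParallelTools

# Constructor values act as defaults that per-call arguments can override,
# e.g. parallel_search(max_results=...) beats the max_results set here.
agent = Agent(tools=[ParallelTools(max_results=5, include_domains=["arxiv.org"])])
agent.print_response("Find recent work on AI-optimized web search APIs")
```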
agno/tools/replicate.py
CHANGED
```diff
@@ -5,7 +5,7 @@ from urllib.parse import urlparse
 from uuid import uuid4
 
 from agno.agent import Agent
-from agno.media import
+from agno.media import Image, Video
 from agno.team.team import Team
 from agno.tools import Toolkit
 from agno.tools.function import ToolResult
@@ -72,9 +72,9 @@ class ReplicateTools(Toolkit):
             result_msg, media_artifact = self._parse_output(output)
             results.append(result_msg)
 
-            if isinstance(media_artifact,
+            if isinstance(media_artifact, Image):
                 images.append(media_artifact)
-            elif isinstance(media_artifact,
+            elif isinstance(media_artifact, Video):
                 videos.append(media_artifact)
 
         content = "\n".join(results)
@@ -87,7 +87,7 @@ class ReplicateTools(Toolkit):
             logger.error(f"Failed to generate media: {e}")
             return ToolResult(content=f"Error: {e}")
 
-    def _parse_output(self, output: FileOutput) -> Tuple[str, Union[
+    def _parse_output(self, output: FileOutput) -> Tuple[str, Union[Image, Video]]:
         """
         Parse the outputs from the replicate model.
         """
@@ -101,14 +101,14 @@ class ReplicateTools(Toolkit):
         video_extensions = {".mp4", ".mov", ".avi", ".mkv", ".flv", ".wmv", ".webm"}
 
         media_id = str(uuid4())
-        artifact: Union[
+        artifact: Union[Image, Video]
         media_type: str
 
         if ext in image_extensions:
-            artifact =
+            artifact = Image(id=media_id, url=output.url)
             media_type = "image"
        elif ext in video_extensions:
-            artifact =
+            artifact = Video(id=media_id, url=output.url)
             media_type = "video"
         else:
             logger.error(f"Unsupported media type with extension '{ext}' for URL: {output.url}")
```
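The substance of this change is the switch to the unified `agno.media.Image` and `agno.media.Video` artifact classes (the removed identifiers are truncated in this diff view). A standalone sketch of the extension-based dispatch that `_parse_output` performs; the image extension set is an assumption, since only the video set appears in the diff:

```python
from pathlib import Path
from typing import Union
from uuid import uuid4

from agno.media import Image, Video

IMAGE_EXTS = {".png", ".jpg", ".jpeg", ".gif", ".webp"}  # assumed set
VIDEO_EXTS = {".mp4", ".mov", ".avi", ".mkv", ".flv", ".wmv", ".webm"}

def artifact_for(url: str) -> Union[Image, Video]:
    # Mirror of _parse_output's dispatch: choose the artifact class
    # from the file extension of the generated media URL.
    ext = Path(url).suffix.lower()
    media_id = str(uuid4())
    if ext in IMAGE_EXTS:
        return Image(id=media_id, url=url)
    if ext in VIDEO_EXTS:
        return Video(id=media_id, url=url)
    raise ValueError(f"Unsupported media extension '{ext}' for URL: {url}")
```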
agno/tools/scrapegraph.py
CHANGED
```diff
@@ -3,6 +3,7 @@ from os import getenv
 from typing import Any, List, Optional
 
 from agno.tools import Toolkit
+from agno.utils.log import log_debug, log_error
 
 try:
     from scrapegraph_py import Client
@@ -23,11 +24,14 @@ class ScrapeGraphTools(Toolkit):
         enable_crawl: bool = False,
         enable_searchscraper: bool = False,
         enable_agentic_crawler: bool = False,
+        enable_scrape: bool = False,
+        render_heavy_js: bool = False,
         all: bool = False,
         **kwargs,
     ):
         self.api_key: Optional[str] = api_key or getenv("SGAI_API_KEY")
         self.client = Client(api_key=self.api_key)
+        self.render_heavy_js = render_heavy_js
 
         # Start with smartscraper by default
         # Only enable markdownify if smartscraper is False
@@ -45,6 +49,8 @@ class ScrapeGraphTools(Toolkit):
             tools.append(self.searchscraper)
         if enable_agentic_crawler or all:
             tools.append(self.agentic_crawler)
+        if enable_scrape or all:
+            tools.append(self.scrape)
 
         super().__init__(name="scrapegraph_tools", tools=tools, **kwargs)
 
@@ -57,10 +63,13 @@ class ScrapeGraphTools(Toolkit):
             The structured data extracted from the webpage
         """
         try:
+            log_debug(f"ScrapeGraph smartscraper request for URL: {url}")
             response = self.client.smartscraper(website_url=url, user_prompt=prompt)
             return json.dumps(response["result"])
         except Exception as e:
-
+            error_msg = f"Smartscraper failed: {str(e)}"
+            log_error(error_msg)
+            return f"Error: {error_msg}"
 
     def markdownify(self, url: str) -> str:
         """Convert a webpage to markdown format.
@@ -70,10 +79,13 @@ class ScrapeGraphTools(Toolkit):
             The markdown version of the webpage
         """
         try:
+            log_debug(f"ScrapeGraph markdownify request for URL: {url}")
             response = self.client.markdownify(website_url=url)
             return response["result"]
         except Exception as e:
-
+            error_msg = f"Markdownify failed: {str(e)}"
+            log_error(error_msg)
+            return f"Error: {error_msg}"
 
     def crawl(
         self,
@@ -100,10 +112,11 @@ class ScrapeGraphTools(Toolkit):
             The structured data extracted from the website
         """
         try:
+            log_debug(f"ScrapeGraph crawl request for URL: {url}")
             response = self.client.crawl(
                 url=url,
                 prompt=prompt,
-
+                data_schema=schema,
                 cache_website=cache_website,
                 depth=depth,
                 max_pages=max_pages,
@@ -112,7 +125,9 @@ class ScrapeGraphTools(Toolkit):
             )
             return json.dumps(response, indent=2)
         except Exception as e:
-
+            error_msg = f"Crawl failed: {str(e)}"
+            log_error(error_msg)
+            return f"Error: {error_msg}"
 
     def agentic_crawler(
         self,
@@ -143,21 +158,7 @@ class ScrapeGraphTools(Toolkit):
             JSON string containing the scraping results, including request_id, status, and extracted data
         """
         try:
-
-            if ai_extraction and not user_prompt:
-                return json.dumps({"error": "user_prompt is required when ai_extraction=True"})
-
-            # Validate URL format
-            if not url.strip():
-                return json.dumps({"error": "URL cannot be empty"})
-            if not (url.startswith("http://") or url.startswith("https://")):
-                return json.dumps({"error": "Invalid URL - must start with http:// or https://"})
-
-            # Validate steps
-            if not steps:
-                return json.dumps({"error": "Steps cannot be empty"})
-            if any(not step.strip() for step in steps):
-                return json.dumps({"error": "All steps must contain valid instructions"})
+            log_debug(f"ScrapeGraph agentic_crawler request for URL: {url}")
 
             # Prepare parameters for the API call
             params = {"url": url, "steps": steps, "use_session": use_session, "ai_extraction": ai_extraction}
@@ -170,26 +171,52 @@ class ScrapeGraphTools(Toolkit):
 
             # Call the agentic scraper API
             response = self.client.agenticscraper(**params)
-
             return json.dumps(response, indent=2)
 
         except Exception as e:
-
+            error_msg = f"Agentic crawler failed: {str(e)}"
+            log_error(error_msg)
+            return f"Error: {error_msg}"
 
-    def searchscraper(self,
+    def searchscraper(self, user_prompt: str) -> str:
         """Search the web and extract information from the web.
         Args:
-
+            user_prompt (str): Search query
         Returns:
             JSON of the search results
         """
         try:
-
-
-
-
-
-
-
+            log_debug(f"ScrapeGraph searchscraper request with prompt: {user_prompt}")
+            response = self.client.searchscraper(user_prompt=user_prompt)
+            return json.dumps(response["result"])
+        except Exception as e:
+            error_msg = f"Searchscraper failed: {str(e)}"
+            log_error(error_msg)
+            return f"Error: {error_msg}"
+
+    def scrape(
+        self,
+        website_url: str,
+        headers: Optional[dict] = None,
+    ) -> str:
+        """Get raw HTML content from a website using the ScrapeGraphAI scrape API.
+
+        Args:
+            website_url (str): The URL of the website to scrape
+            headers (Optional[dict]): Optional headers to send with the request
+
+        Returns:
+            JSON string containing the HTML content and metadata
+        """
+        try:
+            log_debug(f"ScrapeGraph scrape request for URL: {website_url}")
+            response = self.client.scrape(
+                website_url=website_url,
+                headers=headers,
+                render_heavy_js=self.render_heavy_js,
+            )
+            return json.dumps(response, indent=2)
         except Exception as e:
-
+            error_msg = f"Scrape failed: {str(e)}"
+            log_error(error_msg)
+            return f"Error: {error_msg}"
```
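A quick sketch of the new raw-HTML tool, assuming SGAI_API_KEY is set. Note that `render_heavy_js` is fixed at construction time rather than per call:

```python
from agno.tools.scrapegraph import ScrapeGraphTools

tools = ScrapeGraphTools(enable_scrape=True, render_heavy_js=True)
# Returns a JSON string with the HTML content and metadata.
html_json = tools.scrape("https://example.com", headers={"User-Agent": "agno"})
print(html_json)
```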
agno/tools/searxng.py
CHANGED
```diff
@@ -21,7 +21,7 @@ class Searxng(Toolkit):
         self.fixed_max_results = fixed_max_results
 
         tools: List[Any] = [
-            self.
+            self.search_web,
             self.image_search,
             self.it_search,
             self.map_search,
@@ -33,7 +33,7 @@ class Searxng(Toolkit):
 
         super().__init__(name="searxng", tools=tools, **kwargs)
 
-    def
+    def search_web(self, query: str, max_results: int = 5) -> str:
         """Use this function to search the web.
 
         Args:
```
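The previously truncated generic search method is now `search_web`. A minimal sketch, assuming a self-hosted SearxNG instance and that the toolkit takes a `host` argument as in earlier releases:

```python
from agno.tools.searxng import Searxng

searxng = Searxng(host="http://localhost:53153")  # host value is illustrative
print(searxng.search_web("agno agent framework", max_results=3))
```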
agno/tools/serper.py
CHANGED
```diff
@@ -44,7 +44,7 @@ class SerperTools(Toolkit):
 
         tools: List[Any] = []
         if all or enable_search:
-            tools.append(self.
+            tools.append(self.search_web)
         if all or enable_search_news:
             tools.append(self.search_news)
         if all or enable_search_scholar:
@@ -97,7 +97,7 @@ class SerperTools(Toolkit):
             log_error(f"Serper API error: {str(e)}")
             return {"success": False, "error": str(e)}
 
-    def
+    def search_web(
         self,
         query: str,
         num_results: Optional[int] = None,
```
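Same rename here: the tool registered under `enable_search` is now `search_web`. A hedged sketch, assuming SERPER_API_KEY is set in the environment:

```python
from agno.agent import Agent
from agno.tools.serper import SerperTools

agent = Agent(tools=[SerperTools(enable_search=True)])
agent.print_response("Latest releases of open-source agent frameworks")
```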
agno/tools/slack.py
CHANGED
```diff
@@ -16,6 +16,7 @@ class SlackTools(Toolkit):
     def __init__(
         self,
         token: Optional[str] = None,
+        markdown: bool = True,
         enable_send_message: bool = True,
         enable_send_message_thread: bool = True,
         enable_list_channels: bool = True,
@@ -23,10 +24,22 @@ class SlackTools(Toolkit):
         all: bool = False,
         **kwargs,
     ):
+        """
+        Initialize the SlackTools class.
+        Args:
+            token: The Slack API token. Defaults to the SLACK_TOKEN environment variable.
+            markdown: Whether to enable Slack markdown formatting. Defaults to True.
+            enable_send_message: Whether to enable the send_message tool. Defaults to True.
+            enable_send_message_thread: Whether to enable the send_message_thread tool. Defaults to True.
+            enable_list_channels: Whether to enable the list_channels tool. Defaults to True.
+            enable_get_channel_history: Whether to enable the get_channel_history tool. Defaults to True.
+            all: Whether to enable all tools. Defaults to False.
+        """
         self.token: Optional[str] = token or getenv("SLACK_TOKEN")
         if self.token is None or self.token == "":
             raise ValueError("SLACK_TOKEN is not set")
         self.client = WebClient(token=self.token)
+        self.markdown = markdown
 
         tools: List[Any] = []
         if enable_send_message or all:
@@ -52,7 +65,7 @@ class SlackTools(Toolkit):
             str: A JSON string containing the response from the Slack API.
         """
         try:
-            response = self.client.chat_postMessage(channel=channel, text=text)
+            response = self.client.chat_postMessage(channel=channel, text=text, mrkdwn=self.markdown)
             return json.dumps(response.data)
         except SlackApiError as e:
             logger.error(f"Error sending message: {e}")
@@ -65,13 +78,15 @@ class SlackTools(Toolkit):
         Args:
             channel (str): The channel ID or name to send the message to.
             text (str): The text of the message to send.
-            thread_ts (ts): The thread to reply to
+            thread_ts (ts): The thread to reply to.
 
         Returns:
             str: A JSON string containing the response from the Slack API.
         """
         try:
-            response = self.client.chat_postMessage(
+            response = self.client.chat_postMessage(
+                channel=channel, text=text, thread_ts=thread_ts, mrkdwn=self.markdown
+            )
             return json.dumps(response.data)
         except SlackApiError as e:
             logger.error(f"Error sending message: {e}")
```
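The new `markdown` flag is stored once on the toolkit and threaded into every `chat_postMessage` call as `mrkdwn`. A minimal sketch, assuming SLACK_TOKEN is set:

```python
from agno.tools.slack import SlackTools

slack = SlackTools(markdown=False)  # send plain text, no mrkdwn rendering
slack.send_message(channel="#general", text="*asterisks* stay literal here")
```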
agno/tools/spider.py
CHANGED
```diff
@@ -42,7 +42,7 @@ class SpiderTools(Toolkit):
 
         tools: List[Any] = []
         if enable_search or all:
-            tools.append(self.
+            tools.append(self.search_web)
         if enable_scrape or all:
             tools.append(self.scrape)
         if enable_crawl or all:
@@ -50,7 +50,7 @@ class SpiderTools(Toolkit):
 
         super().__init__(name="spider", tools=tools, **kwargs)
 
-    def
+    def search_web(self, query: str, max_results: int = 5) -> str:
         """Use this function to search the web.
         Args:
             query (str): The query to search the web with.
```
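SpiderTools gets the same `search_web` rename. A sketch, assuming the Spider API key is picked up from the environment (SPIDER_API_KEY in prior releases):

```python
from agno.tools.spider import SpiderTools

spider = SpiderTools()
print(spider.search_web("web crawling frameworks", max_results=5))
```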