agno 2.0.1__py3-none-any.whl → 2.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +6015 -2823
- agno/api/api.py +2 -0
- agno/api/os.py +1 -1
- agno/culture/__init__.py +3 -0
- agno/culture/manager.py +956 -0
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/base.py +385 -6
- agno/db/dynamo/dynamo.py +388 -81
- agno/db/dynamo/schemas.py +47 -10
- agno/db/dynamo/utils.py +63 -4
- agno/db/firestore/firestore.py +435 -64
- agno/db/firestore/schemas.py +11 -0
- agno/db/firestore/utils.py +102 -4
- agno/db/gcs_json/gcs_json_db.py +384 -42
- agno/db/gcs_json/utils.py +60 -26
- agno/db/in_memory/in_memory_db.py +351 -66
- agno/db/in_memory/utils.py +60 -2
- agno/db/json/json_db.py +339 -48
- agno/db/json/utils.py +60 -26
- agno/db/migrations/manager.py +199 -0
- agno/db/migrations/v1_to_v2.py +510 -37
- agno/db/migrations/versions/__init__.py +0 -0
- agno/db/migrations/versions/v2_3_0.py +938 -0
- agno/db/mongo/__init__.py +15 -1
- agno/db/mongo/async_mongo.py +2036 -0
- agno/db/mongo/mongo.py +653 -76
- agno/db/mongo/schemas.py +13 -0
- agno/db/mongo/utils.py +80 -8
- agno/db/mysql/mysql.py +687 -25
- agno/db/mysql/schemas.py +61 -37
- agno/db/mysql/utils.py +60 -2
- agno/db/postgres/__init__.py +2 -1
- agno/db/postgres/async_postgres.py +2001 -0
- agno/db/postgres/postgres.py +676 -57
- agno/db/postgres/schemas.py +43 -18
- agno/db/postgres/utils.py +164 -2
- agno/db/redis/redis.py +344 -38
- agno/db/redis/schemas.py +18 -0
- agno/db/redis/utils.py +60 -2
- agno/db/schemas/__init__.py +2 -1
- agno/db/schemas/culture.py +120 -0
- agno/db/schemas/memory.py +13 -0
- agno/db/singlestore/schemas.py +26 -1
- agno/db/singlestore/singlestore.py +687 -53
- agno/db/singlestore/utils.py +60 -2
- agno/db/sqlite/__init__.py +2 -1
- agno/db/sqlite/async_sqlite.py +2371 -0
- agno/db/sqlite/schemas.py +24 -0
- agno/db/sqlite/sqlite.py +774 -85
- agno/db/sqlite/utils.py +168 -5
- agno/db/surrealdb/__init__.py +3 -0
- agno/db/surrealdb/metrics.py +292 -0
- agno/db/surrealdb/models.py +309 -0
- agno/db/surrealdb/queries.py +71 -0
- agno/db/surrealdb/surrealdb.py +1361 -0
- agno/db/surrealdb/utils.py +147 -0
- agno/db/utils.py +50 -22
- agno/eval/accuracy.py +50 -43
- agno/eval/performance.py +6 -3
- agno/eval/reliability.py +6 -3
- agno/eval/utils.py +33 -16
- agno/exceptions.py +68 -1
- agno/filters.py +354 -0
- agno/guardrails/__init__.py +6 -0
- agno/guardrails/base.py +19 -0
- agno/guardrails/openai.py +144 -0
- agno/guardrails/pii.py +94 -0
- agno/guardrails/prompt_injection.py +52 -0
- agno/integrations/discord/client.py +1 -0
- agno/knowledge/chunking/agentic.py +13 -10
- agno/knowledge/chunking/fixed.py +1 -1
- agno/knowledge/chunking/semantic.py +40 -8
- agno/knowledge/chunking/strategy.py +59 -15
- agno/knowledge/embedder/aws_bedrock.py +9 -4
- agno/knowledge/embedder/azure_openai.py +54 -0
- agno/knowledge/embedder/base.py +2 -0
- agno/knowledge/embedder/cohere.py +184 -5
- agno/knowledge/embedder/fastembed.py +1 -1
- agno/knowledge/embedder/google.py +79 -1
- agno/knowledge/embedder/huggingface.py +9 -4
- agno/knowledge/embedder/jina.py +63 -0
- agno/knowledge/embedder/mistral.py +78 -11
- agno/knowledge/embedder/nebius.py +1 -1
- agno/knowledge/embedder/ollama.py +13 -0
- agno/knowledge/embedder/openai.py +37 -65
- agno/knowledge/embedder/sentence_transformer.py +8 -4
- agno/knowledge/embedder/vllm.py +262 -0
- agno/knowledge/embedder/voyageai.py +69 -16
- agno/knowledge/knowledge.py +594 -186
- agno/knowledge/reader/base.py +9 -2
- agno/knowledge/reader/csv_reader.py +8 -10
- agno/knowledge/reader/docx_reader.py +5 -6
- agno/knowledge/reader/field_labeled_csv_reader.py +290 -0
- agno/knowledge/reader/json_reader.py +6 -5
- agno/knowledge/reader/markdown_reader.py +13 -13
- agno/knowledge/reader/pdf_reader.py +43 -68
- agno/knowledge/reader/pptx_reader.py +101 -0
- agno/knowledge/reader/reader_factory.py +51 -6
- agno/knowledge/reader/s3_reader.py +3 -15
- agno/knowledge/reader/tavily_reader.py +194 -0
- agno/knowledge/reader/text_reader.py +13 -13
- agno/knowledge/reader/web_search_reader.py +2 -43
- agno/knowledge/reader/website_reader.py +43 -25
- agno/knowledge/reranker/__init__.py +2 -8
- agno/knowledge/types.py +9 -0
- agno/knowledge/utils.py +20 -0
- agno/media.py +72 -0
- agno/memory/manager.py +336 -82
- agno/models/aimlapi/aimlapi.py +2 -2
- agno/models/anthropic/claude.py +183 -37
- agno/models/aws/bedrock.py +52 -112
- agno/models/aws/claude.py +33 -1
- agno/models/azure/ai_foundry.py +33 -15
- agno/models/azure/openai_chat.py +25 -8
- agno/models/base.py +999 -519
- agno/models/cerebras/cerebras.py +19 -13
- agno/models/cerebras/cerebras_openai.py +8 -5
- agno/models/cohere/chat.py +27 -1
- agno/models/cometapi/__init__.py +5 -0
- agno/models/cometapi/cometapi.py +57 -0
- agno/models/dashscope/dashscope.py +1 -0
- agno/models/deepinfra/deepinfra.py +2 -2
- agno/models/deepseek/deepseek.py +2 -2
- agno/models/fireworks/fireworks.py +2 -2
- agno/models/google/gemini.py +103 -31
- agno/models/groq/groq.py +28 -11
- agno/models/huggingface/huggingface.py +2 -1
- agno/models/internlm/internlm.py +2 -2
- agno/models/langdb/langdb.py +4 -4
- agno/models/litellm/chat.py +18 -1
- agno/models/litellm/litellm_openai.py +2 -2
- agno/models/llama_cpp/__init__.py +5 -0
- agno/models/llama_cpp/llama_cpp.py +22 -0
- agno/models/message.py +139 -0
- agno/models/meta/llama.py +27 -10
- agno/models/meta/llama_openai.py +5 -17
- agno/models/nebius/nebius.py +6 -6
- agno/models/nexus/__init__.py +3 -0
- agno/models/nexus/nexus.py +22 -0
- agno/models/nvidia/nvidia.py +2 -2
- agno/models/ollama/chat.py +59 -5
- agno/models/openai/chat.py +69 -29
- agno/models/openai/responses.py +103 -106
- agno/models/openrouter/openrouter.py +41 -3
- agno/models/perplexity/perplexity.py +4 -5
- agno/models/portkey/portkey.py +3 -3
- agno/models/requesty/__init__.py +5 -0
- agno/models/requesty/requesty.py +52 -0
- agno/models/response.py +77 -1
- agno/models/sambanova/sambanova.py +2 -2
- agno/models/siliconflow/__init__.py +5 -0
- agno/models/siliconflow/siliconflow.py +25 -0
- agno/models/together/together.py +2 -2
- agno/models/utils.py +254 -8
- agno/models/vercel/v0.py +2 -2
- agno/models/vertexai/__init__.py +0 -0
- agno/models/vertexai/claude.py +96 -0
- agno/models/vllm/vllm.py +1 -0
- agno/models/xai/xai.py +3 -2
- agno/os/app.py +543 -178
- agno/os/auth.py +24 -14
- agno/os/config.py +1 -0
- agno/os/interfaces/__init__.py +1 -0
- agno/os/interfaces/a2a/__init__.py +3 -0
- agno/os/interfaces/a2a/a2a.py +42 -0
- agno/os/interfaces/a2a/router.py +250 -0
- agno/os/interfaces/a2a/utils.py +924 -0
- agno/os/interfaces/agui/agui.py +23 -7
- agno/os/interfaces/agui/router.py +27 -3
- agno/os/interfaces/agui/utils.py +242 -142
- agno/os/interfaces/base.py +6 -2
- agno/os/interfaces/slack/router.py +81 -23
- agno/os/interfaces/slack/slack.py +29 -14
- agno/os/interfaces/whatsapp/router.py +11 -4
- agno/os/interfaces/whatsapp/whatsapp.py +14 -7
- agno/os/mcp.py +111 -54
- agno/os/middleware/__init__.py +7 -0
- agno/os/middleware/jwt.py +233 -0
- agno/os/router.py +556 -139
- agno/os/routers/evals/evals.py +71 -34
- agno/os/routers/evals/schemas.py +31 -31
- agno/os/routers/evals/utils.py +6 -5
- agno/os/routers/health.py +31 -0
- agno/os/routers/home.py +52 -0
- agno/os/routers/knowledge/knowledge.py +185 -38
- agno/os/routers/knowledge/schemas.py +82 -22
- agno/os/routers/memory/memory.py +158 -53
- agno/os/routers/memory/schemas.py +20 -16
- agno/os/routers/metrics/metrics.py +20 -8
- agno/os/routers/metrics/schemas.py +16 -16
- agno/os/routers/session/session.py +499 -38
- agno/os/schema.py +308 -198
- agno/os/utils.py +401 -41
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/azure_ai_foundry.py +2 -2
- agno/reasoning/deepseek.py +2 -2
- agno/reasoning/default.py +3 -1
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/groq.py +2 -2
- agno/reasoning/ollama.py +2 -2
- agno/reasoning/openai.py +7 -2
- agno/reasoning/vertexai.py +76 -0
- agno/run/__init__.py +6 -0
- agno/run/agent.py +248 -94
- agno/run/base.py +44 -5
- agno/run/team.py +238 -97
- agno/run/workflow.py +144 -33
- agno/session/agent.py +105 -89
- agno/session/summary.py +65 -25
- agno/session/team.py +176 -96
- agno/session/workflow.py +406 -40
- agno/team/team.py +3854 -1610
- agno/tools/dalle.py +2 -4
- agno/tools/decorator.py +4 -2
- agno/tools/duckduckgo.py +15 -11
- agno/tools/e2b.py +14 -7
- agno/tools/eleven_labs.py +23 -25
- agno/tools/exa.py +21 -16
- agno/tools/file.py +153 -23
- agno/tools/file_generation.py +350 -0
- agno/tools/firecrawl.py +4 -4
- agno/tools/function.py +250 -30
- agno/tools/gmail.py +238 -14
- agno/tools/google_drive.py +270 -0
- agno/tools/googlecalendar.py +36 -8
- agno/tools/googlesheets.py +20 -5
- agno/tools/jira.py +20 -0
- agno/tools/knowledge.py +3 -3
- agno/tools/mcp/__init__.py +10 -0
- agno/tools/mcp/mcp.py +331 -0
- agno/tools/mcp/multi_mcp.py +347 -0
- agno/tools/mcp/params.py +24 -0
- agno/tools/mcp_toolbox.py +284 -0
- agno/tools/mem0.py +11 -17
- agno/tools/memori.py +1 -53
- agno/tools/memory.py +419 -0
- agno/tools/models/nebius.py +5 -5
- agno/tools/models_labs.py +20 -10
- agno/tools/notion.py +204 -0
- agno/tools/parallel.py +314 -0
- agno/tools/scrapegraph.py +58 -31
- agno/tools/searxng.py +2 -2
- agno/tools/serper.py +2 -2
- agno/tools/slack.py +18 -3
- agno/tools/spider.py +2 -2
- agno/tools/tavily.py +146 -0
- agno/tools/whatsapp.py +1 -1
- agno/tools/workflow.py +278 -0
- agno/tools/yfinance.py +12 -11
- agno/utils/agent.py +820 -0
- agno/utils/audio.py +27 -0
- agno/utils/common.py +90 -1
- agno/utils/events.py +217 -2
- agno/utils/gemini.py +180 -22
- agno/utils/hooks.py +57 -0
- agno/utils/http.py +111 -0
- agno/utils/knowledge.py +12 -5
- agno/utils/log.py +1 -0
- agno/utils/mcp.py +92 -2
- agno/utils/media.py +188 -10
- agno/utils/merge_dict.py +22 -1
- agno/utils/message.py +60 -0
- agno/utils/models/claude.py +40 -11
- agno/utils/print_response/agent.py +105 -21
- agno/utils/print_response/team.py +103 -38
- agno/utils/print_response/workflow.py +251 -34
- agno/utils/reasoning.py +22 -1
- agno/utils/serialize.py +32 -0
- agno/utils/streamlit.py +16 -10
- agno/utils/string.py +41 -0
- agno/utils/team.py +98 -9
- agno/utils/tools.py +1 -1
- agno/vectordb/base.py +23 -4
- agno/vectordb/cassandra/cassandra.py +65 -9
- agno/vectordb/chroma/chromadb.py +182 -38
- agno/vectordb/clickhouse/clickhousedb.py +64 -11
- agno/vectordb/couchbase/couchbase.py +105 -10
- agno/vectordb/lancedb/lance_db.py +124 -133
- agno/vectordb/langchaindb/langchaindb.py +25 -7
- agno/vectordb/lightrag/lightrag.py +17 -3
- agno/vectordb/llamaindex/__init__.py +3 -0
- agno/vectordb/llamaindex/llamaindexdb.py +46 -7
- agno/vectordb/milvus/milvus.py +126 -9
- agno/vectordb/mongodb/__init__.py +7 -1
- agno/vectordb/mongodb/mongodb.py +112 -7
- agno/vectordb/pgvector/pgvector.py +142 -21
- agno/vectordb/pineconedb/pineconedb.py +80 -8
- agno/vectordb/qdrant/qdrant.py +125 -39
- agno/vectordb/redis/__init__.py +9 -0
- agno/vectordb/redis/redisdb.py +694 -0
- agno/vectordb/singlestore/singlestore.py +111 -25
- agno/vectordb/surrealdb/surrealdb.py +31 -5
- agno/vectordb/upstashdb/upstashdb.py +76 -8
- agno/vectordb/weaviate/weaviate.py +86 -15
- agno/workflow/__init__.py +2 -0
- agno/workflow/agent.py +299 -0
- agno/workflow/condition.py +112 -18
- agno/workflow/loop.py +69 -10
- agno/workflow/parallel.py +266 -118
- agno/workflow/router.py +110 -17
- agno/workflow/step.py +638 -129
- agno/workflow/steps.py +65 -6
- agno/workflow/types.py +61 -23
- agno/workflow/workflow.py +2085 -272
- {agno-2.0.1.dist-info → agno-2.3.0.dist-info}/METADATA +182 -58
- agno-2.3.0.dist-info/RECORD +577 -0
- agno/knowledge/reader/url_reader.py +0 -128
- agno/tools/googlesearch.py +0 -98
- agno/tools/mcp.py +0 -610
- agno/utils/models/aws_claude.py +0 -170
- agno-2.0.1.dist-info/RECORD +0 -515
- {agno-2.0.1.dist-info → agno-2.3.0.dist-info}/WHEEL +0 -0
- {agno-2.0.1.dist-info → agno-2.3.0.dist-info}/licenses/LICENSE +0 -0
- {agno-2.0.1.dist-info → agno-2.3.0.dist-info}/top_level.txt +0 -0
agno/tools/models_labs.py
CHANGED
|
@@ -4,10 +4,8 @@ from os import getenv
|
|
|
4
4
|
from typing import Any, Dict, List, Optional, Union
|
|
5
5
|
from uuid import uuid4
|
|
6
6
|
|
|
7
|
-
from agno.agent import Agent
|
|
8
7
|
from agno.media import Audio, Image, Video
|
|
9
8
|
from agno.models.response import FileType
|
|
10
|
-
from agno.team import Team
|
|
11
9
|
from agno.tools import Toolkit
|
|
12
10
|
from agno.tools.function import ToolResult
|
|
13
11
|
from agno.utils.log import log_debug, log_info, logger
|
|
@@ -22,12 +20,14 @@ MODELS_LAB_URLS = {
|
|
|
22
20
|
"MP4": "https://modelslab.com/api/v6/video/text2video",
|
|
23
21
|
"MP3": "https://modelslab.com/api/v6/voice/music_gen",
|
|
24
22
|
"GIF": "https://modelslab.com/api/v6/video/text2video",
|
|
23
|
+
"WAV": "https://modelslab.com/api/v6/voice/sfx",
|
|
25
24
|
}
|
|
26
25
|
|
|
27
26
|
MODELS_LAB_FETCH_URLS = {
|
|
28
27
|
"MP4": "https://modelslab.com/api/v6/video/fetch",
|
|
29
28
|
"MP3": "https://modelslab.com/api/v6/voice/fetch",
|
|
30
29
|
"GIF": "https://modelslab.com/api/v6/video/fetch",
|
|
30
|
+
"WAV": "https://modelslab.com/api/v6/voice/fetch",
|
|
31
31
|
}
|
|
32
32
|
|
|
33
33
|
|
|
@@ -78,6 +78,13 @@ class ModelsLabTools(Toolkit):
|
|
|
78
78
|
"output_type": self.file_type.value,
|
|
79
79
|
}
|
|
80
80
|
base_payload |= video_template # Use |= instead of update()
|
|
81
|
+
elif self.file_type == FileType.WAV:
|
|
82
|
+
sfx_template = {
|
|
83
|
+
"duration": 10,
|
|
84
|
+
"output_format": "wav",
|
|
85
|
+
"temp": False,
|
|
86
|
+
}
|
|
87
|
+
base_payload |= sfx_template # Use |= instead of update()
|
|
81
88
|
else:
|
|
82
89
|
audio_template = {
|
|
83
90
|
"base64": False,
|
|
@@ -101,7 +108,7 @@ class ModelsLabTools(Toolkit):
|
|
|
101
108
|
elif self.file_type == FileType.GIF:
|
|
102
109
|
image_artifact = Image(id=str(media_id), url=media_url)
|
|
103
110
|
artifacts["images"].append(image_artifact)
|
|
104
|
-
elif self.file_type
|
|
111
|
+
elif self.file_type in [FileType.MP3, FileType.WAV]:
|
|
105
112
|
audio_artifact = Audio(id=str(media_id), url=media_url)
|
|
106
113
|
artifacts["audios"].append(audio_artifact)
|
|
107
114
|
|
|
@@ -131,7 +138,7 @@ class ModelsLabTools(Toolkit):
|
|
|
131
138
|
|
|
132
139
|
return False
|
|
133
140
|
|
|
134
|
-
def generate_media(self,
|
|
141
|
+
def generate_media(self, prompt: str) -> ToolResult:
|
|
135
142
|
"""Generate media (video, image, or audio) given a prompt."""
|
|
136
143
|
if not self.api_key:
|
|
137
144
|
return ToolResult(content="Please set the MODELS_LAB_API_KEY")
|
|
@@ -157,7 +164,6 @@ class ModelsLabTools(Toolkit):
|
|
|
157
164
|
return ToolResult(content=f"Error: {result['error']}")
|
|
158
165
|
|
|
159
166
|
eta = result.get("eta")
|
|
160
|
-
url_links = result.get("future_links")
|
|
161
167
|
media_id = str(uuid4())
|
|
162
168
|
|
|
163
169
|
# Collect all media artifacts
|
|
@@ -165,17 +171,21 @@ class ModelsLabTools(Toolkit):
|
|
|
165
171
|
all_videos = []
|
|
166
172
|
all_audios = []
|
|
167
173
|
|
|
174
|
+
if self.file_type == FileType.WAV:
|
|
175
|
+
url_links = result.get("output", [])
|
|
176
|
+
else:
|
|
177
|
+
url_links = result.get("future_links")
|
|
168
178
|
for media_url in url_links:
|
|
169
179
|
artifacts = self._create_media_artifacts(media_id, media_url, str(eta))
|
|
170
180
|
all_images.extend(artifacts["images"])
|
|
171
181
|
all_videos.extend(artifacts["videos"])
|
|
172
182
|
all_audios.extend(artifacts["audios"])
|
|
173
183
|
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
184
|
+
if self.wait_for_completion and isinstance(eta, int):
|
|
185
|
+
if self._wait_for_media(media_id, eta):
|
|
186
|
+
log_info("Media generation completed successfully")
|
|
187
|
+
else:
|
|
188
|
+
logger.warning("Media generation timed out")
|
|
179
189
|
|
|
180
190
|
# Return ToolResult with appropriate media artifacts
|
|
181
191
|
return ToolResult(
|
agno/tools/notion.py
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import os
|
|
3
|
+
from typing import Any, Dict, List, Optional, cast
|
|
4
|
+
|
|
5
|
+
from agno.tools import Toolkit
|
|
6
|
+
from agno.utils.log import log_debug, logger
|
|
7
|
+
|
|
8
|
+
try:
|
|
9
|
+
from notion_client import Client
|
|
10
|
+
except ImportError:
|
|
11
|
+
raise ImportError("`notion-client` not installed. Please install using `pip install notion-client`")
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class NotionTools(Toolkit):
|
|
15
|
+
"""
|
|
16
|
+
Notion toolkit for creating and managing Notion pages.
|
|
17
|
+
|
|
18
|
+
Args:
|
|
19
|
+
api_key (Optional[str]): Notion API key (integration token). If not provided, uses NOTION_API_KEY env var.
|
|
20
|
+
database_id (Optional[str]): The ID of the database to work with. If not provided, uses NOTION_DATABASE_ID env var.
|
|
21
|
+
enable_create_page (bool): Enable creating pages. Default is True.
|
|
22
|
+
enable_update_page (bool): Enable updating pages. Default is True.
|
|
23
|
+
enable_search_pages (bool): Enable searching pages. Default is True.
|
|
24
|
+
all (bool): Enable all tools. Overrides individual flags when True. Default is False.
|
|
25
|
+
"""
|
|
26
|
+
|
|
27
|
+
def __init__(
|
|
28
|
+
self,
|
|
29
|
+
api_key: Optional[str] = None,
|
|
30
|
+
database_id: Optional[str] = None,
|
|
31
|
+
enable_create_page: bool = True,
|
|
32
|
+
enable_update_page: bool = True,
|
|
33
|
+
enable_search_pages: bool = True,
|
|
34
|
+
all: bool = False,
|
|
35
|
+
**kwargs,
|
|
36
|
+
):
|
|
37
|
+
self.api_key = api_key or os.getenv("NOTION_API_KEY")
|
|
38
|
+
self.database_id = database_id or os.getenv("NOTION_DATABASE_ID")
|
|
39
|
+
|
|
40
|
+
if not self.api_key:
|
|
41
|
+
raise ValueError(
|
|
42
|
+
"Notion API key is required. Either pass api_key parameter or set NOTION_API_KEY environment variable."
|
|
43
|
+
)
|
|
44
|
+
if not self.database_id:
|
|
45
|
+
raise ValueError(
|
|
46
|
+
"Notion database ID is required. Either pass database_id parameter or set NOTION_DATABASE_ID environment variable."
|
|
47
|
+
)
|
|
48
|
+
|
|
49
|
+
self.client = Client(auth=self.api_key)
|
|
50
|
+
|
|
51
|
+
tools: List[Any] = []
|
|
52
|
+
if all or enable_create_page:
|
|
53
|
+
tools.append(self.create_page)
|
|
54
|
+
if all or enable_update_page:
|
|
55
|
+
tools.append(self.update_page)
|
|
56
|
+
if all or enable_search_pages:
|
|
57
|
+
tools.append(self.search_pages)
|
|
58
|
+
|
|
59
|
+
super().__init__(name="notion_tools", tools=tools, **kwargs)
|
|
60
|
+
|
|
61
|
+
def create_page(self, title: str, tag: str, content: str) -> str:
|
|
62
|
+
"""Create a new page in the Notion database with a title, tag, and content.
|
|
63
|
+
|
|
64
|
+
Args:
|
|
65
|
+
title (str): The title of the page
|
|
66
|
+
tag (str): The tag/category for the page (e.g., travel, tech, general-blogs, fashion, documents)
|
|
67
|
+
content (str): The content to add to the page
|
|
68
|
+
|
|
69
|
+
Returns:
|
|
70
|
+
str: JSON string with page creation details
|
|
71
|
+
"""
|
|
72
|
+
try:
|
|
73
|
+
log_debug(f"Creating Notion page with title: {title}, tag: {tag}")
|
|
74
|
+
|
|
75
|
+
# Create the page in the database
|
|
76
|
+
new_page = cast(
|
|
77
|
+
Dict[str, Any],
|
|
78
|
+
self.client.pages.create(
|
|
79
|
+
parent={"database_id": self.database_id},
|
|
80
|
+
properties={"Name": {"title": [{"text": {"content": title}}]}, "Tag": {"select": {"name": tag}}},
|
|
81
|
+
children=[
|
|
82
|
+
{
|
|
83
|
+
"object": "block",
|
|
84
|
+
"type": "paragraph",
|
|
85
|
+
"paragraph": {"rich_text": [{"type": "text", "text": {"content": content}}]},
|
|
86
|
+
}
|
|
87
|
+
],
|
|
88
|
+
),
|
|
89
|
+
)
|
|
90
|
+
|
|
91
|
+
result = {"success": True, "page_id": new_page["id"], "url": new_page["url"], "title": title, "tag": tag}
|
|
92
|
+
return json.dumps(result, indent=2)
|
|
93
|
+
|
|
94
|
+
except Exception as e:
|
|
95
|
+
logger.exception(e)
|
|
96
|
+
return json.dumps({"success": False, "error": str(e)})
|
|
97
|
+
|
|
98
|
+
def update_page(self, page_id: str, content: str) -> str:
|
|
99
|
+
"""Add content to an existing Notion page.
|
|
100
|
+
|
|
101
|
+
Args:
|
|
102
|
+
page_id (str): The ID of the page to update
|
|
103
|
+
content (str): The content to append to the page
|
|
104
|
+
|
|
105
|
+
Returns:
|
|
106
|
+
str: JSON string with update status
|
|
107
|
+
"""
|
|
108
|
+
try:
|
|
109
|
+
log_debug(f"Updating Notion page: {page_id}")
|
|
110
|
+
|
|
111
|
+
# Append content to the page
|
|
112
|
+
self.client.blocks.children.append(
|
|
113
|
+
block_id=page_id,
|
|
114
|
+
children=[
|
|
115
|
+
{
|
|
116
|
+
"object": "block",
|
|
117
|
+
"type": "paragraph",
|
|
118
|
+
"paragraph": {"rich_text": [{"type": "text", "text": {"content": content}}]},
|
|
119
|
+
}
|
|
120
|
+
],
|
|
121
|
+
)
|
|
122
|
+
|
|
123
|
+
result = {"success": True, "page_id": page_id, "message": "Content added successfully"}
|
|
124
|
+
return json.dumps(result, indent=2)
|
|
125
|
+
|
|
126
|
+
except Exception as e:
|
|
127
|
+
logger.exception(e)
|
|
128
|
+
return json.dumps({"success": False, "error": str(e)})
|
|
129
|
+
|
|
130
|
+
def search_pages(self, tag: str) -> str:
|
|
131
|
+
"""Search for pages in the database by tag.
|
|
132
|
+
|
|
133
|
+
Args:
|
|
134
|
+
tag (str): The tag to search for
|
|
135
|
+
|
|
136
|
+
Returns:
|
|
137
|
+
str: JSON string with list of matching pages
|
|
138
|
+
"""
|
|
139
|
+
try:
|
|
140
|
+
log_debug(f"Searching for pages with tag: {tag}")
|
|
141
|
+
|
|
142
|
+
import httpx
|
|
143
|
+
|
|
144
|
+
headers = {
|
|
145
|
+
"Authorization": f"Bearer {self.api_key}",
|
|
146
|
+
"Notion-Version": "2022-06-28",
|
|
147
|
+
"Content-Type": "application/json",
|
|
148
|
+
}
|
|
149
|
+
|
|
150
|
+
payload = {"filter": {"property": "Tag", "select": {"equals": tag}}}
|
|
151
|
+
|
|
152
|
+
# The SDK client does not support the query method
|
|
153
|
+
response = httpx.post(
|
|
154
|
+
f"https://api.notion.com/v1/databases/{self.database_id}/query",
|
|
155
|
+
headers=headers,
|
|
156
|
+
json=payload,
|
|
157
|
+
timeout=30.0,
|
|
158
|
+
)
|
|
159
|
+
|
|
160
|
+
if response.status_code != 200:
|
|
161
|
+
return json.dumps(
|
|
162
|
+
{
|
|
163
|
+
"success": False,
|
|
164
|
+
"error": f"API request failed with status {response.status_code}",
|
|
165
|
+
"message": response.text,
|
|
166
|
+
}
|
|
167
|
+
)
|
|
168
|
+
|
|
169
|
+
data = response.json()
|
|
170
|
+
pages = []
|
|
171
|
+
|
|
172
|
+
for page in data.get("results", []):
|
|
173
|
+
try:
|
|
174
|
+
page_title = "Untitled"
|
|
175
|
+
if page.get("properties", {}).get("Name", {}).get("title"):
|
|
176
|
+
page_title = page["properties"]["Name"]["title"][0]["text"]["content"]
|
|
177
|
+
|
|
178
|
+
page_tag = None
|
|
179
|
+
if page.get("properties", {}).get("Tag", {}).get("select"):
|
|
180
|
+
page_tag = page["properties"]["Tag"]["select"]["name"]
|
|
181
|
+
|
|
182
|
+
page_info = {
|
|
183
|
+
"page_id": page["id"],
|
|
184
|
+
"title": page_title,
|
|
185
|
+
"tag": page_tag,
|
|
186
|
+
"url": page.get("url", ""),
|
|
187
|
+
}
|
|
188
|
+
pages.append(page_info)
|
|
189
|
+
except Exception as page_error:
|
|
190
|
+
log_debug(f"Error parsing page: {page_error}")
|
|
191
|
+
continue
|
|
192
|
+
|
|
193
|
+
result = {"success": True, "count": len(pages), "pages": pages}
|
|
194
|
+
return json.dumps(result, indent=2)
|
|
195
|
+
|
|
196
|
+
except Exception as e:
|
|
197
|
+
logger.exception(e)
|
|
198
|
+
return json.dumps(
|
|
199
|
+
{
|
|
200
|
+
"success": False,
|
|
201
|
+
"error": str(e),
|
|
202
|
+
"message": "Failed to search pages. Make sure the database is shared with the integration and has a 'Tag' property.",
|
|
203
|
+
}
|
|
204
|
+
)
|
agno/tools/parallel.py
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from os import getenv
|
|
3
|
+
from typing import Any, Dict, List, Optional
|
|
4
|
+
|
|
5
|
+
from agno.tools import Toolkit
|
|
6
|
+
from agno.utils.log import log_error
|
|
7
|
+
|
|
8
|
+
try:
|
|
9
|
+
from parallel import Parallel as ParallelClient
|
|
10
|
+
except ImportError:
|
|
11
|
+
raise ImportError("`parallel-web` not installed. Please install using `pip install parallel-web`")
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class CustomJSONEncoder(json.JSONEncoder):
|
|
15
|
+
"""Custom JSON encoder that handles non-serializable types by converting them to strings."""
|
|
16
|
+
|
|
17
|
+
def default(self, obj):
|
|
18
|
+
try:
|
|
19
|
+
return super().default(obj)
|
|
20
|
+
except TypeError:
|
|
21
|
+
return str(obj)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class ParallelTools(Toolkit):
|
|
25
|
+
"""
|
|
26
|
+
ParallelTools provides access to Parallel's web search and extraction APIs.
|
|
27
|
+
|
|
28
|
+
Parallel offers powerful APIs optimized for AI agents:
|
|
29
|
+
- Search API: AI-optimized web search that returns relevant excerpts tailored for LLMs
|
|
30
|
+
- Extract API: Extract content from specific URLs in clean markdown format, handling JavaScript-heavy pages and PDFs
|
|
31
|
+
|
|
32
|
+
Args:
|
|
33
|
+
api_key (Optional[str]): Parallel API key. If not provided, will use PARALLEL_API_KEY environment variable.
|
|
34
|
+
enable_search (bool): Enable Search API functionality. Default is True.
|
|
35
|
+
enable_extract (bool): Enable Extract API functionality. Default is True.
|
|
36
|
+
all (bool): Enable all tools. Overrides individual flags when True. Default is False.
|
|
37
|
+
max_results (int): Default maximum number of results for search operations. Default is 10.
|
|
38
|
+
max_chars_per_result (int): Default maximum characters per result for search operations. Default is 10000.
|
|
39
|
+
beta_version (str): Beta API version header. Default is "search-extract-2025-10-10".
|
|
40
|
+
mode (Optional[str]): Default search mode. Options: "one-shot" or "agentic". Default is None.
|
|
41
|
+
include_domains (Optional[List[str]]): Default domains to restrict results to. Default is None.
|
|
42
|
+
exclude_domains (Optional[List[str]]): Default domains to exclude from results. Default is None.
|
|
43
|
+
max_age_seconds (Optional[int]): Default cache age threshold (minimum 600). Default is None.
|
|
44
|
+
timeout_seconds (Optional[float]): Default timeout for content retrieval. Default is None.
|
|
45
|
+
disable_cache_fallback (Optional[bool]): Default cache fallback behavior. Default is None.
|
|
46
|
+
"""
|
|
47
|
+
|
|
48
|
+
def __init__(
|
|
49
|
+
self,
|
|
50
|
+
api_key: Optional[str] = None,
|
|
51
|
+
enable_search: bool = True,
|
|
52
|
+
enable_extract: bool = True,
|
|
53
|
+
all: bool = False,
|
|
54
|
+
max_results: int = 10,
|
|
55
|
+
max_chars_per_result: int = 10000,
|
|
56
|
+
beta_version: str = "search-extract-2025-10-10",
|
|
57
|
+
mode: Optional[str] = None,
|
|
58
|
+
include_domains: Optional[List[str]] = None,
|
|
59
|
+
exclude_domains: Optional[List[str]] = None,
|
|
60
|
+
max_age_seconds: Optional[int] = None,
|
|
61
|
+
timeout_seconds: Optional[float] = None,
|
|
62
|
+
disable_cache_fallback: Optional[bool] = None,
|
|
63
|
+
**kwargs,
|
|
64
|
+
):
|
|
65
|
+
self.api_key: Optional[str] = api_key or getenv("PARALLEL_API_KEY")
|
|
66
|
+
if not self.api_key:
|
|
67
|
+
log_error("PARALLEL_API_KEY not set. Please set the PARALLEL_API_KEY environment variable.")
|
|
68
|
+
|
|
69
|
+
self.max_results = max_results
|
|
70
|
+
self.max_chars_per_result = max_chars_per_result
|
|
71
|
+
self.beta_version = beta_version
|
|
72
|
+
self.mode = mode
|
|
73
|
+
self.include_domains = include_domains
|
|
74
|
+
self.exclude_domains = exclude_domains
|
|
75
|
+
self.max_age_seconds = max_age_seconds
|
|
76
|
+
self.timeout_seconds = timeout_seconds
|
|
77
|
+
self.disable_cache_fallback = disable_cache_fallback
|
|
78
|
+
|
|
79
|
+
self.parallel_client = ParallelClient(
|
|
80
|
+
api_key=self.api_key, default_headers={"parallel-beta": self.beta_version}
|
|
81
|
+
)
|
|
82
|
+
|
|
83
|
+
tools: List[Any] = []
|
|
84
|
+
if all or enable_search:
|
|
85
|
+
tools.append(self.parallel_search)
|
|
86
|
+
if all or enable_extract:
|
|
87
|
+
tools.append(self.parallel_extract)
|
|
88
|
+
|
|
89
|
+
super().__init__(name="parallel_tools", tools=tools, **kwargs)
|
|
90
|
+
|
|
91
|
+
def parallel_search(
|
|
92
|
+
self,
|
|
93
|
+
objective: Optional[str] = None,
|
|
94
|
+
search_queries: Optional[List[str]] = None,
|
|
95
|
+
max_results: Optional[int] = None,
|
|
96
|
+
max_chars_per_result: Optional[int] = None,
|
|
97
|
+
) -> str:
|
|
98
|
+
"""Use this function to search the web using Parallel's Search API with a natural language objective.
|
|
99
|
+
You must provide at least one of objective or search_queries.
|
|
100
|
+
|
|
101
|
+
Args:
|
|
102
|
+
objective (Optional[str]): Natural-language description of what the web search is trying to find.
|
|
103
|
+
search_queries (Optional[List[str]]): Traditional keyword queries with optional search operators.
|
|
104
|
+
max_results (Optional[int]): Upper bound on results returned. Overrides constructor default.
|
|
105
|
+
max_chars_per_result (Optional[int]): Upper bound on total characters per url for excerpts.
|
|
106
|
+
|
|
107
|
+
Returns:
|
|
108
|
+
str: A JSON formatted string containing the search results with URLs, titles, publish dates, and relevant excerpts.
|
|
109
|
+
"""
|
|
110
|
+
try:
|
|
111
|
+
if not objective and not search_queries:
|
|
112
|
+
return json.dumps({"error": "Please provide at least one of: objective or search_queries"}, indent=2)
|
|
113
|
+
|
|
114
|
+
# Use instance defaults if not provided
|
|
115
|
+
final_max_results = max_results if max_results is not None else self.max_results
|
|
116
|
+
|
|
117
|
+
search_params: Dict[str, Any] = {
|
|
118
|
+
"max_results": final_max_results,
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
# Add objective if provided
|
|
122
|
+
if objective:
|
|
123
|
+
search_params["objective"] = objective
|
|
124
|
+
|
|
125
|
+
# Add search_queries if provided
|
|
126
|
+
if search_queries:
|
|
127
|
+
search_params["search_queries"] = search_queries
|
|
128
|
+
|
|
129
|
+
# Add mode from constructor default
|
|
130
|
+
if self.mode:
|
|
131
|
+
search_params["mode"] = self.mode
|
|
132
|
+
|
|
133
|
+
# Add excerpts configuration
|
|
134
|
+
excerpts_config: Dict[str, Any] = {}
|
|
135
|
+
final_max_chars = max_chars_per_result if max_chars_per_result is not None else self.max_chars_per_result
|
|
136
|
+
if final_max_chars is not None:
|
|
137
|
+
excerpts_config["max_chars_per_result"] = final_max_chars
|
|
138
|
+
|
|
139
|
+
if excerpts_config:
|
|
140
|
+
search_params["excerpts"] = excerpts_config
|
|
141
|
+
|
|
142
|
+
# Add source_policy from constructor defaults
|
|
143
|
+
source_policy: Dict[str, Any] = {}
|
|
144
|
+
if self.include_domains:
|
|
145
|
+
source_policy["include_domains"] = self.include_domains
|
|
146
|
+
if self.exclude_domains:
|
|
147
|
+
source_policy["exclude_domains"] = self.exclude_domains
|
|
148
|
+
|
|
149
|
+
if source_policy:
|
|
150
|
+
search_params["source_policy"] = source_policy
|
|
151
|
+
|
|
152
|
+
# Add fetch_policy from constructor defaults
|
|
153
|
+
fetch_policy: Dict[str, Any] = {}
|
|
154
|
+
if self.max_age_seconds is not None:
|
|
155
|
+
fetch_policy["max_age_seconds"] = self.max_age_seconds
|
|
156
|
+
if self.timeout_seconds is not None:
|
|
157
|
+
fetch_policy["timeout_seconds"] = self.timeout_seconds
|
|
158
|
+
if self.disable_cache_fallback is not None:
|
|
159
|
+
fetch_policy["disable_cache_fallback"] = self.disable_cache_fallback
|
|
160
|
+
|
|
161
|
+
if fetch_policy:
|
|
162
|
+
search_params["fetch_policy"] = fetch_policy
|
|
163
|
+
|
|
164
|
+
search_result = self.parallel_client.beta.search(**search_params)
|
|
165
|
+
|
|
166
|
+
# Use model_dump() if available, otherwise convert to dict
|
|
167
|
+
try:
|
|
168
|
+
if hasattr(search_result, "model_dump"):
|
|
169
|
+
return json.dumps(search_result.model_dump(), cls=CustomJSONEncoder)
|
|
170
|
+
except Exception:
|
|
171
|
+
pass
|
|
172
|
+
|
|
173
|
+
# Manually format the results
|
|
174
|
+
formatted_results: Dict[str, Any] = {
|
|
175
|
+
"search_id": getattr(search_result, "search_id", ""),
|
|
176
|
+
"results": [],
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
if hasattr(search_result, "results") and search_result.results:
|
|
180
|
+
results_list: List[Dict[str, Any]] = []
|
|
181
|
+
for result in search_result.results:
|
|
182
|
+
formatted_result: Dict[str, Any] = {
|
|
183
|
+
"title": getattr(result, "title", ""),
|
|
184
|
+
"url": getattr(result, "url", ""),
|
|
185
|
+
"publish_date": getattr(result, "publish_date", ""),
|
|
186
|
+
"excerpt": getattr(result, "excerpt", ""),
|
|
187
|
+
}
|
|
188
|
+
results_list.append(formatted_result)
|
|
189
|
+
formatted_results["results"] = results_list
|
|
190
|
+
|
|
191
|
+
if hasattr(search_result, "warnings"):
|
|
192
|
+
formatted_results["warnings"] = search_result.warnings
|
|
193
|
+
|
|
194
|
+
if hasattr(search_result, "usage"):
|
|
195
|
+
formatted_results["usage"] = search_result.usage
|
|
196
|
+
|
|
197
|
+
return json.dumps(formatted_results, cls=CustomJSONEncoder, indent=2)
|
|
198
|
+
|
|
199
|
+
except Exception as e:
|
|
200
|
+
log_error(f"Error searching Parallel for objective '{objective}': {e}")
|
|
201
|
+
return json.dumps({"error": f"Search failed: {str(e)}"}, indent=2)
|
|
202
|
+
|
|
203
|
+
def parallel_extract(
    self,
    urls: List[str],
    objective: Optional[str] = None,
    search_queries: Optional[List[str]] = None,
    excerpts: bool = True,
    max_chars_per_excerpt: Optional[int] = None,
    full_content: bool = False,
    max_chars_for_full_content: Optional[int] = None,
) -> str:
    """Use this function to extract content from specific URLs using Parallel's Extract API.

    Args:
        urls (List[str]): List of public URLs to extract content from.
        objective (Optional[str]): Search focus to guide content extraction.
        search_queries (Optional[List[str]]): Keywords for targeting relevant content.
        excerpts (bool): Include relevant text snippets.
        max_chars_per_excerpt (Optional[int]): Upper bound on total characters per url. Only used when excerpts is True.
        full_content (bool): Include complete page text.
        max_chars_for_full_content (Optional[int]): Limit on characters per url. Only used when full_content is True.

    Returns:
        str: A JSON formatted string containing extracted content with titles, publish dates, excerpts and/or full content.
    """
    try:
        # Guard clause: nothing to extract without at least one URL.
        if not urls:
            return json.dumps({"error": "Please provide at least one URL to extract"}, indent=2)

        params: Dict[str, Any] = {"urls": urls}

        # Optional steering inputs are only sent when the caller supplied them.
        if objective:
            params["objective"] = objective
        if search_queries:
            params["search_queries"] = search_queries

        # Excerpt config: a dict with a per-result char cap when one is given,
        # otherwise the plain boolean flag.
        params["excerpts"] = (
            {"max_chars_per_result": max_chars_per_excerpt}
            if excerpts and max_chars_per_excerpt is not None
            else excerpts
        )

        # Full-content config follows the same dict-or-flag shape.
        params["full_content"] = (
            {"max_chars_per_result": max_chars_for_full_content}
            if full_content and max_chars_for_full_content is not None
            else full_content
        )

        # Fetch policy comes from constructor-level defaults; only non-None
        # fields are forwarded, and the key is omitted entirely when empty.
        policy: Dict[str, Any] = {
            field: value
            for field, value in (
                ("max_age_seconds", self.max_age_seconds),
                ("timeout_seconds", self.timeout_seconds),
                ("disable_cache_fallback", self.disable_cache_fallback),
            )
            if value is not None
        }
        if policy:
            params["fetch_policy"] = policy

        response = self.parallel_client.beta.extract(**params)

        # Prefer the SDK's own serialization when the response supports it.
        try:
            if hasattr(response, "model_dump"):
                return json.dumps(response.model_dump(), cls=CustomJSONEncoder)
        except Exception:
            pass  # best effort only; fall through to manual formatting

        # Manual fallback: pull the fields we care about off the response object.
        payload: Dict[str, Any] = {
            "extract_id": getattr(response, "extract_id", ""),
            "results": [],
            "errors": [],
        }

        if hasattr(response, "results") and response.results:
            rows: List[Dict[str, Any]] = []
            for item in response.results:
                row: Dict[str, Any] = {
                    "url": getattr(item, "url", ""),
                    "title": getattr(item, "title", ""),
                    "publish_date": getattr(item, "publish_date", ""),
                }
                # Content fields are included only when requested AND present.
                if excerpts and hasattr(item, "excerpts"):
                    row["excerpts"] = item.excerpts
                if full_content and hasattr(item, "full_content"):
                    row["full_content"] = item.full_content
                rows.append(row)
            payload["results"] = rows

        if hasattr(response, "errors") and response.errors:
            payload["errors"] = response.errors
        if hasattr(response, "warnings"):
            payload["warnings"] = response.warnings
        if hasattr(response, "usage"):
            payload["usage"] = response.usage

        return json.dumps(payload, cls=CustomJSONEncoder, indent=2)

    except Exception as e:
        log_error(f"Error extracting from Parallel: {e}")
        return json.dumps({"error": f"Extract failed: {str(e)}"}, indent=2)