agno 2.3.26__py3-none-any.whl → 2.4.1__py3-none-any.whl
This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- agno/agent/__init__.py +4 -0
- agno/agent/agent.py +1368 -541
- agno/agent/remote.py +13 -0
- agno/db/base.py +339 -0
- agno/db/postgres/async_postgres.py +116 -12
- agno/db/postgres/postgres.py +1242 -25
- agno/db/postgres/schemas.py +48 -1
- agno/db/sqlite/async_sqlite.py +119 -4
- agno/db/sqlite/schemas.py +51 -0
- agno/db/sqlite/sqlite.py +1186 -13
- agno/db/utils.py +37 -1
- agno/integrations/discord/client.py +12 -1
- agno/knowledge/__init__.py +4 -0
- agno/knowledge/chunking/code.py +1 -1
- agno/knowledge/chunking/semantic.py +1 -1
- agno/knowledge/chunking/strategy.py +4 -0
- agno/knowledge/filesystem.py +412 -0
- agno/knowledge/knowledge.py +3722 -2182
- agno/knowledge/protocol.py +134 -0
- agno/knowledge/reader/arxiv_reader.py +2 -2
- agno/knowledge/reader/base.py +9 -7
- agno/knowledge/reader/csv_reader.py +236 -13
- agno/knowledge/reader/docx_reader.py +2 -2
- agno/knowledge/reader/field_labeled_csv_reader.py +169 -5
- agno/knowledge/reader/firecrawl_reader.py +2 -2
- agno/knowledge/reader/json_reader.py +2 -2
- agno/knowledge/reader/markdown_reader.py +2 -2
- agno/knowledge/reader/pdf_reader.py +5 -4
- agno/knowledge/reader/pptx_reader.py +2 -2
- agno/knowledge/reader/reader_factory.py +118 -1
- agno/knowledge/reader/s3_reader.py +2 -2
- agno/knowledge/reader/tavily_reader.py +2 -2
- agno/knowledge/reader/text_reader.py +2 -2
- agno/knowledge/reader/web_search_reader.py +2 -2
- agno/knowledge/reader/website_reader.py +5 -3
- agno/knowledge/reader/wikipedia_reader.py +2 -2
- agno/knowledge/reader/youtube_reader.py +2 -2
- agno/knowledge/remote_content/__init__.py +29 -0
- agno/knowledge/remote_content/config.py +204 -0
- agno/knowledge/remote_content/remote_content.py +74 -17
- agno/knowledge/utils.py +37 -29
- agno/learn/__init__.py +6 -0
- agno/learn/machine.py +35 -0
- agno/learn/schemas.py +82 -11
- agno/learn/stores/__init__.py +3 -0
- agno/learn/stores/decision_log.py +1156 -0
- agno/learn/stores/learned_knowledge.py +6 -6
- agno/models/anthropic/claude.py +24 -0
- agno/models/aws/bedrock.py +20 -0
- agno/models/base.py +60 -6
- agno/models/cerebras/cerebras.py +34 -2
- agno/models/cohere/chat.py +25 -0
- agno/models/google/gemini.py +50 -5
- agno/models/litellm/chat.py +38 -0
- agno/models/n1n/__init__.py +3 -0
- agno/models/n1n/n1n.py +57 -0
- agno/models/openai/chat.py +25 -1
- agno/models/openrouter/openrouter.py +46 -0
- agno/models/perplexity/perplexity.py +2 -0
- agno/models/response.py +16 -0
- agno/os/app.py +83 -44
- agno/os/interfaces/slack/router.py +10 -1
- agno/os/interfaces/whatsapp/router.py +6 -0
- agno/os/middleware/__init__.py +2 -0
- agno/os/middleware/trailing_slash.py +27 -0
- agno/os/router.py +1 -0
- agno/os/routers/agents/router.py +29 -16
- agno/os/routers/agents/schema.py +6 -4
- agno/os/routers/components/__init__.py +3 -0
- agno/os/routers/components/components.py +475 -0
- agno/os/routers/evals/schemas.py +4 -3
- agno/os/routers/health.py +3 -3
- agno/os/routers/knowledge/knowledge.py +128 -3
- agno/os/routers/knowledge/schemas.py +12 -0
- agno/os/routers/memory/schemas.py +4 -2
- agno/os/routers/metrics/metrics.py +9 -11
- agno/os/routers/metrics/schemas.py +10 -6
- agno/os/routers/registry/__init__.py +3 -0
- agno/os/routers/registry/registry.py +337 -0
- agno/os/routers/teams/router.py +20 -8
- agno/os/routers/teams/schema.py +6 -4
- agno/os/routers/traces/traces.py +5 -5
- agno/os/routers/workflows/router.py +38 -11
- agno/os/routers/workflows/schema.py +1 -1
- agno/os/schema.py +92 -26
- agno/os/utils.py +84 -19
- agno/reasoning/anthropic.py +2 -2
- agno/reasoning/azure_ai_foundry.py +2 -2
- agno/reasoning/deepseek.py +2 -2
- agno/reasoning/default.py +6 -7
- agno/reasoning/gemini.py +2 -2
- agno/reasoning/helpers.py +6 -7
- agno/reasoning/manager.py +4 -10
- agno/reasoning/ollama.py +2 -2
- agno/reasoning/openai.py +2 -2
- agno/reasoning/vertexai.py +2 -2
- agno/registry/__init__.py +3 -0
- agno/registry/registry.py +68 -0
- agno/run/agent.py +59 -0
- agno/run/base.py +7 -0
- agno/run/team.py +57 -0
- agno/skills/agent_skills.py +10 -3
- agno/team/__init__.py +3 -1
- agno/team/team.py +1165 -330
- agno/tools/duckduckgo.py +25 -71
- agno/tools/exa.py +0 -21
- agno/tools/function.py +35 -83
- agno/tools/knowledge.py +9 -4
- agno/tools/mem0.py +11 -10
- agno/tools/memory.py +47 -46
- agno/tools/parallel.py +0 -7
- agno/tools/reasoning.py +30 -23
- agno/tools/tavily.py +4 -1
- agno/tools/websearch.py +93 -0
- agno/tools/website.py +1 -1
- agno/tools/wikipedia.py +1 -1
- agno/tools/workflow.py +48 -47
- agno/utils/agent.py +42 -5
- agno/utils/events.py +160 -2
- agno/utils/print_response/agent.py +0 -31
- agno/utils/print_response/team.py +0 -2
- agno/utils/print_response/workflow.py +0 -2
- agno/utils/team.py +61 -11
- agno/vectordb/lancedb/lance_db.py +4 -1
- agno/vectordb/mongodb/mongodb.py +1 -1
- agno/vectordb/pgvector/pgvector.py +3 -3
- agno/vectordb/qdrant/qdrant.py +4 -4
- agno/workflow/__init__.py +3 -1
- agno/workflow/condition.py +0 -21
- agno/workflow/loop.py +0 -21
- agno/workflow/parallel.py +0 -21
- agno/workflow/router.py +0 -21
- agno/workflow/step.py +117 -24
- agno/workflow/steps.py +0 -21
- agno/workflow/workflow.py +427 -63
- {agno-2.3.26.dist-info → agno-2.4.1.dist-info}/METADATA +49 -76
- {agno-2.3.26.dist-info → agno-2.4.1.dist-info}/RECORD +140 -126
- {agno-2.3.26.dist-info → agno-2.4.1.dist-info}/WHEEL +1 -1
- {agno-2.3.26.dist-info → agno-2.4.1.dist-info}/licenses/LICENSE +0 -0
- {agno-2.3.26.dist-info → agno-2.4.1.dist-info}/top_level.txt +0 -0
agno/tools/memory.py
CHANGED
@@ -1,10 +1,11 @@
 import json
 from textwrap import dedent
-from typing import Any,
+from typing import Any, List, Optional
 from uuid import uuid4
 
 from agno.db.base import BaseDb
 from agno.db.schemas import UserMemory
+from agno.run import RunContext
 from agno.tools import Toolkit
 from agno.utils.log import log_debug, log_error
 
@@ -62,7 +63,7 @@ class MemoryTools(Toolkit):
             **kwargs,
         )
 
-    def think(self,
+    def think(self, run_context: RunContext, thought: str) -> str:
         """Use this tool as a scratchpad to reason about memory operations, refine your approach, brainstorm memory content, or revise your plan.
 
         Call `Think` whenever you need to figure out what to do next, analyze the user's requirements, plan memory operations, or decide on execution strategy.
@@ -75,14 +76,14 @@ class MemoryTools(Toolkit):
             log_debug(f"Memory Thought: {thought}")
 
             # Add the thought to the session state
-            if session_state is None:
-                session_state = {}
-            if "memory_thoughts" not in session_state:
-                session_state["memory_thoughts"] = []
-            session_state["memory_thoughts"].append(thought)
+            if run_context.session_state is None:
+                run_context.session_state = {}
+            if "memory_thoughts" not in run_context.session_state:
+                run_context.session_state["memory_thoughts"] = []
+            run_context.session_state["memory_thoughts"].append(thought)
 
             # Return the full log of thoughts and the new thought
-            thoughts = "\n".join([f"- {t}" for t in session_state["memory_thoughts"]])
+            thoughts = "\n".join([f"- {t}" for t in run_context.session_state["memory_thoughts"]])
             formatted_thoughts = dedent(
                 f"""Memory Thoughts:
                 {thoughts}
@@ -93,21 +94,21 @@ class MemoryTools(Toolkit):
             log_error(f"Error recording memory thought: {e}")
             return f"Error recording memory thought: {e}"
 
-    def get_memories(self,
+    def get_memories(self, run_context: RunContext) -> str:
         """
         Use this tool to get a list of memories for the current user from the database.
         """
         try:
-            # Get user info from
-            user_id =
+            # Get user info from run context
+            user_id = run_context.user_id
 
             memories = self.db.get_user_memories(user_id=user_id)
 
             # Store the result in session state for analysis
-            if session_state is None:
-                session_state = {}
-            if "memory_operations" not in session_state:
-                session_state["memory_operations"] = []
+            if run_context.session_state is None:
+                run_context.session_state = {}
+            if "memory_operations" not in run_context.session_state:
+                run_context.session_state["memory_operations"] = []
 
             operation_result = {
                 "operation": "get_memories",
@@ -115,7 +116,7 @@ class MemoryTools(Toolkit):
                 "memories": [memory.to_dict() for memory in memories],  # type: ignore
                 "error": None,
             }
-            session_state["memory_operations"].append(operation_result)
+            run_context.session_state["memory_operations"].append(operation_result)
 
             return json.dumps([memory.to_dict() for memory in memories], indent=2)  # type: ignore
         except Exception as e:
@@ -124,7 +125,7 @@ class MemoryTools(Toolkit):
 
     def add_memory(
         self,
-
+        run_context: RunContext,
         memory: str,
         topics: Optional[List[str]] = None,
     ) -> str:
@@ -140,8 +141,8 @@ class MemoryTools(Toolkit):
         try:
             log_debug(f"Adding memory: {memory}")
 
-            # Get user
-            user_id =
+            # Get user info from run context
+            user_id = run_context.user_id
 
             # Create UserMemory object
             user_memory = UserMemory(
@@ -155,10 +156,10 @@ class MemoryTools(Toolkit):
             created_memory = self.db.upsert_user_memory(user_memory)
 
             # Store the result in session state for analysis
-            if session_state is None:
-                session_state = {}
-            if "memory_operations" not in session_state:
-                session_state["memory_operations"] = []
+            if run_context.session_state is None:
+                run_context.session_state = {}
+            if "memory_operations" not in run_context.session_state:
+                run_context.session_state["memory_operations"] = []
 
             memory_dict = created_memory.to_dict() if created_memory else None  # type: ignore
 
@@ -168,7 +169,7 @@ class MemoryTools(Toolkit):
                 "memory": memory_dict,
                 "error": None,
             }
-            session_state["memory_operations"].append(operation_result)
+            run_context.session_state["memory_operations"].append(operation_result)
 
             if created_memory:
                 return json.dumps({"success": True, "operation": "add_memory", "memory": memory_dict}, indent=2)
@@ -183,7 +184,7 @@ class MemoryTools(Toolkit):
 
     def update_memory(
         self,
-
+        run_context: RunContext,
         memory_id: str,
         memory: Optional[str] = None,
         topics: Optional[List[str]] = None,
@@ -221,10 +222,10 @@ class MemoryTools(Toolkit):
             updated_result = self.db.upsert_user_memory(updated_memory)
 
             # Store the result in session state for analysis
-            if session_state is None:
-                session_state = {}
-            if "memory_operations" not in session_state:
-                session_state["memory_operations"] = []
+            if run_context.session_state is None:
+                run_context.session_state = {}
+            if "memory_operations" not in run_context.session_state:
+                run_context.session_state["memory_operations"] = []
 
             memory_dict = updated_result.to_dict() if updated_result else None  # type: ignore
 
@@ -234,7 +235,7 @@ class MemoryTools(Toolkit):
                 "memory": memory_dict,
                 "error": None,
             }
-            session_state["memory_operations"].append(operation_result)
+            run_context.session_state["memory_operations"].append(operation_result)
 
             if updated_result:
                 return json.dumps({"success": True, "operation": "update_memory", "memory": memory_dict}, indent=2)
@@ -249,7 +250,7 @@ class MemoryTools(Toolkit):
 
     def delete_memory(
         self,
-
+        run_context: RunContext,
         memory_id: str,
     ) -> str:
         """Use this tool to delete a memory from the database.
@@ -275,10 +276,10 @@ class MemoryTools(Toolkit):
             self.db.delete_user_memory(memory_id)
 
             # Store the result in session state for analysis
-            if session_state is None:
-                session_state = {}
-            if "memory_operations" not in session_state:
-                session_state["memory_operations"] = []
+            if run_context.session_state is None:
+                run_context.session_state = {}
+            if "memory_operations" not in run_context.session_state:
+                run_context.session_state["memory_operations"] = []
 
             memory_dict = existing_memory.to_dict() if existing_memory else None  # type: ignore
 
@@ -289,7 +290,7 @@ class MemoryTools(Toolkit):
                 "deleted_memory": memory_dict,
                 "error": None,
             }
-            session_state["memory_operations"].append(operation_result)
+            run_context.session_state["memory_operations"].append(operation_result)
 
             return json.dumps(
                 {
@@ -305,7 +306,7 @@ class MemoryTools(Toolkit):
             log_error(f"Error deleting memory: {e}")
             return json.dumps({"success": False, "operation": "delete_memory", "error": str(e)}, indent=2)
 
-    def analyze(self,
+    def analyze(self, run_context: RunContext, analysis: str) -> str:
         """Use this tool to evaluate whether the memory operations results are correct and sufficient.
         If not, go back to "Think" or use memory operations with refined parameters.
 
@@ -316,14 +317,14 @@ class MemoryTools(Toolkit):
             log_debug(f"Memory Analysis: {analysis}")
 
             # Add the analysis to the session state
-            if session_state is None:
-                session_state = {}
-            if "memory_analysis" not in session_state:
-                session_state["memory_analysis"] = []
-            session_state["memory_analysis"].append(analysis)
+            if run_context.session_state is None:
+                run_context.session_state = {}
+            if "memory_analysis" not in run_context.session_state:
+                run_context.session_state["memory_analysis"] = []
+            run_context.session_state["memory_analysis"].append(analysis)
 
             # Return the full log of analysis and the new analysis
-            analysis_log = "\n".join([f"- {a}" for a in session_state["memory_analysis"]])
+            analysis_log = "\n".join([f"- {a}" for a in run_context.session_state["memory_analysis"]])
             formatted_analysis = dedent(
                 f"""Memory Analysis:
                 {analysis_log}
@@ -338,7 +339,7 @@ class MemoryTools(Toolkit):
 You have access to the Think, Add Memory, Update Memory, Delete Memory, and Analyze tools that will help you manage user memories and analyze their operations. Use these tools as frequently as needed to successfully complete memory management tasks.
 
 ## How to use the Think, Memory Operations, and Analyze tools:
-
+
 1. **Think**
 - Purpose: A scratchpad for planning memory operations, brainstorming memory content, and refining your approach. You never reveal your "Think" content to the user.
 - Usage: Call `think` whenever you need to figure out what memory operations to perform, analyze requirements, or decide on strategy.
@@ -407,12 +408,12 @@ class MemoryTools(Toolkit):
 Analyze: Successfully deleted the outdated work schedule memory. The old information won't interfere with future scheduling requests.
 
 Final Answer: I've removed your old work schedule information. Feel free to share your new schedule when you're ready, and I'll store the updated information.
-
+
 #### Example 4: Retrieving Memories
 
 User: What have you remembered about me?
 Think: The user wants to retrieve memories about themselves. I should use the get_memories tool to retrieve the memories.
-Get Memories:
+Get Memories:
 Analyze: Successfully retrieved the memories about the user. The memories are relevant to the user's preferences and activities.
 
 Final Answer: I've retrieved the memories about you. You like to hike in the mountains on weekends and travel to new places and experience different cultures. You are planning to travel to Africa in December.\
agno/tools/parallel.py
CHANGED
@@ -41,7 +41,6 @@ class ParallelTools(Toolkit):
         include_domains (Optional[List[str]]): Default domains to restrict results to. Default is None.
         exclude_domains (Optional[List[str]]): Default domains to exclude from results. Default is None.
         max_age_seconds (Optional[int]): Default cache age threshold (minimum 600). Default is None.
-        timeout_seconds (Optional[float]): Default timeout for content retrieval. Default is None.
         disable_cache_fallback (Optional[bool]): Default cache fallback behavior. Default is None.
     """
 
@@ -58,7 +57,6 @@ class ParallelTools(Toolkit):
         include_domains: Optional[List[str]] = None,
         exclude_domains: Optional[List[str]] = None,
         max_age_seconds: Optional[int] = None,
-        timeout_seconds: Optional[float] = None,
         disable_cache_fallback: Optional[bool] = None,
         **kwargs,
     ):
@@ -73,7 +71,6 @@ class ParallelTools(Toolkit):
         self.include_domains = include_domains
         self.exclude_domains = exclude_domains
         self.max_age_seconds = max_age_seconds
-        self.timeout_seconds = timeout_seconds
         self.disable_cache_fallback = disable_cache_fallback
 
         self.parallel_client = ParallelClient(
@@ -153,8 +150,6 @@ class ParallelTools(Toolkit):
         fetch_policy: Dict[str, Any] = {}
         if self.max_age_seconds is not None:
             fetch_policy["max_age_seconds"] = self.max_age_seconds
-        if self.timeout_seconds is not None:
-            fetch_policy["timeout_seconds"] = self.timeout_seconds
         if self.disable_cache_fallback is not None:
             fetch_policy["disable_cache_fallback"] = self.disable_cache_fallback
 
@@ -256,8 +251,6 @@ class ParallelTools(Toolkit):
         fetch_policy: Dict[str, Any] = {}
         if self.max_age_seconds is not None:
             fetch_policy["max_age_seconds"] = self.max_age_seconds
-        if self.timeout_seconds is not None:
-            fetch_policy["timeout_seconds"] = self.timeout_seconds
         if self.disable_cache_fallback is not None:
             fetch_policy["disable_cache_fallback"] = self.disable_cache_fallback
 
agno/tools/reasoning.py
CHANGED
@@ -1,7 +1,8 @@
 from textwrap import dedent
-from typing import Any,
+from typing import Any, List, Optional
 
 from agno.reasoning.step import NextAction, ReasoningStep
+from agno.run import RunContext
 from agno.tools import Toolkit
 from agno.utils.log import log_debug, log_error
 
@@ -49,7 +50,7 @@ class ReasoningTools(Toolkit):
 
     def think(
         self,
-
+        run_context: RunContext,
         title: str,
         thought: str,
         action: Optional[str] = None,
@@ -80,21 +81,24 @@ class ReasoningTools(Toolkit):
             confidence=confidence,
         )
 
-        current_run_id =
+        current_run_id = run_context.run_id
 
        # Add this step to the Agent's session state
-        if session_state is None:
-            session_state = {}
-        if "reasoning_steps" not in session_state:
-            session_state["reasoning_steps"] = {}
-        if current_run_id not in session_state["reasoning_steps"]:
-            session_state["reasoning_steps"][current_run_id] = []
-        session_state["reasoning_steps"][current_run_id].append(reasoning_step.model_dump_json())
+        if run_context.session_state is None:
+            run_context.session_state = {}
+        if "reasoning_steps" not in run_context.session_state:
+            run_context.session_state["reasoning_steps"] = {}
+        if current_run_id not in run_context.session_state["reasoning_steps"]:
+            run_context.session_state["reasoning_steps"][current_run_id] = []
+        run_context.session_state["reasoning_steps"][current_run_id].append(reasoning_step.model_dump_json())
 
         # Return all previous reasoning_steps and the new reasoning_step
-        if
+        if (
+            "reasoning_steps" in run_context.session_state
+            and current_run_id in run_context.session_state["reasoning_steps"]
+        ):
             formatted_reasoning_steps = ""
-            for i, step in enumerate(session_state["reasoning_steps"][current_run_id], 1):
+            for i, step in enumerate(run_context.session_state["reasoning_steps"][current_run_id], 1):
                 step_parsed = ReasoningStep.model_validate_json(step)
                 step_str = dedent(f"""\
                 Step {i}:
@@ -112,7 +116,7 @@ Confidence: {step_parsed.confidence}
 
     def analyze(
         self,
-
+        run_context: RunContext,
         title: str,
         result: str,
         analysis: str,
@@ -150,20 +154,23 @@ Confidence: {step_parsed.confidence}
             confidence=confidence,
         )
 
-        current_run_id =
+        current_run_id = run_context.run_id
         # Add this step to the Agent's session state
-        if session_state is None:
-            session_state = {}
-        if "reasoning_steps" not in session_state:
-            session_state["reasoning_steps"] = {}
-        if current_run_id not in session_state["reasoning_steps"]:
-            session_state["reasoning_steps"][current_run_id] = []
-        session_state["reasoning_steps"][current_run_id].append(reasoning_step.model_dump_json())
+        if run_context.session_state is None:
+            run_context.session_state = {}
+        if "reasoning_steps" not in run_context.session_state:
+            run_context.session_state["reasoning_steps"] = {}
+        if current_run_id not in run_context.session_state["reasoning_steps"]:
+            run_context.session_state["reasoning_steps"][current_run_id] = []
+        run_context.session_state["reasoning_steps"][current_run_id].append(reasoning_step.model_dump_json())
 
         # Return all previous reasoning_steps and the new reasoning_step
-        if
+        if (
+            "reasoning_steps" in run_context.session_state
+            and current_run_id in run_context.session_state["reasoning_steps"]
+        ):
             formatted_reasoning_steps = ""
-            for i, step in enumerate(session_state["reasoning_steps"][current_run_id], 1):
+            for i, step in enumerate(run_context.session_state["reasoning_steps"][current_run_id], 1):
                 step_parsed = ReasoningStep.model_validate_json(step)
                 step_str = dedent(f"""\
                 Step {i}:
agno/tools/tavily.py
CHANGED
@@ -15,6 +15,7 @@ class TavilyTools(Toolkit):
     def __init__(
         self,
         api_key: Optional[str] = None,
+        api_base_url: Optional[str] = None,
         enable_search: bool = True,
         enable_search_context: bool = False,
         enable_extract: bool = False,
@@ -34,6 +35,7 @@ class TavilyTools(Toolkit):
 
         Args:
             api_key: Tavily API key. If not provided, will use TAVILY_API_KEY env var.
+            api_base_url: Tavily API base URL. If not provided, will use TAVILY_API_BASE_URL env var. Defaults to None. If None - will use https://api.tavily.com.
             enable_search: Enable web search functionality. Defaults to True.
             enable_search_context: Use search context mode instead of regular search. Defaults to False.
             enable_extract: Enable URL content extraction functionality. Defaults to False.
@@ -52,8 +54,9 @@ class TavilyTools(Toolkit):
         self.api_key = api_key or getenv("TAVILY_API_KEY")
         if not self.api_key:
             logger.error("TAVILY_API_KEY not provided")
+        self.api_base_url = api_base_url or getenv("TAVILY_API_BASE_URL")
 
-        self.client: TavilyClient = TavilyClient(api_key=self.api_key)
+        self.client: TavilyClient = TavilyClient(api_key=self.api_key, api_base_url=self.api_base_url)
         self.search_depth: Literal["basic", "advanced"] = search_depth
         self.extract_depth: Literal["basic", "advanced"] = extract_depth
         self.max_tokens: int = max_tokens
agno/tools/websearch.py
ADDED
@@ -0,0 +1,93 @@
+import json
+from typing import Any, List, Optional
+
+from agno.tools import Toolkit
+from agno.utils.log import log_debug
+
+try:
+    from ddgs import DDGS
+except ImportError:
+    raise ImportError("`ddgs` not installed. Please install using `pip install ddgs`")
+
+
+class WebSearchTools(Toolkit):
+    """
+    Toolkit for searching the web. Uses the meta-search library DDGS.
+    Multiple search backends (e.g. google, bing, duckduckgo) are available.
+
+    Args:
+        enable_search (bool): Enable web search function.
+        enable_news (bool): Enable news search function.
+        backend (str): The backend to use for searching. Defaults to "auto" which
+            automatically selects available backends. Other options include:
+            "duckduckgo", "google", "bing", "brave", "yandex", "yahoo", etc.
+        modifier (Optional[str]): A modifier to be prepended to search queries.
+        fixed_max_results (Optional[int]): A fixed number of maximum results.
+        proxy (Optional[str]): Proxy to be used for requests.
+        timeout (Optional[int]): The maximum number of seconds to wait for a response.
+        verify_ssl (bool): Whether to verify SSL certificates.
+    """
+
+    def __init__(
+        self,
+        enable_search: bool = True,
+        enable_news: bool = True,
+        backend: str = "auto",
+        modifier: Optional[str] = None,
+        fixed_max_results: Optional[int] = None,
+        proxy: Optional[str] = None,
+        timeout: Optional[int] = 10,
+        verify_ssl: bool = True,
+        **kwargs,
+    ):
+        self.proxy: Optional[str] = proxy
+        self.timeout: Optional[int] = timeout
+        self.fixed_max_results: Optional[int] = fixed_max_results
+        self.modifier: Optional[str] = modifier
+        self.verify_ssl: bool = verify_ssl
+        self.backend: str = backend
+
+        tools: List[Any] = []
+        if enable_search:
+            tools.append(self.web_search)
+        if enable_news:
+            tools.append(self.search_news)
+
+        super().__init__(name="websearch", tools=tools, **kwargs)
+
+    def web_search(self, query: str, max_results: int = 5) -> str:
+        """Use this function to search the web for a query.
+
+        Args:
+            query(str): The query to search for.
+            max_results (optional, default=5): The maximum number of results to return.
+
+        Returns:
+            The search results from the web.
+        """
+        actual_max_results = self.fixed_max_results or max_results
+        search_query = f"{self.modifier} {query}" if self.modifier else query
+
+        log_debug(f"Searching web for: {search_query} using backend: {self.backend}")
+        with DDGS(proxy=self.proxy, timeout=self.timeout, verify=self.verify_ssl) as ddgs:
+            results = ddgs.text(query=search_query, max_results=actual_max_results, backend=self.backend)
+
+        return json.dumps(results, indent=2)
+
+    def search_news(self, query: str, max_results: int = 5) -> str:
+        """Use this function to get the latest news from the web.
+
+        Args:
+            query(str): The query to search for.
+            max_results (optional, default=5): The maximum number of results to return.
+
+        Returns:
+            The latest news from the web.
+        """
+        actual_max_results = self.fixed_max_results or max_results
+
+        log_debug(f"Searching web news for: {query} using backend: {self.backend}")
+        with DDGS(proxy=self.proxy, timeout=self.timeout, verify=self.verify_ssl) as ddgs:
+            results = ddgs.news(query=query, max_results=actual_max_results, backend=self.backend)
+
+        return json.dumps(results, indent=2)
agno/tools/website.py
CHANGED