clap-agents 0.1.1__py3-none-any.whl → 0.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clap/__init__.py +13 -42
- clap/embedding/__init__.py +21 -0
- clap/embedding/base_embedding.py +28 -0
- clap/embedding/fastembed_embedding.py +75 -0
- clap/embedding/ollama_embedding.py +76 -0
- clap/embedding/sentence_transformer_embedding.py +44 -0
- clap/llm_services/__init__.py +15 -0
- clap/llm_services/base.py +3 -6
- clap/llm_services/google_openai_compat_service.py +1 -5
- clap/llm_services/groq_service.py +5 -13
- clap/llm_services/ollama_service.py +101 -0
- clap/mcp_client/client.py +13 -25
- clap/multiagent_pattern/agent.py +107 -34
- clap/multiagent_pattern/team.py +54 -29
- clap/react_pattern/react_agent.py +339 -126
- clap/tool_pattern/tool.py +94 -165
- clap/tool_pattern/tool_agent.py +171 -171
- clap/tools/__init__.py +1 -1
- clap/tools/email_tools.py +16 -19
- clap/tools/web_crawler.py +26 -18
- clap/utils/completions.py +35 -37
- clap/utils/extraction.py +3 -3
- clap/utils/rag_utils.py +183 -0
- clap/vector_stores/__init__.py +16 -0
- clap/vector_stores/base.py +85 -0
- clap/vector_stores/chroma_store.py +142 -0
- clap/vector_stores/qdrant_store.py +155 -0
- {clap_agents-0.1.1.dist-info → clap_agents-0.2.2.dist-info}/METADATA +201 -23
- clap_agents-0.2.2.dist-info/RECORD +38 -0
- clap_agents-0.1.1.dist-info/RECORD +0 -27
- {clap_agents-0.1.1.dist-info → clap_agents-0.2.2.dist-info}/WHEEL +0 -0
- {clap_agents-0.1.1.dist-info → clap_agents-0.2.2.dist-info}/licenses/LICENSE +0 -0
clap/tool_pattern/tool_agent.py
CHANGED
@@ -1,46 +1,59 @@
-# --- START OF ASYNC MODIFIED tool_agent.py (Init Fix) ---
 
 import json
 import asyncio
-from typing import List, Dict, Any, Optional
+from typing import List, Dict, Any, Optional, Union # Added Union
 
 from colorama import Fore
 from dotenv import load_dotenv
-from groq import AsyncGroq
 
 from clap.tool_pattern.tool import Tool
 from clap.mcp_client.client import MCPClientManager
-from clap.utils.completions import build_prompt_structure
-from clap.
-from clap.
-
-from
+from clap.utils.completions import build_prompt_structure, ChatHistory, update_chat_history
+from clap.llm_services.base import LLMServiceInterface, StandardizedLLMResponse, LLMToolCall
+from clap.vector_stores.base import VectorStoreInterface, QueryResult
+
+from clap.multiagent_pattern.agent import VECTOR_QUERY_TOOL_SCHEMA
+
+
+
+try:
+    from mcp import types as mcp_types
+except ImportError:
+    mcp_types = None
 
 load_dotenv()
 
 NATIVE_TOOL_SYSTEM_PROMPT = """
-You are a helpful assistant. Use the available tools (local or
+You are a helpful assistant. Use the available tools (local functions, remote MCP tools, or vector_query for document retrieval) if necessary to answer the user's request.
 If you use a tool, you will be given the results, and then you should provide the final response to the user based on those results.
 If no tool is needed, answer directly.
+When using vector_query, the 'query' argument should be the user's main question.
 """
 
 class ToolAgent:
     """
-    A simple agent that uses native tool calling asynchronously
-    Supports
-
+    A simple agent that uses LLM native tool calling asynchronously.
+    Supports local, remote MCP tools, and RAG via vector_query tool, using an LLMServiceInterface.
+    Makes one attempt to call tools if needed, processes results,
     and then generates a final response.
     """
 
     def __init__(
         self,
-
+        llm_service: LLMServiceInterface,
+        model: str,
+        tools: Optional[Union[Tool, List[Tool]]] = None,
         mcp_manager: Optional[MCPClientManager] = None,
         mcp_server_names: Optional[List[str]] = None,
-
+        vector_store: Optional[VectorStoreInterface] = None,
         system_prompt: str = NATIVE_TOOL_SYSTEM_PROMPT,
     ) -> None:
-
+        if not isinstance(llm_service, LLMServiceInterface):
+            raise TypeError("llm_service must be an instance of LLMServiceInterface.")
+        if not model or not isinstance(model, str):
+            raise ValueError("A valid model name (string) is required.")
+
+        self.llm_service = llm_service
         self.model = model
         self.system_prompt = system_prompt
 
@@ -48,7 +61,7 @@ class ToolAgent:
             self.local_tools = []
         elif isinstance(tools, list):
             self.local_tools = tools
-        else:
+        else:
             self.local_tools = [tools]
 
         self.local_tools_dict = {tool.name: tool for tool in self.local_tools}
@@ -56,186 +69,173 @@ class ToolAgent:
 
         self.mcp_manager = mcp_manager
         self.mcp_server_names = mcp_server_names or []
-        self.remote_tools_dict: Dict[str,
+        self.remote_tools_dict: Dict[str, Any] = {}
         self.remote_tool_server_map: Dict[str, str] = {}
 
+        self.vector_store = vector_store
 
     async def _get_combined_tool_schemas(self) -> List[Dict[str, Any]]:
-
-
-        self.remote_tools_dict = {}
+        all_schemas = list(self.local_tool_schemas)
+
+        self.remote_tools_dict = {}
        self.remote_tool_server_map = {}
-
-
-        fetch_tasks = [
-            self.mcp_manager.list_remote_tools(name)
-            for name in self.mcp_server_names
-        ]
+        if self.mcp_manager and self.mcp_server_names and mcp_types:
+            fetch_tasks = [self.mcp_manager.list_remote_tools(name) for name in self.mcp_server_names]
             results = await asyncio.gather(*fetch_tasks, return_exceptions=True)
-
             for server_name, result in zip(self.mcp_server_names, results):
-                if isinstance(result, Exception):
-                    print(f"{Fore.RED}Error listing tools from MCP server '{server_name}': {result}{Fore.RESET}")
-                    continue
-
+                if isinstance(result, Exception): print(f"{Fore.RED}ToolAgent: Error listing MCP tools '{server_name}': {result}{Fore.RESET}"); continue
                 if isinstance(result, list):
-                    for
-                        if isinstance(
-                            if
-
-
-
-
-
-
-
-                            self.remote_tool_server_map[tool.name] = server_name
-
-                            translated_schema = {
-                                "type": "function",
-                                "function": {
-                                    "name": tool.name,
-                                    "description": tool.description or "",
-                                    "parameters": tool.inputSchema
-                                }
-                            }
+                    for tool_obj in result:
+                        if isinstance(tool_obj, mcp_types.Tool):
+                            if tool_obj.name in self.local_tools_dict: print(f"{Fore.YELLOW}ToolAgent Warning: MCP tool '{tool_obj.name}' conflicts with local. Skipping.{Fore.RESET}"); continue
+
+                            if self.vector_store and tool_obj.name == VECTOR_QUERY_TOOL_SCHEMA["function"]["name"]:
+                                print(f"{Fore.YELLOW}ToolAgent Warning: MCP tool '{tool_obj.name}' conflicts with built-in vector_query tool. Skipping MCP version.{Fore.RESET}"); continue
+                            if tool_obj.name in self.remote_tools_dict: print(f"{Fore.YELLOW}ToolAgent Warning: MCP tool '{tool_obj.name}' conflicts with another remote. Skipping.{Fore.RESET}"); continue
+                            self.remote_tools_dict[tool_obj.name] = tool_obj
+                            self.remote_tool_server_map[tool_obj.name] = server_name
+                            translated_schema = {"type": "function", "function": {"name": tool_obj.name, "description": tool_obj.description or "", "parameters": tool_obj.inputSchema or {"type": "object", "properties": {}}}}
                             all_schemas.append(translated_schema)
-                        else:
-
-
-
+                        else: print(f"{Fore.YELLOW}ToolAgent Warning: Non-Tool object from {server_name}: {type(tool_obj)}{Fore.RESET}")
+
+
+        if self.vector_store:
+
+            if not any(schema["function"]["name"] == VECTOR_QUERY_TOOL_SCHEMA["function"]["name"] for schema in all_schemas):
+                all_schemas.append(VECTOR_QUERY_TOOL_SCHEMA)
+                print(f"{Fore.BLUE}ToolAgent: Vector query tool is available.{Fore.RESET}")
+
+        print(f"{Fore.BLUE}ToolAgent: Total tools available to LLM: {len(all_schemas)}{Fore.RESET}")
         return all_schemas
 
-    async def
-
-
-
-        """
-        observation_messages = []
-        if not isinstance(tool_calls, list):
-            print(f"{Fore.RED}Error: Expected a list of tool_calls, got {type(tool_calls)}{Fore.RESET}")
-            return observation_messages
-
-        tasks = [self._execute_single_tool_call(tc) for tc in tool_calls]
-        results = await asyncio.gather(*tasks, return_exceptions=True)
-
-        for result in results:
-            if isinstance(result, dict):
-                if len(result) == 1:
-                    tool_call_id, result_str = list(result.items())[0]
-                    observation_messages.append(
-                        build_prompt_structure(role="tool", content=result_str, tool_call_id=tool_call_id)
-                    )
-                else:
-                    print(f"{Fore.RED}Error: Unexpected result format from tool execution: {result}{Fore.RESET}")
-            elif isinstance(result, Exception):
-                print(f"{Fore.RED}Error during concurrent tool execution: {result}{Fore.RESET}")
-            else:
-                print(f"{Fore.RED}Error: Unexpected item in tool execution results: {result}{Fore.RESET}")
-
-        return observation_messages
-
-    async def _execute_single_tool_call(self, tool_call: Any) -> Dict[str, Any]:
-        """Helper to execute a single tool call (local or remote)."""
-        tool_call_id = getattr(tool_call, 'id', 'error_no_id')
-        function_call = getattr(tool_call, 'function', None)
-        tool_name = getattr(function_call, 'name', 'error_unknown_name')
-        result_str = f"Error: Processing failed for tool call '{tool_name}' (id: {tool_call_id})."
-
+    async def _execute_single_tool_call(self, tool_call: LLMToolCall) -> Dict[str, Any]:
+        tool_call_id = tool_call.id
+        tool_name = tool_call.function_name
+        result_str = f"Error: Processing tool call '{tool_name}' (id: {tool_call_id})."
         try:
-
-
-
-
-
-
-
-
-
-
-
-
+            arguments = json.loads(tool_call.function_arguments_json_str)
+
+
+            if self.vector_store and tool_name == VECTOR_QUERY_TOOL_SCHEMA["function"]["name"]:
+                print(f"{Fore.CYAN}\nToolAgent: Executing Vector Store Query Tool: {tool_name} (ID: {tool_call_id}) Args: {arguments}{Fore.RESET}")
+                query_text = arguments.get("query")
+
+                top_k_value_from_llm = arguments.get("top_k")
+                default_top_k_from_schema = VECTOR_QUERY_TOOL_SCHEMA["function"]["parameters"]["properties"]["top_k"].get("default", 3)
+                top_k = default_top_k_from_schema
+                if top_k_value_from_llm is not None:
+                    try: top_k = int(top_k_value_from_llm)
+                    except (ValueError, TypeError):
+                        print(f"{Fore.YELLOW}ToolAgent Warning: LLM provided top_k '{top_k_value_from_llm}' is invalid. Using default: {default_top_k_from_schema}.{Fore.RESET}")
+
+                if not query_text:
+                    result_str = "Error: 'query' argument required for vector_query tool."
+                else:
+                    query_results: QueryResult = await self.vector_store.aquery(
+                        query_texts=[query_text], n_results=top_k,
+                        include=["documents", "metadatas", "distances"]
+                    )
+
+                    formatted_chunks_for_llm = []
+                    current_length = 0
+                    max_obs_len = 4000
+                    retrieved_docs = query_results.get("documents")
+                    retrieved_ids = query_results.get("ids")
+                    if retrieved_docs and isinstance(retrieved_docs, list) and len(retrieved_docs) > 0 and \
+                       retrieved_ids and isinstance(retrieved_ids, list) and len(retrieved_ids) > 0:
+                        docs_for_query, ids_for_query = retrieved_docs[0], retrieved_ids[0]
+                        metas_list, distances_list = query_results.get("metadatas"), query_results.get("distances")
+                        metas_for_query = metas_list[0] if metas_list and len(metas_list) > 0 else [None] * len(docs_for_query)
+                        distances_for_query = distances_list[0] if distances_list and len(distances_list) > 0 else [None] * len(docs_for_query)
+                        for i, doc_content_item in enumerate(docs_for_query):
+                            current_meta = metas_for_query[i] if i < len(metas_for_query) else None
+                            current_id = str(ids_for_query[i]) if i < len(ids_for_query) else "N/A"
+                            current_distance = distances_for_query[i] if i < len(distances_for_query) and distances_for_query[i] is not None else float('nan')
+                            meta_str = json.dumps(current_meta, ensure_ascii=False) if current_meta else "{}"
+                            current_chunk_formatted = (
+                                f"--- Retrieved Chunk {str(i+1)} (ID: {current_id}, Distance: {current_distance:.4f}) ---\n"
+                                f"Metadata: {meta_str}\nContent: {str(doc_content_item)}\n\n")
+                            chunk_len = len(current_chunk_formatted)
+                            if current_length + chunk_len <= max_obs_len:
+                                formatted_chunks_for_llm.append(current_chunk_formatted)
+                                current_length += chunk_len
+                            else:
+                                print(f"{Fore.YELLOW}ToolAgent: Obs limit ({max_obs_len}) reached. Included {len(formatted_chunks_for_llm)} chunks.{Fore.RESET}"); break
+                        if formatted_chunks_for_llm:
+                            header = f"Retrieved {len(formatted_chunks_for_llm)} relevant document chunks (out of {len(docs_for_query)} found):\n\n"
+                            result_str = header + "".join(formatted_chunks_for_llm).strip()
+                        else: result_str = "No relevant documents found (or chunks too long for limit)."
+                    else: result_str = "No relevant documents found in vector store for query."
+
+
+            elif tool_name in self.local_tools_dict:
+                tool_instance = self.local_tools_dict[tool_name]
+                result = await tool_instance.run(**arguments)
+                if not isinstance(result, str): result_str = json.dumps(result, ensure_ascii=False)
+                else: result_str = result
             elif tool_name in self.remote_tool_server_map and self.mcp_manager:
                 server_name = self.remote_tool_server_map[tool_name]
-
-                print(f"Tool call ID: {tool_call_id}")
-                print(f"Arguments: {arguments}")
-                result = await self.mcp_manager.call_remote_tool(server_name, tool_name, arguments)
+                result_str = await self.mcp_manager.call_remote_tool(server_name, tool_name, arguments)
             else:
-
-
+                result_str = f"Error: Tool '{tool_name}' not available."
+                print(f"{Fore.RED}ToolAgent: {result_str}{Fore.RESET}")
                 return {tool_call_id: result_str}
-
-
-                result_str = str(result)
-            else:
-                try: result_str = json.dumps(result)
-                except TypeError: result_str = str(result)
-            print(f"{Fore.GREEN}Tool '{tool_name}' result: {result_str[:100]}...{Fore.RESET}")
-
+
+            print(f"{Fore.GREEN}ToolAgent: Tool '{tool_name}' observation: {result_str[:150]}...{Fore.RESET}")
         except json.JSONDecodeError:
-
-
+            result_str = f"Error: Invalid arguments JSON for {tool_name}."
+            print(f"{Fore.RED}ToolAgent: {result_str} Data: {tool_call.function_arguments_json_str}{Fore.RESET}")
         except Exception as e:
-
-
-
+            result_str = f"Error executing tool {tool_name}: {e}"
+            print(f"{Fore.RED}ToolAgent: Error for tool {tool_name} (ID: {tool_call_id}): {e}{Fore.RESET}")
        return {tool_call_id: result_str}
 
-
-
-
-
-
-
-
-
-
+
+    async def process_tool_calls(self, tool_calls: List[LLMToolCall]) -> List[Dict[str, Any]]:
+        observation_messages = []
+        if not isinstance(tool_calls, list): return observation_messages
+        tasks = [self._execute_single_tool_call(tc) for tc in tool_calls]
+        results = await asyncio.gather(*tasks, return_exceptions=True)
+        for result in results:
+            if isinstance(result, dict) and len(result) == 1:
+                tool_call_id, result_str = list(result.items())[0]
+                observation_messages.append(build_prompt_structure(role="tool", content=result_str, tool_call_id=tool_call_id))
+            elif isinstance(result, Exception): print(f"{Fore.RED}ToolAgent: Error in concurrent tool execution: {result}{Fore.RESET}")
+            else: print(f"{Fore.RED}ToolAgent: Unexpected item in tool results: {result}{Fore.RESET}")
+        return observation_messages
 
+    async def run(self, user_msg: str) -> str:
+        combined_tool_schemas = await self._get_combined_tool_schemas()
         initial_user_message = build_prompt_structure(role="user", content=user_msg)
         chat_history = ChatHistory(
-            [
-                build_prompt_structure(role="system", content=self.system_prompt),
-                initial_user_message,
-            ]
+            [build_prompt_structure(role="system", content=self.system_prompt), initial_user_message]
         )
-
-
-
-
-            messages=list(chat_history),
-            model=self.model,
-            tools=combined_tool_schemas,
-            tool_choice="auto"
+        llm_response_1: StandardizedLLMResponse = await self.llm_service.get_llm_response(
+            model=self.model, messages=list(chat_history),
+            tools=combined_tool_schemas if combined_tool_schemas else None,
+            tool_choice="auto" if combined_tool_schemas else "none"
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        self.
-            messages=list(chat_history),
-            model=self.model,
+        assistant_msg_1_dict: Dict[str, Any] = {"role": "assistant"}
+        if llm_response_1.text_content: assistant_msg_1_dict["content"] = llm_response_1.text_content
+        if llm_response_1.tool_calls:
+            assistant_msg_1_dict["tool_calls"] = [
+                {"id": tc.id, "type": "function", "function": {"name": tc.function_name, "arguments": tc.function_arguments_json_str}}
+                for tc in llm_response_1.tool_calls
+            ]
+        if "content" in assistant_msg_1_dict or "tool_calls" in assistant_msg_1_dict:
+            update_chat_history(chat_history, assistant_msg_1_dict)
+
+        final_response = "ToolAgent encountered an issue."
+        if llm_response_1.tool_calls:
+            observation_messages = await self.process_tool_calls(llm_response_1.tool_calls)
+            for obs_msg in observation_messages: update_chat_history(chat_history, obs_msg)
+            llm_response_2: StandardizedLLMResponse = await self.llm_service.get_llm_response(
+                model=self.model, messages=list(chat_history)
            )
-            final_response =
-
-
-            print(f"{Fore.CYAN}\nAssistant provided direct response (no tools used):{Fore.RESET}")
-            final_response = assistant_message_1.content
+            final_response = llm_response_2.text_content if llm_response_2.text_content else "Agent provided no final response after using tools."
+        elif llm_response_1.text_content is not None:
+            final_response = llm_response_1.text_content
         else:
-            print(f"{Fore.RED}Error:
-            final_response = "Error:
-
-        print(f"{Fore.GREEN}\nFinal Response:\n{final_response}{Fore.RESET}")
+            print(f"{Fore.RED}ToolAgent Error: LLM message has neither content nor tool_calls.{Fore.RESET}")
+            final_response = "Error: ToolAgent received an unexpected empty response from the LLM."
         return final_response
-
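The sketch below shows how the reworked 0.2.2 constructor might be driven. It is inferred from the signature in the diff above only: GroqService and ChromaStore are assumed class names (the wheel ships clap/llm_services/groq_service.py and clap/vector_stores/chroma_store.py, but their exact constructors are not shown in this diff), and the model id is a placeholder.

import asyncio

from clap.tool_pattern.tool_agent import ToolAgent
from clap.llm_services.groq_service import GroqService      # assumed class name
from clap.vector_stores.chroma_store import ChromaStore     # assumed class name

async def main() -> None:
    # Any LLMServiceInterface implementation satisfies the new llm_service parameter.
    agent = ToolAgent(
        llm_service=GroqService(),
        model="llama-3.3-70b-versatile",                    # placeholder model id
        vector_store=ChromaStore(collection_name="docs"),   # assumed constructor; enables vector_query
    )
    # run() gathers tool schemas, lets the LLM call tools once, then returns the final text.
    print(await agent.run("What do the indexed documents say about embeddings?"))

asyncio.run(main())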
clap/tools/__init__.py
CHANGED
clap/tools/email_tools.py
CHANGED
@@ -1,10 +1,9 @@
-# --- START OF agentic_patterns/tools/email_tools.py ---
 
 import os
 import smtplib
 import imaplib
 import email
-import json
+import json
 from email.mime.text import MIMEText
 from email.mime.multipart import MIMEMultipart
 from email.mime.base import MIMEBase
@@ -44,25 +43,25 @@ def _send_email_sync(recipient: str, subject: str, body: str, attachment_path: O
             encoders.encode_base64(part)
             part.add_header("Content-Disposition", f"attachment; filename={os.path.basename(attachment_path)}")
             msg.attach(part)
-
+
         with smtplib.SMTP(SMTP_HOST, SMTP_PORT) as server:
             server.starttls()
             server.login(SMTP_USERNAME, SMTP_PASSWORD)
             server.sendmail(SMTP_USERNAME, recipient, msg.as_string())
-
+
         if attachment_path and attachment_path.startswith("temp_attachments"):
             try: os.remove(attachment_path)
-            except OSError: pass
+            except OSError: pass
         return "Email sent successfully."
     except Exception as e:
         return f"Failed to send email: {e}"
 
 def _download_attachment_sync(attachment_url: str, attachment_filename: str) -> str:
     """Synchronous helper to download an attachment."""
-    temp_dir = "temp_attachments"
+    temp_dir = "temp_attachments"
     os.makedirs(temp_dir, exist_ok=True)
     file_path = os.path.join(temp_dir, attachment_filename)
-
+
     with requests.get(attachment_url, stream=True) as r:
         r.raise_for_status()
         with open(file_path, "wb") as f:
@@ -72,7 +71,7 @@ def _download_attachment_sync(attachment_url: str, attachment_filename: str) ->
 
 def _get_pre_staged_attachment_sync(attachment_name: str) -> Optional[str]:
     """Synchronous helper to get a pre-staged attachment."""
-    attachment_dir = "available_attachments"
+    attachment_dir = "available_attachments"
     file_path = os.path.join(attachment_dir, attachment_name)
     return file_path if os.path.exists(file_path) else None
 
@@ -95,7 +94,7 @@ def _fetch_emails_sync(folder: str, limit: int) -> str:
             return f"No emails found in folder '{folder}'."
 
         email_ids = data[0].split()
-
+
         ids_to_fetch = email_ids[-(limit):]
 
         for email_id_bytes in reversed(ids_to_fetch):
@@ -109,7 +108,7 @@ def _fetch_emails_sync(folder: str, limit: int) -> str:
                 subject = subject.decode(encoding or "utf-8")
             from_ = msg.get("From", "")
             date_ = msg.get("Date", "")
-
+
             snippet = ""
             if msg.is_multipart():
                 for part in msg.walk():
@@ -119,8 +118,8 @@ def _fetch_emails_sync(folder: str, limit: int) -> str:
                         try:
                             body = part.get_payload(decode=True)
                             snippet = body.decode(part.get_content_charset() or 'utf-8')
-                            snippet = " ".join(snippet.splitlines())
-                            snippet = snippet[:150] + "..."
+                            snippet = " ".join(snippet.splitlines())
+                            snippet = snippet[:150] + "..."
                             break
                         except Exception:
                             snippet = "[Could not decode body]"
@@ -141,7 +140,7 @@ def _fetch_emails_sync(folder: str, limit: int) -> str:
                 "date": date_,
                 "snippet": snippet
             })
-            if len(emails_data) >= limit:
+            if len(emails_data) >= limit:
                 break
 
         mail.logout()
@@ -149,7 +148,7 @@ def _fetch_emails_sync(folder: str, limit: int) -> str:
         if not emails_data:
             return f"No emails found in folder '{folder}'."
 
-
+
         result_text = f"Recent emails from {folder} (up to {limit}):\n\n"
         for i, email_data in enumerate(emails_data, 1):
             result_text += f"{i}. From: {email_data['from']}\n"
@@ -161,7 +160,7 @@ def _fetch_emails_sync(folder: str, limit: int) -> str:
     except Exception as e:
         return f"Failed to fetch emails: {e}"
 
-
+
 
 @tool
 async def send_email(recipient: str, subject: str, body: str,
@@ -195,7 +194,7 @@ async def send_email(recipient: str, subject: str, body: str,
             return f"Failed to download attachment from URL: {e}"
     elif attachment_name:
         try:
-
+
             print(f"[Email Tool] Checking for pre-staged attachment: {attachment_name}...")
             final_attachment_path = await anyio.to_thread.run_sync(
                 _get_pre_staged_attachment_sync, attachment_name
@@ -206,7 +205,7 @@ async def send_email(recipient: str, subject: str, body: str,
         except Exception as e:
             return f"Error accessing pre-staged attachment: {e}"
 
-
+
     print(f"[Email Tool] Sending email to {recipient}...")
     return await anyio.to_thread.run_sync(
         _send_email_sync, recipient, subject, body, final_attachment_path
@@ -224,7 +223,5 @@ async def fetch_recent_emails(folder: str = "INBOX", limit: int = 5) -> str:
     Returns:
         A formatted string containing details of the recent emails or an error message.
     """
-    # Run synchronous IMAP fetching in thread
     print(f"[Email Tool] Fetching up to {limit} emails from folder '{folder}'...")
     return await anyio.to_thread.run_sync(_fetch_emails_sync, folder, limit)
-
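A small usage sketch for these tools follows. It assumes the @tool decorator wraps each function as a clap Tool exposing an async run(**kwargs) method (consistent with tool_instance.run(**arguments) in the tool_agent.py diff above) and that the SMTP/IMAP environment variables the module reads are set; neither is confirmed beyond this diff.

import asyncio

from clap.tools.email_tools import send_email, fetch_recent_emails

async def main() -> None:
    # Summarize the five most recent INBOX messages (IMAP work runs in a worker thread).
    print(await fetch_recent_emails.run(folder="INBOX", limit=5))

    # Send a plain-text message; the attachment parameters are optional.
    print(await send_email.run(
        recipient="user@example.com",
        subject="Hello from CLAP",
        body="Sent via the async email tool.",
    ))

asyncio.run(main())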