clap-agents 0.1.1__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clap/__init__.py +13 -42
- clap/embedding/__init__.py +21 -0
- clap/embedding/base_embedding.py +28 -0
- clap/embedding/fastembed_embedding.py +75 -0
- clap/embedding/ollama_embedding.py +76 -0
- clap/embedding/sentence_transformer_embedding.py +44 -0
- clap/llm_services/__init__.py +15 -0
- clap/llm_services/base.py +3 -6
- clap/llm_services/google_openai_compat_service.py +1 -5
- clap/llm_services/groq_service.py +5 -13
- clap/llm_services/ollama_service.py +101 -0
- clap/mcp_client/client.py +7 -20
- clap/multiagent_pattern/agent.py +107 -34
- clap/multiagent_pattern/team.py +54 -29
- clap/react_pattern/react_agent.py +339 -126
- clap/tool_pattern/tool.py +94 -165
- clap/tool_pattern/tool_agent.py +171 -171
- clap/tools/__init__.py +1 -1
- clap/tools/email_tools.py +16 -19
- clap/tools/web_crawler.py +26 -18
- clap/utils/completions.py +35 -37
- clap/utils/extraction.py +3 -3
- clap/utils/rag_utils.py +183 -0
- clap/vector_stores/__init__.py +16 -0
- clap/vector_stores/base.py +85 -0
- clap/vector_stores/chroma_store.py +142 -0
- clap/vector_stores/qdrant_store.py +155 -0
- {clap_agents-0.1.1.dist-info → clap_agents-0.2.1.dist-info}/METADATA +201 -23
- clap_agents-0.2.1.dist-info/RECORD +38 -0
- clap_agents-0.1.1.dist-info/RECORD +0 -27
- {clap_agents-0.1.1.dist-info → clap_agents-0.2.1.dist-info}/WHEEL +0 -0
- {clap_agents-0.1.1.dist-info → clap_agents-0.2.1.dist-info}/licenses/LICENSE +0 -0
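The hunks below are from `clap/react_pattern/react_agent.py`, the largest change in this release (+339 -126). For orientation, here is a minimal usage sketch of the new constructor surface: the `ReactAgent` keyword arguments are taken from the new `__init__` signature in the diff, while `GroqService` and `ChromaStore` are assumed class names inferred from the module paths in the file list above, and their constructor arguments are not shown in this diff.

```python
# Hypothetical wiring for clap-agents 0.2.1, inferred only from this diff.
# ReactAgent's keyword arguments match the new __init__ signature below;
# GroqService and ChromaStore are assumed class names based on the module
# paths in the file list (their constructors are not shown in this diff).
import asyncio

from clap.react_pattern.react_agent import ReactAgent
from clap.llm_services.groq_service import GroqService      # assumed name
from clap.vector_stores.chroma_store import ChromaStore     # assumed name


async def main() -> None:
    vector_store = ChromaStore()  # constructor arguments not shown in this diff
    agent = ReactAgent(
        llm_service=GroqService(),           # any LLMServiceInterface implementation
        model="llama-3.3-70b-versatile",     # placeholder model id
        agent_name="DocsAgent",              # new in 0.2.1
        vector_store=vector_store,           # new in 0.2.1: exposes the vector_query tool
        system_prompt="You answer questions about the indexed docs.",
    )
    answer = await agent.run(
        user_msg="What does the setup guide say about API keys?",
        max_rounds=5,
    )
    print(answer)


if __name__ == "__main__":
    asyncio.run(main())
```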
clap/react_pattern/react_agent.py

@@ -10,144 +10,305 @@ from dotenv import load_dotenv
 
 from clap.llm_services.base import LLMServiceInterface, StandardizedLLMResponse, LLMToolCall
 from clap.tool_pattern.tool import Tool
-from clap.mcp_client.client import MCPClientManager
+from clap.mcp_client.client import MCPClientManager
+# from clap.mcp_client.client import MCPClientManager, SseServerConfig
 from clap.utils.completions import build_prompt_structure, ChatHistory, update_chat_history
-from
+from clap.vector_stores.base import VectorStoreInterface, QueryResult
+from clap.multiagent_pattern.agent import VECTOR_QUERY_TOOL_SCHEMA
+
+try:
+    from mcp import types as mcp_types
+except ImportError:
+    mcp_types = None
 
 
 load_dotenv()
 
 CORE_SYSTEM_PROMPT = """
-You are an AI assistant
-
-**Your Interaction Loop:**
-1. **Thought:** You MUST first analyze the query/situation and formulate a plan. Start your response **only** with your thought process, prefixed with "**Thought:**" on a new line.
-2. **Action Decision:** Based on your thought, decide if a tool is needed.
-3. **Observation:** If a tool is called, the system will provide the result. Analyze this in your next Thought.
-4. **Final Response:** When you have enough information, provide the final answer. Start this **only** with "**Final Response:**" on a new line, following your final thought.
-
-**Output Syntax:**
-
-* **For Tool Use:**
-Thought: [Your reasoning and plan to use a tool]
-*(System executes tool based on your thought's intent)*
-
-* **After Observation:**
-Thought: [Your analysis of the observation and next step]
-*(Either signal another tool use implicitly or provide final response)*
-
-* **For Final Answer:**
-Thought: [Your final reasoning]
-Final Response: [Your final answer to the user]
+You are an AI assistant using the ReAct (Reason->Act) process with tools (local, remote MCP, `vector_query`).
 
-
+**ReAct Loop:**
+1. **Thought:** REQUIRED start. Analyze the query/situation and plan next action. Start response ONLY with "Thought:".
+2. **Action Decision:** Decide if a tool is needed. **If using `vector_query`, the `query` argument MUST be the 'User Query:' from the main prompt.** Determine arguments for other tools based on your reasoning.
+3. **Tool Call / Next Step:** Use standard tool call format if applicable. If no tool call, proceed to step 5 (or 6 if done).
+4. **Observation:** (System provides tool results if a tool was called).
+5. **Thought:** Analyze observation (if any) and decide next step (another tool or final response).
+6. **Final Response:** REQUIRED end for the final answer. Must immediately follow the last Thought.
 
-**
+**Output Format:** Always start responses with "Thought:". Use "Final Response:" ONLY for the final answer, directly after the concluding Thought. No extra text before these prefixes. Be precise with tool arguments.
 """
 
 class ReactAgent:
     """
-    Async ReAct agent supporting local
+    Async ReAct agent supporting local tools, remote MCP tools, and vector store queries,
+    using a configurable LLM service.
     """
 
     def __init__(
         self,
         llm_service: LLMServiceInterface,
-        model: str,
+        model: str,
+        agent_name: str = "ReactAgent",
         tools: Optional[List[Tool]] = None,
-        mcp_manager: Optional[MCPClientManager] = None,
-        mcp_server_names: Optional[List[str]] = None,
+        mcp_manager: Optional[MCPClientManager] = None,
+        mcp_server_names: Optional[List[str]] = None,
+        vector_store: Optional[VectorStoreInterface] = None,
        system_prompt: str = "",
    ) -> None:
        self.llm_service = llm_service
        self.model = model
+       self.agent_name = agent_name
        self.system_prompt = (system_prompt + "\n\n" + CORE_SYSTEM_PROMPT).strip()
 
+
        self.local_tools = tools if tools else []
        self.local_tools_dict = {tool.name: tool for tool in self.local_tools}
        self.local_tool_schemas = [tool.fn_schema for tool in self.local_tools]
 
+
        self.mcp_manager = mcp_manager
        self.mcp_server_names = mcp_server_names or []
-       self.remote_tools_dict: Dict[str,
+       self.remote_tools_dict: Dict[str, Any] = {}
        self.remote_tool_server_map: Dict[str, str] = {}
 
+
+       self.vector_store = vector_store
+
+
    async def _get_combined_tool_schemas(self) -> List[Dict[str, Any]]:
-
+       """Combines schemas for local tools, remote MCP tools, and vector store query tool."""
+       all_schemas = list(self.local_tool_schemas)
+
+
+       if self.vector_store:
+           all_schemas.append(VECTOR_QUERY_TOOL_SCHEMA)
+           print(f"{Fore.BLUE}[{self.agent_name}] Vector query tool is available.{Fore.RESET}")
+
+
       self.remote_tools_dict = {}
       self.remote_tool_server_map = {}
-      if self.mcp_manager and self.mcp_server_names:
+      if self.mcp_manager and self.mcp_server_names and mcp_types:
           fetch_tasks = [self.mcp_manager.list_remote_tools(name) for name in self.mcp_server_names]
           results = await asyncio.gather(*fetch_tasks, return_exceptions=True)
           for server_name, result in zip(self.mcp_server_names, results):
               if isinstance(result, Exception):
-                  print(f"{Fore.RED}Error listing tools from MCP server '{server_name}': {result}{Fore.RESET}")
+                  print(f"{Fore.RED}[{self.agent_name}] Error listing tools from MCP server '{server_name}': {result}{Fore.RESET}")
                   continue
               if isinstance(result, list):
                   for tool in result:
+
                       if isinstance(tool, mcp_types.Tool):
-
-
-
-
-
-
-
-
+                           if tool.name in self.local_tools_dict or tool.name == VECTOR_QUERY_TOOL_SCHEMA["function"]["name"]:
+                               print(f"{Fore.YELLOW}Warning: Remote MCP tool '{tool.name}' conflicts with a local/vector tool. Skipping.{Fore.RESET}")
+                               continue
+                           if tool.name in self.remote_tools_dict:
+                               print(f"{Fore.YELLOW}Warning: Remote MCP tool '{tool.name}' conflicts with another remote tool. Skipping duplicate.{Fore.RESET}")
+                               continue
+
+                           self.remote_tools_dict[tool.name] = tool
+                           self.remote_tool_server_map[tool.name] = server_name
+
+                           translated_schema = {
+                               "type": "function",
+                               "function": {
+                                   "name": tool.name,
+                                   "description": tool.description or "",
+                                   "parameters": tool.inputSchema or {"type": "object", "properties": {}}  # Handle potentially missing schema
+                               }
+                           }
+                           all_schemas.append(translated_schema)
+                       else:
+                           print(f"{Fore.YELLOW}Warning: Received non-Tool object from {server_name}: {type(tool)}{Fore.RESET}")
+
+      print(f"{Fore.BLUE}[{self.agent_name}] Total tools available to LLM: {len(all_schemas)}{Fore.RESET}")
+      # print(f"Schemas: {json.dumps(all_schemas, indent=2)}")
       return all_schemas
 
-    async def process_tool_calls(self, tool_calls: List[LLMToolCall]) -> Dict[str, Any]: # Type hint changed
-        observations = {}
-        if not isinstance(tool_calls, list):
-            print(f"{Fore.RED}Error: Expected a list of LLMToolCall, got {type(tool_calls)}{Fore.RESET}")
-            return observations
-        tasks = [self._execute_single_tool_call(tc) for tc in tool_calls]
-        results = await asyncio.gather(*tasks, return_exceptions=True)
-        for result in results:
-            if isinstance(result, dict): observations.update(result)
-            elif isinstance(result, Exception): print(f"{Fore.RED}Error during concurrent tool execution: {result}{Fore.RESET}")
-            else: print(f"{Fore.RED}Error: Unexpected item in tool execution results: {result}{Fore.RESET}")
-        return observations
 
-
+
+    async def _execute_single_tool_call(self, tool_call: LLMToolCall) -> Dict[str, Any]:
+        """
+        Executes a single tool call (local, remote MCP, or vector query),
+        handling observation length limits for vector queries.
+        """
        tool_call_id = tool_call.id
        tool_name = tool_call.function_name
-       result_str = f"Error: Processing failed for tool call '{tool_name}' (id: {tool_call_id})."
+       result_str = f"Error: Processing failed for tool call '{tool_name}' (id: {tool_call_id})."  # Default error message
+
        try:
            arguments = json.loads(tool_call.function_arguments_json_str)
-
+
+
+            if tool_name == VECTOR_QUERY_TOOL_SCHEMA["function"]["name"]:
+                if not self.vector_store:
+                    print(f"{Fore.RED}Error: Agent {self.agent_name} received call for '{tool_name}' but has no vector store configured.{Fore.RESET}")
+                    result_str = f"Error: Vector store not available for agent {self.agent_name}."
+                else:
+                    print(f"{Fore.CYAN}\n[{self.agent_name}] Executing Vector Store Query Tool: {tool_name}{Fore.RESET}")
+                    print(f"Tool call ID: {tool_call_id}")
+                    print(f"Arguments: {arguments}")
+
+                    query_text = arguments.get("query")
+
+                    top_k_value_from_llm = arguments.get("top_k")
+                    default_top_k_from_schema = VECTOR_QUERY_TOOL_SCHEMA["function"]["parameters"]["properties"]["top_k"].get("default", 3)
+                    top_k = default_top_k_from_schema
+
+                    if top_k_value_from_llm is not None:
+                        try:
+                            top_k = int(top_k_value_from_llm)
+                        except (ValueError, TypeError):
+                            print(f"{Fore.YELLOW}Warning: LLM provided top_k '{top_k_value_from_llm}' is not a valid integer. Using schema default: {default_top_k_from_schema}.{Fore.RESET}")
+
+
+
+                    if not query_text:
+                        result_str = "Error: 'query' argument is required for vector_query tool."
+                    else:
+                        query_results: QueryResult = await self.vector_store.aquery(
+                            query_texts=[query_text],
+                            n_results=top_k,
+                            include=["documents", "metadatas", "distances"]
+                        )
+
+                        formatted_chunks_for_llm = []
+                        current_length = 0
+                        max_obs_len = 4000
+
+
+                        retrieved_docs = query_results.get("documents")
+                        retrieved_ids = query_results.get("ids")
+
+                        if retrieved_docs and isinstance(retrieved_docs, list) and len(retrieved_docs) > 0 and \
+                           retrieved_ids and isinstance(retrieved_ids, list) and len(retrieved_ids) > 0:
+
+                            docs_for_query = retrieved_docs[0]
+                            ids_for_query = retrieved_ids[0]
+
+                            metas_for_query = []
+                            if query_results.get("metadatas") and isinstance(query_results["metadatas"], list) and len(query_results["metadatas"]) > 0:
+                                metas_for_query = query_results["metadatas"][0]
+                            else:
+                                metas_for_query = [None] * len(docs_for_query)
+
+                            distances_for_query = []
+                            if query_results.get("distances") and isinstance(query_results["distances"], list) and len(query_results["distances"]) > 0:
+                                distances_for_query = query_results["distances"][0]
+                            else:
+                                distances_for_query = [None] * len(docs_for_query)
+
+
+                            for i, doc_content_item in enumerate(docs_for_query):
+                                current_meta = metas_for_query[i] if i < len(metas_for_query) else None
+                                current_id = str(ids_for_query[i]) if i < len(ids_for_query) else "N/A"
+                                current_distance = distances_for_query[i] if i < len(distances_for_query) and distances_for_query[i] is not None else float('nan')
+
+                                meta_str = json.dumps(current_meta, ensure_ascii=False) if current_meta else "{}"
+
+
+                                current_chunk_formatted = (
+                                    f"--- Retrieved Chunk {str(i+1)} (ID: {current_id}, Distance: {current_distance:.4f}) ---\n"
+                                    f"Metadata: {meta_str}\n"
+                                    f"Content: {str(doc_content_item)}\n\n"
+                                )
+
+                                chunk_len = len(current_chunk_formatted)
+
+                                if current_length + chunk_len <= max_obs_len:
+                                    formatted_chunks_for_llm.append(current_chunk_formatted)
+                                    current_length += chunk_len
+                                else:
+                                    print(f"{Fore.YELLOW}[{self.agent_name}] Observation limit ({max_obs_len} chars) reached. Included {len(formatted_chunks_for_llm)} full chunks out of {len(docs_for_query)} retrieved.{Fore.RESET}")
+                                    break
+
+                            if formatted_chunks_for_llm:
+                                header = f"Retrieved {len(formatted_chunks_for_llm)} relevant document chunks (out of {len(docs_for_query)} found for the query):\n\n"
+                                result_str = header + "".join(formatted_chunks_for_llm).strip()
+                            else:
+                                result_str = "No relevant documents found (or all retrieved documents were too long to fit context limit)."
+                        else:
+                            result_str = "No relevant documents found in vector store for the query."
+
+
+            elif tool_name in self.local_tools_dict:
                tool = self.local_tools_dict[tool_name]
-               print(f"{Fore.GREEN}\
+               print(f"{Fore.GREEN}\n[{self.agent_name}] Executing Local Tool: {tool_name}{Fore.RESET}")
+               print(f"Tool call ID: {tool_call_id}")
+               print(f"Arguments: {arguments}")
               result = await tool.run(**arguments)
+
+               if not isinstance(result, str):
+                   try:
+                       result_str = json.dumps(result, ensure_ascii=False)
+                   except TypeError:
+                       result_str = str(result)
+               else:
+                   result_str = result
+
+
            elif tool_name in self.remote_tool_server_map and self.mcp_manager:
                server_name = self.remote_tool_server_map[tool_name]
-               print(f"{Fore.CYAN}\
-
+               print(f"{Fore.CYAN}\n[{self.agent_name}] Executing Remote MCP Tool: {tool_name} on {server_name}{Fore.RESET}")
+               print(f"Tool call ID: {tool_call_id}")
+               print(f"Arguments: {arguments}")
+
+               result_str = await self.mcp_manager.call_remote_tool(server_name, tool_name, arguments)
+
+
            else:
-               print(f"{Fore.RED}Error: Tool '{tool_name}' not found.{Fore.RESET}")
+               print(f"{Fore.RED}Error: Tool '{tool_name}' not found locally, remotely, or as vector query.{Fore.RESET}")
               result_str = f"Error: Tool '{tool_name}' is not available."
-               return {tool_call_id: result_str}
 
-
-
-
-
-           except TypeError: result_str = str(result)
-           print(f"{Fore.GREEN}Tool '{tool_name}' result: {result_str[:100]}...{Fore.RESET}")
+
+           print(f"{Fore.GREEN}Tool '{tool_name}' observation prepared: {result_str[:150]}...{Fore.RESET}")
+
+
        except json.JSONDecodeError:
           print(f"{Fore.RED}Error decoding arguments for {tool_name}: {tool_call.function_arguments_json_str}{Fore.RESET}")
-           result_str = f"Error: Invalid arguments JSON provided for {tool_name}"
+           result_str = f"Error: Invalid arguments JSON provided for {tool_name}."
        except Exception as e:
-
-
+           print(f"{Fore.RED}Error executing/processing tool {tool_name} (id: {tool_call_id}): {e}{Fore.RESET}")
+
+           result_str = f"Error during execution of tool {tool_name}: {e}"
+
+
        return {tool_call_id: result_str}
 
 
+
+
+
+    async def process_tool_calls(self, tool_calls: List[LLMToolCall]) -> Dict[str, Any]:
+        """Processes multiple tool calls concurrently."""
+        observations = {}
+        if not isinstance(tool_calls, list):
+            print(f"{Fore.RED}Error: Expected a list of LLMToolCall, got {type(tool_calls)}{Fore.RESET}")
+            return observations
+
+        tasks = [self._execute_single_tool_call(tc) for tc in tool_calls]
+        results = await asyncio.gather(*tasks, return_exceptions=True)
+
+        for result in results:
+            if isinstance(result, dict) and len(result) == 1:
+                observations.update(result)
+            elif isinstance(result, Exception):
+
+                print(f"{Fore.RED}Error during concurrent tool execution gather: {result}{Fore.RESET}")
+
+                # observations[f"error_{len(observations)}"] = f"Tool execution failed: {result}"
+            else:
+                print(f"{Fore.RED}Error: Unexpected item in tool execution results: {result}{Fore.RESET}")
+
+        return observations
+
+
    async def run(
        self,
        user_msg: str,
        max_rounds: int = 5,
    ) -> str:
+       """Runs the ReAct loop for the agent."""
+       print(f"--- [{self.agent_name}] Starting ReAct Loop ---")
       combined_tool_schemas = await self._get_combined_tool_schemas()
 
       initial_user_message = build_prompt_structure(role="user", content=user_msg)
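One behavior worth noting from the hunk above: vector-store results are packed into the observation greedily, whole chunks only, under a hard `max_obs_len = 4000` character budget, and anything that does not fit is dropped with a warning. The sketch below restates just that packing rule for illustration; it is not a clap-agents API.

```python
# Restatement of the observation-packing rule from _execute_single_tool_call:
# whole formatted chunks are appended until the next one would exceed the
# budget, then packing stops. Illustration only, not a clap-agents API.
from typing import List


def pack_chunks(formatted_chunks: List[str], max_obs_len: int = 4000) -> List[str]:
    packed: List[str] = []
    current_length = 0
    for chunk in formatted_chunks:
        if current_length + len(chunk) <= max_obs_len:
            packed.append(chunk)
            current_length += len(chunk)
        else:
            break  # remaining chunks are dropped wholesale once the budget is hit
    return packed


# Example: three 1800-character chunks fit only twice into a 4000-character budget.
chunks = ["a" * 1800, "b" * 1800, "c" * 1800]
assert len(pack_chunks(chunks)) == 2
```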
@@ -158,107 +319,159 @@ class ReactAgent:
            ]
        )
 
-        final_response = "Agent failed to produce a response."
+        final_response = f"Agent {self.agent_name} failed to produce a final response."
 
        for round_num in range(max_rounds):
-            print(Fore.CYAN + f"\n--- Round {round_num + 1} ---")
+            print(Fore.CYAN + f"\n--- [{self.agent_name}] Round {round_num + 1}/{max_rounds} ---")
+
+
           current_tools = combined_tool_schemas if combined_tool_schemas else None
           current_tool_choice = "auto" if current_tools else "none"
 
+            print(f"[{self.agent_name}] Calling LLM...")
           llm_response: StandardizedLLMResponse = await self.llm_service.get_llm_response(
               model=self.model,
               messages=list(chat_history),
               tools=current_tools,
               tool_choice=current_tool_choice
           )
-            # --- End Change ---
 
-            assistant_content = llm_response.text_content
+            assistant_content = llm_response.text_content
+            llm_tool_calls = llm_response.tool_calls
+
           extracted_thought = None
           potential_final_response = None
 
+
           if assistant_content is not None:
-
-
-
-
-
-
+                lines = assistant_content.strip().split('\n')
+                thought_lines = []
+                response_lines = []
+                in_thought = False
+                in_response = False
+                for line in lines:
                   stripped_line = line.strip()
                   if stripped_line.startswith("Thought:"):
                       in_thought = True; in_response = False
                       thought_content = stripped_line[len("Thought:"):].strip()
                       if thought_content: thought_lines.append(thought_content)
                   elif stripped_line.startswith("Final Response:"):
-
-
-
-                    elif in_thought:
-
-
-
-
-
-
-
-
+                        in_response = True; in_thought = False
+                        response_content = stripped_line[len("Final Response:"):].strip()
+                        if response_content: response_lines.append(response_content)
+                    elif in_thought:
+
+                        thought_lines.append(line)
+                    elif in_response:
+                        response_lines.append(line)
+
+                if thought_lines:
+                    extracted_thought = "\n".join(thought_lines).strip()
+                    print(f"{Fore.MAGENTA}\n[{self.agent_name}] Thought:\n{extracted_thought}{Fore.RESET}")
+                else:
+                    print(f"{Fore.YELLOW}Warning: No 'Thought:' prefix found in LLM response content.{Fore.RESET}")
+
+
+                if response_lines:
+                    potential_final_response = "\n".join(response_lines).strip()
+
+
+
           assistant_msg_dict: Dict[str, Any] = {"role": "assistant"}
-            if assistant_content:
-                assistant_msg_dict["content"] = assistant_content
-            if
+            if assistant_content:
+                assistant_msg_dict["content"] = assistant_content
+            if llm_tool_calls:
+
               assistant_msg_dict["tool_calls"] = [
                   {
                       "id": tc.id,
-                        "type": "function",
+                        "type": "function",
                       "function": {
                           "name": tc.function_name,
                           "arguments": tc.function_arguments_json_str,
                       }
-                    } for tc in
+                    } for tc in llm_tool_calls
               ]
+
           update_chat_history(chat_history, assistant_msg_dict)
 
+
+            if llm_tool_calls:
+                print(f"{Fore.YELLOW}\n[{self.agent_name}] Assistant requests tool calls:{Fore.RESET}")
+
+                observations = await self.process_tool_calls(llm_tool_calls)
+                print(f"{Fore.BLUE}\n[{self.agent_name}] Observations generated: {len(observations)} items.{Fore.RESET}")
+
+                if not observations:
+                    print(f"{Fore.RED}Error: Tool processing failed to return any observations.{Fore.RESET}")
+
+                    error_message = build_prompt_structure(role="user", content="System Error: Tool execution failed to produce results. Please try again or proceed without tool results.")
+                    update_chat_history(chat_history, error_message)
+                    continue
+
+
+
+                tool_messages_added = 0
+                for tool_call in llm_tool_calls:
+                    tool_call_id = tool_call.id
+                    observation_content = observations.get(tool_call_id)
+                    if observation_content is None:
+                        print(f"{Fore.RED}Error: Observation missing for tool call ID {tool_call_id}.{Fore.RESET}")
+                        observation_content = f"Error: Result for tool call {tool_call_id} was not found."
+
+                    tool_message = build_prompt_structure(
+                        role="tool",
+                        content=str(observation_content),
+                        tool_call_id=tool_call_id
+                    )
+                    update_chat_history(chat_history, tool_message)
+                    tool_messages_added += 1
+
+                if tool_messages_added == 0:
+                    print(f"{Fore.RED}Critical Error: No tool messages were added to history despite tool calls being present.{Fore.RESET}")
+
+                    return f"Error: Agent {self.agent_name} failed during tool observation processing."
 
-            has_tool_calls = bool(llm_response.tool_calls)
-
-            if has_tool_calls:
-                print(f"{Fore.YELLOW}\nAssistant requests tool calls:{Fore.RESET}")
-                observations = await self.process_tool_calls(llm_response.tool_calls)
-                print(f"{Fore.BLUE}\nObservations: {observations}{Fore.RESET}")
-
-                for tool_call in llm_response.tool_calls:
-                    tool_call_id = tool_call.id
-                    result = observations.get(tool_call_id, "Error: Observation not found.")
-                    tool_message = build_prompt_structure(role="tool", content=str(result), tool_call_id=tool_call_id)
-                    update_chat_history(chat_history, tool_message)
 
           elif potential_final_response is not None:
-
+
+                print(f"{Fore.GREEN}\n[{self.agent_name}] Assistant provides final response:{Fore.RESET}")
               final_response = potential_final_response
               print(f"{Fore.GREEN}{final_response}{Fore.RESET}")
               return final_response
 
-            elif assistant_content is not None and not
-
-
-
-
+            elif assistant_content is not None and not llm_tool_calls:
+
+                print(f"{Fore.YELLOW}\n[{self.agent_name}] Assistant provided content without 'Final Response:' prefix and no tool calls. Treating as final answer.{Fore.RESET}")
+                final_response = assistant_content.strip()
+
+                if final_response.startswith("Thought:"):
+                    final_response = final_response[len("Thought:"):].strip()
+                print(f"{Fore.GREEN}{final_response}{Fore.RESET}")
+                return final_response
+
+            elif not llm_tool_calls and assistant_content is None:
+
+                print(f"{Fore.RED}Error: Assistant message has neither content nor tool calls.{Fore.RESET}")
+                final_response = f"Error: Agent {self.agent_name} received an empty response from the LLM."
+                return final_response
+
 
 
-
-
-            final_response = "Error: Received an unexpected empty or invalid response from the assistant."
-            return final_response
+
+        print(f"{Fore.YELLOW}\n[{self.agent_name}] Maximum rounds ({max_rounds}) reached.{Fore.RESET}")
 
-
-        if potential_final_response and not has_tool_calls:
+        if potential_final_response and not llm_tool_calls:
           final_response = potential_final_response
-            print(f"{Fore.GREEN}(Last response from agent): {final_response}{Fore.RESET}")
-        elif assistant_content and not
-
-
+            print(f"{Fore.GREEN}(Last response from agent {self.agent_name}): {final_response}{Fore.RESET}")
+        elif assistant_content and not llm_tool_calls:
+
+            final_response = assistant_content.strip()
+            if final_response.startswith("Thought:"):
+                final_response = final_response[len("Thought:"):].strip()
+            print(f"{Fore.GREEN}(Last raw content from agent {self.agent_name}): {final_response}{Fore.RESET}")
       else:
-            final_response = "Agent stopped after maximum rounds without reaching a final answer."
+            final_response = f"Agent {self.agent_name} stopped after maximum rounds without reaching a final answer."
       print(f"{Fore.YELLOW}{final_response}{Fore.RESET}")
 
       return final_response