clap_agents-0.1.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- clap/__init__.py +57 -0
- clap/llm_services/__init__.py +0 -0
- clap/llm_services/base.py +68 -0
- clap/llm_services/google_openai_compat_service.py +122 -0
- clap/llm_services/groq_service.py +100 -0
- clap/mcp_client/__init__.py +0 -0
- clap/mcp_client/client.py +208 -0
- clap/multiagent_pattern/__init__.py +0 -0
- clap/multiagent_pattern/agent.py +128 -0
- clap/multiagent_pattern/team.py +154 -0
- clap/react_pattern/__init__.py +0 -0
- clap/react_pattern/react_agent.py +265 -0
- clap/tool_pattern/__init__.py +0 -0
- clap/tool_pattern/tool.py +229 -0
- clap/tool_pattern/tool_agent.py +241 -0
- clap/tools/__init__.py +13 -0
- clap/tools/email_tools.py +230 -0
- clap/tools/web_crawler.py +82 -0
- clap/tools/web_search.py +24 -0
- clap/utils/__init__.py +0 -0
- clap/utils/completions.py +173 -0
- clap/utils/extraction.py +42 -0
- clap/utils/logging.py +28 -0
- clap_agents-0.1.1.dist-info/METADATA +346 -0
- clap_agents-0.1.1.dist-info/RECORD +27 -0
- clap_agents-0.1.1.dist-info/WHEEL +4 -0
- clap_agents-0.1.1.dist-info/licenses/LICENSE +202 -0
clap/multiagent_pattern/agent.py
@@ -0,0 +1,128 @@

import asyncio
import json
from textwrap import dedent
from typing import Any, List, Optional

from clap.llm_services.base import LLMServiceInterface
from clap.llm_services.groq_service import GroqService
from clap.mcp_client.client import MCPClientManager
from clap.react_pattern.react_agent import ReactAgent
from clap.tool_pattern.tool import Tool

class Agent:
    """
    Represents an AI agent using a configurable LLM Service.
    Can work in a team and use local or remote MCP tools.

    Args:
        name (str): Agent name.
        backstory (str): Agent background/persona.
        task_description (str): Description of the agent's specific task.
        task_expected_output (str): Expected output format.
        tools (Optional[List[Tool]]): Local tools for the agent.
        model (str): Model identifier string (passed to llm_service).
        llm_service (Optional[LLMServiceInterface]): Service for LLM calls (defaults to GroqService).
        mcp_manager (Optional[MCPClientManager]): Shared MCP client manager.
        mcp_server_names (Optional[List[str]]): MCP servers this agent uses.
    """
    def __init__(
        self,
        name: str,
        backstory: str,
        task_description: str,
        task_expected_output: str = "",
        tools: Optional[List[Tool]] = None,
        model: str = "llama-3.3-70b-versatile",
        llm_service: Optional[LLMServiceInterface] = None,
        mcp_manager: Optional[MCPClientManager] = None,
        mcp_server_names: Optional[List[str]] = None,
    ):
        self.name = name
        self.backstory = backstory
        self.task_description = task_description
        self.task_expected_output = task_expected_output
        self.mcp_manager = mcp_manager
        self.mcp_server_names = mcp_server_names or []

        llm_service_instance = llm_service or GroqService()

        self.react_agent = ReactAgent(
            llm_service=llm_service_instance,
            model=model,
            system_prompt=self.backstory,
            tools=tools or [],
            mcp_manager=self.mcp_manager,
            mcp_server_names=self.mcp_server_names
        )

        self.dependencies: List['Agent'] = []
        self.dependents: List['Agent'] = []
        self.received_context: dict[str, Any] = {}

        # Imported here to avoid a circular import with team.py
        from clap.multiagent_pattern.team import Team
        Team.register_agent(self)

    def __repr__(self): return f"{self.name}"

    def __rshift__(self, other: 'Agent') -> 'Agent': self.add_dependent(other); return other

    def __lshift__(self, other: 'Agent') -> 'Agent': self.add_dependency(other); return other

    def __rrshift__(self, other: List['Agent'] | 'Agent'): self.add_dependency(other); return self

    def __rlshift__(self, other: List['Agent'] | 'Agent'): self.add_dependent(other); return self

    def add_dependency(self, other: 'Agent' | List['Agent']):
        AgentClass = type(self)
        if isinstance(other, AgentClass):
            if other not in self.dependencies: self.dependencies.append(other)
            if self not in other.dependents: other.dependents.append(self)
        elif isinstance(other, list) and all(isinstance(item, AgentClass) for item in other):
            for item in other:
                if item not in self.dependencies: self.dependencies.append(item)
                if self not in item.dependents: item.dependents.append(self)
        else: raise TypeError("The dependency must be an instance or list of Agent.")

    def add_dependent(self, other: 'Agent' | List['Agent']):
        AgentClass = type(self)
        if isinstance(other, AgentClass):
            if self not in other.dependencies: other.dependencies.append(self)
            if other not in self.dependents: self.dependents.append(other)
        elif isinstance(other, list) and all(isinstance(item, AgentClass) for item in other):
            for item in other:
                if self not in item.dependencies: item.dependencies.append(self)
                if item not in self.dependents: self.dependents.append(item)
        else: raise TypeError("The dependent must be an instance or list of Agent.")

    def receive_context(self, sender_name: str, input_data: Any): self.received_context[sender_name] = input_data

    def create_prompt(self) -> str:
        context_str = "\n---\n".join(
            f"Context from {name}:\n{json.dumps(data, indent=2) if isinstance(data, dict) else str(data)}"
            for name, data in self.received_context.items()
        )
        if not context_str: context_str = "No context received from other agents."
        prompt = dedent(f"""
        You are an AI agent named {self.name}. Your backstory: {self.backstory}
        You are part of a team of agents working together to complete a task.
        Your immediate task is described below. Use the provided context from other agents if relevant.

        <task_description>
        {self.task_description}
        </task_description>

        <task_expected_output>
        {self.task_expected_output or 'Produce a meaningful response to complete the task.'}
        </task_expected_output>

        <context>
        {context_str}
        </context>

        Now, execute your task based on the description, context, and expected output. Your response:
        """).strip()
        return prompt

    async def run(self) -> dict[str, Any]:
        msg = self.create_prompt()
        raw_output = await self.react_agent.run(user_msg=msg)
        output_data = {"output": raw_output}
        for dependent in self.dependents:
            dependent.receive_context(self.name, output_data)
        return output_data
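A minimal usage sketch of the Agent class above (not part of the packaged code): it assumes the default GroqService can authenticate from the environment, and all agent names, backstories, and task strings are illustrative. The >> operator wires the writer as a dependent of the researcher, so the researcher's output is forwarded as context before the writer runs.

# Hypothetical sketch -- assumes Groq credentials are configured for the default GroqService.
import asyncio
from clap.multiagent_pattern.agent import Agent

async def main():
    researcher = Agent(
        name="Researcher",
        backstory="You gather concise, factual notes on a topic.",
        task_description="List three key facts about Python's asyncio module.",
        task_expected_output="A short bulleted list.",
    )
    writer = Agent(
        name="Writer",
        backstory="You write clear one-paragraph summaries.",
        task_description="Summarize the researcher's facts for a beginner.",
    )
    researcher >> writer  # __rshift__: writer becomes a dependent of researcher

    await researcher.run()        # output is forwarded to dependents via receive_context
    result = await writer.run()   # create_prompt() now includes the researcher's context
    print(result["output"])

asyncio.run(main())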
clap/multiagent_pattern/team.py
@@ -0,0 +1,154 @@
# --- START OF team.py (Parallel Execution - Python 3.10 Compatible) ---

import asyncio
from collections import deque
from typing import Any, Dict, List

from colorama import Fore
from graphviz import Digraph

from clap.utils.logging import fancy_print
from clap.multiagent_pattern.agent import Agent

class Team:
    """
    A class representing a team of agents working together asynchronously.
    Supports parallel execution of agents where dependencies allow.

    Attributes:
        current_team (Team | None): Class-level variable to track the active Team context. None if no team context is active.
        agents (list[Agent]): A list of agents in the team.
        results (dict[str, Any]): Stores the final results of each agent run.
    """
    current_team = None

    def __init__(self):
        self.agents: List[Agent] = []
        self.results: dict[str, Any] = {}

    # __enter__, __exit__, add_agent, register_agent remain the same
    def __enter__(self): Team.current_team = self; return self

    def __exit__(self, exc_type, exc_val, exc_tb): Team.current_team = None

    def add_agent(self, agent: Agent):
        if agent not in self.agents: self.agents.append(agent)

    @staticmethod
    def register_agent(agent: Agent):
        if Team.current_team is not None: Team.current_team.add_agent(agent)

    # topological_sort and plot remain the same
    def topological_sort(self) -> List[Agent]:
        in_degree: Dict[Agent, int] = {agent: 0 for agent in self.agents}
        adj: Dict[Agent, List[Agent]] = {agent: [] for agent in self.agents}
        agent_map: Dict[str, Agent] = {agent.name: agent for agent in self.agents}
        for agent in self.agents:
            valid_dependencies = [dep for dep in agent.dependencies if dep in self.agents]
            agent.dependencies = valid_dependencies
            for dependency in agent.dependencies:
                if dependency in agent_map.values():
                    adj[dependency].append(agent)
                    in_degree[agent] += 1
        queue: deque[Agent] = deque([agent for agent in self.agents if in_degree[agent] == 0])
        sorted_agents: List[Agent] = []
        processed_edges = 0
        while queue:
            current_agent = queue.popleft()
            sorted_agents.append(current_agent)
            for potential_dependent in self.agents:
                if current_agent in potential_dependent.dependencies:
                    in_degree[potential_dependent] -= 1
                    processed_edges += 1
                    if in_degree[potential_dependent] == 0:
                        queue.append(potential_dependent)
        if len(sorted_agents) != len(self.agents):
            detected_agents = {a.name for a in sorted_agents}
            missing_agents = {a.name for a in self.agents} - detected_agents
            remaining_degrees = {a.name: in_degree[a] for a in self.agents if a not in sorted_agents}
            raise ValueError(
                "Circular dependencies detected. Cannot perform topological sort. "
                f"Agents processed: {list(detected_agents)}. "
                f"Agents potentially in cycle: {list(missing_agents)}. "
                f"Remaining degrees: {remaining_degrees}"
            )
        return sorted_agents

    def plot(self):
        dot = Digraph(format="png")
        for agent in self.agents: dot.node(agent.name)
        for agent in self.agents:
            for dependent in agent.dependents:
                if dependent in self.agents: dot.edge(agent.name, dependent.name)
        return dot

    # --- Modified run method for parallel execution (Python 3.10 compatible) ---
    async def run(self):
        """
        Runs all agents in the team asynchronously, executing them in parallel
        when their dependencies are met. Compatible with Python 3.10+.
        """
        try:
            sorted_agents = self.topological_sort()
        except ValueError as e:
            print(f"{Fore.RED}Error during team setup: {e}{Fore.RESET}")
            return

        self.results = {}
        agent_tasks: Dict[Agent, asyncio.Task] = {}
        # Use a standard try/except block for Python 3.10 compatibility
        try:
            # Use asyncio.gather for task management, requiring manual cancellation on error
            tasks_to_gather = []
            for agent in sorted_agents:
                # Create the task but store it before adding to gather list
                task = asyncio.create_task(self._run_agent_task(agent, agent_tasks))
                agent_tasks[agent] = task
                tasks_to_gather.append(task)

            # Wait for all tasks to complete. If one fails, gather will raise that first exception.
            await asyncio.gather(*tasks_to_gather)
            print(f"{Fore.BLUE}--- All agent tasks finished ---{Fore.RESET}")

        except Exception as e:
            # If any task failed, asyncio.gather raises the exception of the first task that failed.
            print(f"{Fore.RED}One or more agents failed during execution:{Fore.RESET}")
            print(f"{Fore.RED}- Error: {e}{Fore.RESET}")
            # Note: With asyncio.gather, cancelling sibling tasks automatically
            # when one fails requires more complex manual handling.
            # For simplicity here, we let other tasks potentially run to completion
            # or fail independently, but we report the first failure.
            # Consider using anyio's TaskGroup if Python 3.11+ is an option for better cancellation.

    async def _run_agent_task(self, agent: Agent, all_tasks: Dict[Agent, asyncio.Task]):
        """
        An internal async function that wraps the execution of a single agent.
        It waits for dependencies to complete before running the agent.
        """
        dependency_tasks = [
            all_tasks[dep] for dep in agent.dependencies if dep in all_tasks
        ]
        if dependency_tasks:
            print(f"{Fore.YELLOW}Agent {agent.name} waiting for dependencies: {[dep.name for dep in agent.dependencies if dep in all_tasks]}...{Fore.RESET}")
            # Wait for all dependency tasks using asyncio.gather
            # We want errors in dependencies to propagate, so return_exceptions=False
            await asyncio.gather(*dependency_tasks)
            print(f"{Fore.GREEN}Agent {agent.name} dependencies met.{Fore.RESET}")

        fancy_print(f"STARTING AGENT: {agent.name}")
        try:
            agent_result = await agent.run()
            self.results[agent.name] = agent_result

            if isinstance(agent_result, dict) and 'output' in agent_result:
                print(f"{Fore.GREEN}Agent {agent.name} Result:\n{agent_result['output']}{Fore.RESET}")
            else:
                print(f"{Fore.YELLOW}Agent {agent.name} Result (raw):\n{str(agent_result)}{Fore.RESET}")
            fancy_print(f"FINISHED AGENT: {agent.name}")

        except Exception as e:
            fancy_print(f"ERROR IN AGENT: {agent.name}")
            print(f"{Fore.RED}Agent {agent.name} failed: {e}{Fore.RESET}")
            self.results[agent.name] = {"error": str(e)}
            # Re-raise the exception so asyncio.gather catches it
            raise

# --- END OF team.py (Parallel Execution - Python 3.10 Compatible) ---
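A minimal end-to-end sketch of the Team context manager above, assuming the agents and Groq credentials from the previous sketch. Agents constructed inside the with block register themselves through Team.register_agent, and team.run() executes them in dependency order, in parallel where the graph allows.

# Hypothetical sketch -- agent definitions are illustrative.
import asyncio
from clap.multiagent_pattern.agent import Agent
from clap.multiagent_pattern.team import Team

async def main():
    with Team() as team:
        researcher = Agent(
            name="Researcher",
            backstory="You gather concise, factual notes.",
            task_description="List three key facts about Python's asyncio module.",
        )
        writer = Agent(
            name="Writer",
            backstory="You write clear summaries.",
            task_description="Summarize the researcher's facts.",
        )
        researcher >> writer  # writer waits for researcher inside team.run()

    await team.run()     # topological_sort + asyncio.gather over _run_agent_task
    print(team.results)  # per-agent outputs keyed by agent name

asyncio.run(main())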
clap/react_pattern/react_agent.py
@@ -0,0 +1,265 @@

import json
import re
from typing import List, Dict, Any, Optional
import asyncio

from colorama import Fore
from dotenv import load_dotenv

from clap.llm_services.base import LLMServiceInterface, StandardizedLLMResponse, LLMToolCall
from clap.tool_pattern.tool import Tool
from clap.mcp_client.client import MCPClientManager, SseServerConfig
from clap.utils.completions import build_prompt_structure, ChatHistory, update_chat_history
from mcp import types as mcp_types

load_dotenv()

CORE_SYSTEM_PROMPT = """
You are an AI assistant that uses the ReAct (**Reason**->**Act**) process to answer questions and perform tasks using available tools (both local and remote MCP tools).

**Your Interaction Loop:**
1. **Thought:** You MUST first analyze the query/situation and formulate a plan. Start your response **only** with your thought process, prefixed with "**Thought:**" on a new line.
2. **Action Decision:** Based on your thought, decide if a tool is needed.
3. **Observation:** If a tool is called, the system will provide the result. Analyze this in your next Thought.
4. **Final Response:** When you have enough information, provide the final answer. Start this **only** with "**Final Response:**" on a new line, following your final thought.

**Output Syntax:**

* **For Tool Use:**
Thought: [Your reasoning and plan to use a tool]
*(System executes tool based on your thought's intent)*

* **After Observation:**
Thought: [Your analysis of the observation and next step]
*(Either signal another tool use implicitly or provide final response)*

* **For Final Answer:**
Thought: [Your final reasoning]
Final Response: [Your final answer to the user]

---

**Constraint:** Always begin your response content with "Thought:". If providing the final answer, include "Final Response:" after the final thought. Do not add any other text before "Thought:" or "Final Response:" on their respective lines.
"""

class ReactAgent:
    """
    Async ReAct agent supporting local and remote MCP tools, using a configurable LLM service.
    """

    def __init__(
        self,
        llm_service: LLMServiceInterface,
        model: str,  # Still need model name to pass TO the service
        tools: Optional[List[Tool]] = None,
        mcp_manager: Optional[MCPClientManager] = None,
        mcp_server_names: Optional[List[str]] = None,
        system_prompt: str = "",
    ) -> None:
        self.llm_service = llm_service
        self.model = model
        self.system_prompt = (system_prompt + "\n\n" + CORE_SYSTEM_PROMPT).strip()

        self.local_tools = tools if tools else []
        self.local_tools_dict = {tool.name: tool for tool in self.local_tools}
        self.local_tool_schemas = [tool.fn_schema for tool in self.local_tools]

        self.mcp_manager = mcp_manager
        self.mcp_server_names = mcp_server_names or []
        self.remote_tools_dict: Dict[str, mcp_types.Tool] = {}
        self.remote_tool_server_map: Dict[str, str] = {}

    async def _get_combined_tool_schemas(self) -> List[Dict[str, Any]]:
        all_schemas = list(self.local_tool_schemas)
        self.remote_tools_dict = {}
        self.remote_tool_server_map = {}
        if self.mcp_manager and self.mcp_server_names:
            fetch_tasks = [self.mcp_manager.list_remote_tools(name) for name in self.mcp_server_names]
            results = await asyncio.gather(*fetch_tasks, return_exceptions=True)
            for server_name, result in zip(self.mcp_server_names, results):
                if isinstance(result, Exception):
                    print(f"{Fore.RED}Error listing tools from MCP server '{server_name}': {result}{Fore.RESET}")
                    continue
                if isinstance(result, list):
                    for tool in result:
                        if isinstance(tool, mcp_types.Tool):
                            if tool.name in self.local_tools_dict: continue  # Skip conflicts
                            if tool.name in self.remote_tools_dict: continue  # Skip conflicts
                            self.remote_tools_dict[tool.name] = tool
                            self.remote_tool_server_map[tool.name] = server_name
                            translated_schema = {"type": "function", "function": {"name": tool.name, "description": tool.description or "", "parameters": tool.inputSchema}}
                            all_schemas.append(translated_schema)
                        else: print(f"{Fore.YELLOW}Warning: Received non-Tool object from {server_name}: {type(tool)}{Fore.RESET}")
        print(f"{Fore.BLUE}Total tools available to LLM: {len(all_schemas)}{Fore.RESET}")
        return all_schemas

    async def process_tool_calls(self, tool_calls: List[LLMToolCall]) -> Dict[str, Any]:  # Type hint changed
        observations = {}
        if not isinstance(tool_calls, list):
            print(f"{Fore.RED}Error: Expected a list of LLMToolCall, got {type(tool_calls)}{Fore.RESET}")
            return observations
        tasks = [self._execute_single_tool_call(tc) for tc in tool_calls]
        results = await asyncio.gather(*tasks, return_exceptions=True)
        for result in results:
            if isinstance(result, dict): observations.update(result)
            elif isinstance(result, Exception): print(f"{Fore.RED}Error during concurrent tool execution: {result}{Fore.RESET}")
            else: print(f"{Fore.RED}Error: Unexpected item in tool execution results: {result}{Fore.RESET}")
        return observations

    async def _execute_single_tool_call(self, tool_call: LLMToolCall) -> Dict[str, Any]:  # Type hint changed
        tool_call_id = tool_call.id
        tool_name = tool_call.function_name
        result_str = f"Error: Processing failed for tool call '{tool_name}' (id: {tool_call_id})."
        try:
            arguments = json.loads(tool_call.function_arguments_json_str)
            if tool_name in self.local_tools_dict:
                tool = self.local_tools_dict[tool_name]
                print(f"{Fore.GREEN}\nExecuting Local Tool: {tool_name}{Fore.RESET}...")
                result = await tool.run(**arguments)
            elif tool_name in self.remote_tool_server_map and self.mcp_manager:
                server_name = self.remote_tool_server_map[tool_name]
                print(f"{Fore.CYAN}\nExecuting Remote MCP Tool: {tool_name} on {server_name}{Fore.RESET}...")
                result = await self.mcp_manager.call_remote_tool(server_name, tool_name, arguments)
            else:
                print(f"{Fore.RED}Error: Tool '{tool_name}' not found.{Fore.RESET}")
                result_str = f"Error: Tool '{tool_name}' is not available."
                return {tool_call_id: result_str}

            if not isinstance(result, (str, int, float, bool, list, dict, type(None))):
                result_str = str(result)
            else:
                try: result_str = json.dumps(result)
                except TypeError: result_str = str(result)
            print(f"{Fore.GREEN}Tool '{tool_name}' result: {result_str[:100]}...{Fore.RESET}")
        except json.JSONDecodeError:
            print(f"{Fore.RED}Error decoding arguments for {tool_name}: {tool_call.function_arguments_json_str}{Fore.RESET}")
            result_str = f"Error: Invalid arguments JSON provided for {tool_name}"
        except Exception as e:
            print(f"{Fore.RED}Error executing tool {tool_name} (id: {tool_call_id}): {e}{Fore.RESET}")
            result_str = f"Error executing tool {tool_name}: {e}"
        return {tool_call_id: result_str}

    async def run(
        self,
        user_msg: str,
        max_rounds: int = 5,
    ) -> str:
        combined_tool_schemas = await self._get_combined_tool_schemas()

        initial_user_message = build_prompt_structure(role="user", content=user_msg)
        chat_history = ChatHistory(
            [
                build_prompt_structure(role="system", content=self.system_prompt),
                initial_user_message,
            ]
        )

        final_response = "Agent failed to produce a response."

        for round_num in range(max_rounds):
            print(Fore.CYAN + f"\n--- Round {round_num + 1} ---")
            current_tools = combined_tool_schemas if combined_tool_schemas else None
            current_tool_choice = "auto" if current_tools else "none"

            llm_response: StandardizedLLMResponse = await self.llm_service.get_llm_response(
                model=self.model,
                messages=list(chat_history),
                tools=current_tools,
                tool_choice=current_tool_choice
            )
            # --- End Change ---

            assistant_content = llm_response.text_content  # Use standardized response field
            extracted_thought = None
            potential_final_response = None

            if assistant_content is not None:
                lines = assistant_content.strip().split('\n')
                thought_lines = []
                response_lines = []
                in_thought = False
                in_response = False
                for line in lines:
                    stripped_line = line.strip()
                    if stripped_line.startswith("Thought:"):
                        in_thought = True; in_response = False
                        thought_content = stripped_line[len("Thought:"):].strip()
                        if thought_content: thought_lines.append(thought_content)
                    elif stripped_line.startswith("Final Response:"):
                        in_response = True; in_thought = False
                        response_content = stripped_line[len("Final Response:"):].strip()
                        if response_content: response_lines.append(response_content)
                    elif in_thought: thought_lines.append(line)
                    elif in_response: response_lines.append(line)
                if thought_lines:
                    extracted_thought = "\n".join(thought_lines).strip()
                    print(f"{Fore.MAGENTA}\nThought: {extracted_thought}{Fore.RESET}")
                if response_lines:
                    potential_final_response = "\n".join(response_lines).strip()
            # --- End prefix parsing ---

            assistant_msg_dict: Dict[str, Any] = {"role": "assistant"}
            if assistant_content:
                assistant_msg_dict["content"] = assistant_content  # Store original content with prefixes
            if llm_response.tool_calls:
                assistant_msg_dict["tool_calls"] = [
                    {
                        "id": tc.id,
                        "type": "function",  # Assuming 'function' type
                        "function": {
                            "name": tc.function_name,
                            "arguments": tc.function_arguments_json_str,
                        }
                    } for tc in llm_response.tool_calls
                ]
            update_chat_history(chat_history, assistant_msg_dict)

            has_tool_calls = bool(llm_response.tool_calls)

            if has_tool_calls:
                print(f"{Fore.YELLOW}\nAssistant requests tool calls:{Fore.RESET}")
                observations = await self.process_tool_calls(llm_response.tool_calls)
                print(f"{Fore.BLUE}\nObservations: {observations}{Fore.RESET}")

                for tool_call in llm_response.tool_calls:
                    tool_call_id = tool_call.id
                    result = observations.get(tool_call_id, "Error: Observation not found.")
                    tool_message = build_prompt_structure(role="tool", content=str(result), tool_call_id=tool_call_id)
                    update_chat_history(chat_history, tool_message)

            elif potential_final_response is not None:
                print(f"{Fore.CYAN}\nAssistant provides final response:{Fore.RESET}")
                final_response = potential_final_response
                print(f"{Fore.GREEN}{final_response}{Fore.RESET}")
                return final_response

            elif assistant_content is not None and not has_tool_calls:
                print(f"{Fore.YELLOW}\nAssistant provided content without 'Final Response:' prefix and no tool calls.{Fore.RESET}")
                final_response = assistant_content.strip()
                print(f"{Fore.GREEN}{final_response}{Fore.RESET}")
                return final_response

            elif not has_tool_calls and assistant_content is None:
                print(f"{Fore.RED}Error: Assistant message has neither content nor tool calls.{Fore.RESET}")
                final_response = "Error: Received an unexpected empty or invalid response from the assistant."
                return final_response

        print(f"{Fore.YELLOW}\nMaximum rounds ({max_rounds}) reached.{Fore.RESET}")
        if potential_final_response and not has_tool_calls:
            final_response = potential_final_response
            print(f"{Fore.GREEN}(Last response from agent): {final_response}{Fore.RESET}")
        elif assistant_content and not has_tool_calls:
            final_response = assistant_content.strip()  # Use stripped content
            print(f"{Fore.GREEN}(Last raw content from agent): {final_response}{Fore.RESET}")
        else:
            final_response = "Agent stopped after maximum rounds without reaching a final answer."
            print(f"{Fore.YELLOW}{final_response}{Fore.RESET}")

        return final_response
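For completeness, a small sketch of driving ReactAgent directly, without the Agent/Team wrappers. This is illustrative only: it assumes GroqService credentials are available and that the model name (taken from the Agent default above) is served by Groq. With no tools or MCP manager configured, the agent simply follows the Thought / Final Response protocol from CORE_SYSTEM_PROMPT.

# Hypothetical sketch -- model name and prompt are illustrative.
import asyncio
from clap.llm_services.groq_service import GroqService
from clap.react_pattern.react_agent import ReactAgent

async def main():
    agent = ReactAgent(
        llm_service=GroqService(),
        model="llama-3.3-70b-versatile",
        system_prompt="You are a helpful assistant.",
    )
    # run() loops for up to max_rounds and returns the text after "Final Response:".
    answer = await agent.run(user_msg="In one sentence, what does asyncio.gather do?")
    print(answer)

asyncio.run(main())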