universal-mcp-agents 0.1.9__py3-none-any.whl → 0.1.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of universal-mcp-agents might be problematic.
- universal_mcp/agents/__init__.py +9 -9
- universal_mcp/agents/base.py +13 -18
- universal_mcp/agents/bigtool2/__init__.py +6 -7
- universal_mcp/agents/bigtool2/__main__.py +2 -4
- universal_mcp/agents/bigtool2/agent.py +1 -0
- universal_mcp/agents/bigtool2/graph.py +48 -184
- universal_mcp/agents/bigtool2/meta_tools.py +120 -0
- universal_mcp/agents/bigtoolcache/__init__.py +31 -22
- universal_mcp/agents/bigtoolcache/__main__.py +1 -4
- universal_mcp/agents/bigtoolcache/agent.py +1 -3
- universal_mcp/agents/bigtoolcache/graph.py +101 -191
- universal_mcp/agents/bigtoolcache/prompts.py +7 -31
- universal_mcp/agents/bigtoolcache/tools.py +141 -0
- universal_mcp/agents/builder.py +10 -20
- universal_mcp/agents/cli.py +1 -2
- universal_mcp/agents/codeact/__init__.py +1 -1
- universal_mcp/agents/codeact/__main__.py +15 -5
- universal_mcp/agents/codeact/agent.py +55 -66
- universal_mcp/agents/codeact/prompts.py +9 -10
- universal_mcp/agents/codeact/sandbox.py +5 -2
- universal_mcp/agents/codeact/state.py +2 -4
- universal_mcp/agents/codeact/utils.py +12 -5
- universal_mcp/agents/hil.py +1 -6
- universal_mcp/agents/planner/__init__.py +1 -3
- universal_mcp/agents/planner/graph.py +1 -3
- universal_mcp/agents/react.py +14 -6
- universal_mcp/agents/shared/prompts.py +3 -3
- universal_mcp/agents/shared/tool_node.py +47 -47
- universal_mcp/agents/simple.py +2 -1
- universal_mcp/agents/utils.py +4 -15
- universal_mcp/applications/ui/app.py +5 -15
- {universal_mcp_agents-0.1.9.dist-info → universal_mcp_agents-0.1.10.dist-info}/METADATA +2 -1
- universal_mcp_agents-0.1.10.dist-info/RECORD +42 -0
- universal_mcp/agents/autoagent/__init__.py +0 -30
- universal_mcp/agents/autoagent/__main__.py +0 -25
- universal_mcp/agents/autoagent/context.py +0 -26
- universal_mcp/agents/autoagent/graph.py +0 -170
- universal_mcp/agents/autoagent/prompts.py +0 -9
- universal_mcp/agents/autoagent/state.py +0 -27
- universal_mcp/agents/autoagent/utils.py +0 -13
- universal_mcp/agents/bigtool/__init__.py +0 -58
- universal_mcp/agents/bigtool/__main__.py +0 -23
- universal_mcp/agents/bigtool/graph.py +0 -210
- universal_mcp/agents/bigtool/prompts.py +0 -31
- universal_mcp/agents/bigtool/state.py +0 -27
- universal_mcp/agents/bigtoolcache/tools_all.txt +0 -956
- universal_mcp/agents/bigtoolcache/tools_important.txt +0 -474
- universal_mcp_agents-0.1.9.dist-info/RECORD +0 -54
- {universal_mcp_agents-0.1.9.dist-info → universal_mcp_agents-0.1.10.dist-info}/WHEEL +0 -0
universal_mcp/agents/builder.py
CHANGED
@@ -1,4 +1,5 @@
 import asyncio
+from collections import defaultdict
 from collections.abc import Sequence
 from typing import Annotated, TypedDict
 
@@ -15,7 +16,7 @@ from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.llm import load_chat_model
 from universal_mcp.agents.shared.tool_node import build_tool_node_graph
 from universal_mcp.agents.utils import messages_to_list
-
+
 
 class Agent(BaseModel):
     """Agent that can be created by the builder."""
@@ -24,9 +25,7 @@ class Agent(BaseModel):
     description: str = Field(description="A small description of the agent.")
     expertise: str = Field(description="The expertise of the agent.")
     instructions: str = Field(description="The instructions for the agent to follow.")
-    schedule: str | None = Field(
-        description="The cron expression for the agent to run on.", default=None
-    )
+    schedule: str | None = Field(description="The cron expression for the agent to run on.", default=None)
 
 
 class BuilderState(TypedDict):
@@ -79,9 +78,7 @@ Create an agent that feels thoughtfully designed, intelligent, and professionall
 """
 
 
-async def generate_agent(
-    llm: BaseChatModel, task: str, old_agent: Agent | None = None
-) -> Agent:
+async def generate_agent(llm: BaseChatModel, task: str, old_agent: Agent | None = None) -> Agent:
     """Generates an agent from a task, optionally modifying an existing one."""
     prompt_parts = [AGENT_BUILDER_INSTRUCTIONS]
     if old_agent:
@@ -111,7 +108,7 @@ class BuilderAgent(BaseAgent):
     ):
         super().__init__(name, instructions, model, memory, **kwargs)
         self.registry = registry
-        self.llm: BaseChatModel = load_chat_model(model)
+        self.llm: BaseChatModel = load_chat_model(model, thinking=False)
 
     async def _create_agent(self, state: BuilderState):
         last_message = state["messages"][-1]
@@ -129,11 +126,7 @@ class BuilderAgent(BaseAgent):
         yield {
             "user_task": task,
             "generated_agent": generated_agent,
-            "messages": [
-                AIMessage(
-                    content=("I've designed an agent to help you with your task.")
-                )
-            ],
+            "messages": [AIMessage(content=("I've designed an agent to help you with your task."))],
         }
 
     async def _create_tool_config(self, state: BuilderState):
@@ -146,7 +139,7 @@ class BuilderAgent(BaseAgent):
             ]
         }
         tool_finder_graph = build_tool_node_graph(self.llm, self.registry)
-
+
         initial_state = {
             "original_task": task,
             "messages": [HumanMessage(content=task)],
@@ -165,9 +158,7 @@ class BuilderAgent(BaseAgent):
                 apps_with_tools[app_id].extend(tool_ids)
 
             # Convert to a regular dict and remove any duplicate tool_ids for the same app
-            tool_config = {
-                app_id: list(set(tools)) for app_id, tools in apps_with_tools.items()
-            }
+            tool_config = {app_id: list(set(tools)) for app_id, tools in apps_with_tools.items()}
             final_message = "I have selected the necessary tools for the agent. The agent is ready!"
         else:
             # Handle the case where the graph failed to create a plan
@@ -175,9 +166,7 @@ class BuilderAgent(BaseAgent):
 
         yield {
            "tool_config": tool_config,
-            "messages": [
-                AIMessage(content=final_message)
-            ],
+            "messages": [AIMessage(content=final_message)],
         }
 
     async def _build_graph(self):
@@ -205,6 +194,7 @@ async def main():
         "Send a daily email to manoj@agentr.dev with daily agenda of the day",
     )
    from rich import print
+
    print(messages_to_list(result["messages"]))
    print(result["generated_agent"])
    print(result["tool_config"])
universal_mcp/agents/cli.py
CHANGED
@@ -20,7 +20,6 @@ def run(name: str = "react"):
     """Run the agent CLI"""
     import asyncio
 
-
     setup_logger(log_file=None, level="ERROR")
     client = AgentrClient()
     params = {
@@ -30,7 +29,7 @@ def run(name: str = "react"):
         "memory": MemorySaver(),
         "tools": {
             "google_mail": ["send_email"],
-        }
+        },
     }
     agent_cls = get_agent(name)
     agent = agent_cls(name, **params)
universal_mcp/agents/codeact/__main__.py
CHANGED
@@ -1,23 +1,33 @@
 import asyncio
 
+from langgraph.checkpoint.memory import MemorySaver
+from rich import print
 from universal_mcp.agentr.registry import AgentrRegistry
+
 from universal_mcp.agents.codeact.agent import CodeActAgent
 from universal_mcp.agents.utils import messages_to_list
 
 
 async def main():
+    memory = MemorySaver()
     agent = CodeActAgent(
         "CodeAct Agent",
         instructions="Be very concise in your answers.",
-        model="
-        tools={"google_mail": ["
+        model="anthropic:claude-4-sonnet-20250514",
+        tools={"google_mail": ["list_messages"]},
         registry=AgentrRegistry(),
+        memory=memory,
     )
+    print("Starting agent...")
+    # await agent.ainit()
+    # await agent.run_interactive()
+    # async for event in agent.stream(
+    #     user_input="Fetch unsubscribe links from my Gmail inbox for promo emails I have received in the last 7 days"
+    # ):
+    #     print(event.content, end="")
     result = await agent.invoke(
-        "
+        user_input="Fetch unsubscribe links from my Gmail inbox for promo emails I have received in the last 7 days"
     )
-    from rich import print
-
     print(messages_to_list(result["messages"]))
 
 
universal_mcp/agents/codeact/agent.py
CHANGED
@@ -1,25 +1,25 @@
-import
-from typing import Callable, Union
+from collections.abc import Callable
 
-from langchain_core.
-from langchain_core.tools import StructuredTool
+from langchain_core.messages import AIMessageChunk
+from langchain_core.tools import StructuredTool
+from langchain_core.tools import tool as create_tool
 from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.graph import END, StateGraph
-from
+from universal_mcp.logger import logger
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolConfig, ToolFormat
 
 from universal_mcp.agents.base import BaseAgent
-from universal_mcp.agents.llm import load_chat_model
 from universal_mcp.agents.codeact.prompts import (
-    create_default_prompt,
-    make_safe_function_name,
     REFLECTION_PROMPT,
     RETRY_PROMPT,
+    create_default_prompt,
+    make_safe_function_name,
 )
 from universal_mcp.agents.codeact.sandbox import eval_unsafe
 from universal_mcp.agents.codeact.state import CodeActState
 from universal_mcp.agents.codeact.utils import extract_and_combine_codeblocks
+from universal_mcp.agents.llm import load_chat_model
 
 
 class CodeActAgent(BaseAgent):
@@ -31,51 +31,41 @@ class CodeActAgent(BaseAgent):
         memory: BaseCheckpointSaver | None = None,
         tools: ToolConfig | None = None,
         registry: ToolRegistry | None = None,
-
-        reflection_prompt: str = None,
-        reflection_model: BaseChatModel = None,
-        max_reflections: int = 3,
+        sandbox_timeout: int = 20,
         **kwargs,
     ):
         super().__init__(name, instructions, model, memory, **kwargs)
-        self.model_instance = load_chat_model(model)
+        self.model_instance = load_chat_model(model, thinking=False)
         self.tools_config = tools or {}
         self.registry = registry
         self.eval_fn = eval_unsafe
-        self.reflection_prompt =
-        self.reflection_model =
-        self.max_reflections =
+        self.reflection_prompt = REFLECTION_PROMPT
+        self.reflection_model = self.model_instance
+        self.max_reflections = 3
         self.tools_context = {}
-        self.
+        self.context = {}
+        self.sandbox_timeout = sandbox_timeout
+        self.processed_tools: list[StructuredTool | Callable] = []
 
     async def _build_graph(self):
         if self.tools_config:
             if not self.registry:
                 raise ValueError("Tools are configured but no registry is provided")
             # Langchain tools are fine
-            exported_tools = await self.registry.export_tools(
-
-
-
-                t if isinstance(t, StructuredTool) else create_tool(t)
-                for t in exported_tools
-            ]
-
-            self.instructions = create_default_prompt(
-                self.processed_tools, self.instructions
-            )
+            exported_tools = await self.registry.export_tools(self.tools_config, ToolFormat.LANGCHAIN)
+            self.processed_tools = [t if isinstance(t, StructuredTool) else create_tool(t) for t in exported_tools]
+
+            self.instructions = create_default_prompt(self.processed_tools, self.instructions)
 
             for tool in self.processed_tools:
                 safe_name = make_safe_function_name(tool.name)
-                tool_callable = (
-                    tool.coroutine
-                    if hasattr(tool, "coroutine") and tool.coroutine is not None
-                    else tool.func
-                )
+                tool_callable = tool.coroutine if hasattr(tool, "coroutine") and tool.coroutine is not None else tool.func
                 self.tools_context[safe_name] = tool_callable
+
+            self.context = {**self.context, **self.tools_context}
 
         agent = StateGraph(CodeActState)
-        agent.add_node("call_model",
+        agent.add_node("call_model", self.call_model)
         agent.add_node("sandbox", self.sandbox)
 
         agent.set_entry_point("call_model")
@@ -91,21 +81,34 @@ class CodeActAgent(BaseAgent):
         return agent.compile(checkpointer=self.memory)
 
     def should_run_sandbox(self, state: CodeActState) -> str:
+        last_message = state["messages"][-1]
+        if isinstance(last_message.content, str) and "TASK_COMPLETE" in last_message.content:
+            return END
+
         if state.get("script"):
             return "sandbox"
         return END
 
-    def
-
-
+    def _extract_content(self, response: AIMessageChunk) -> str:
+        if isinstance(response.content, list):
+            content = " ".join([c.get("text", "") for c in response.content])
+        else:
+            content = response.content
+        return content
+
+    async def call_model(self, state: CodeActState) -> dict:
         model = self.model_instance
         reflection_model = self.reflection_model
 
-        messages = [{"role": "system", "content": instructions}] + state["messages"]
+        messages = [{"role": "system", "content": self.instructions}] + state["messages"]
 
-        response = model.
+        response = await model.ainvoke(messages)
 
-
+        text_content = self._extract_content(response)
+        if not isinstance(text_content, str):
+            raise ValueError(f"Content is not a string: {text_content}")
+        code = extract_and_combine_codeblocks(text_content)
+        logger.debug(f"Code: {code}")
 
         if self.max_reflections > 0 and code:
             reflection_count = 0
@@ -116,34 +119,28 @@ class CodeActAgent(BaseAgent):
                         for m in state["messages"]
                     ]
                 )
-                conversation_history +=
-                    f'\n<message role="assistant">\n{response.content}\n</message>'
-                )
+                conversation_history += f'\n<message role="assistant">\n{response.content}\n</message>'
 
-                formatted_prompt = REFLECTION_PROMPT.format(
-                    conversation_history=conversation_history
-                )
+                formatted_prompt = REFLECTION_PROMPT.format(conversation_history=conversation_history)
 
                 reflection_messages = [
                     {"role": "system", "content": self.reflection_prompt},
                     {"role": "user", "content": formatted_prompt},
                 ]
-                reflection_result = reflection_model.
+                reflection_result = await reflection_model.ainvoke(reflection_messages)
 
                 if "NONE" in reflection_result.content:
                     break
 
-                retry_prompt = RETRY_PROMPT.format(
-                    reflection_result=reflection_result.content
-                )
+                retry_prompt = RETRY_PROMPT.format(reflection_result=reflection_result.content)
 
                 regeneration_messages = [
-                    {"role": "system", "content": instructions},
+                    {"role": "system", "content": self.instructions},
                     *state["messages"],
                     {"role": "assistant", "content": response.content},
                     {"role": "user", "content": retry_prompt},
                 ]
-                response = model.
+                response = await model.ainvoke(regeneration_messages)
 
                 code = extract_and_combine_codeblocks(response.content)
 
@@ -152,20 +149,12 @@ class CodeActAgent(BaseAgent):
 
                 reflection_count += 1
 
-
-            return {"messages": [response], "script": code}
-        else:
-            return {"messages": [response], "script": None}
+        return {"messages": [response], "script": code}
 
     async def sandbox(self, state: CodeActState) -> dict:
-
-        context = {**
-        if inspect.iscoroutinefunction(self.eval_fn):
-            output, new_vars = await self.eval_fn(state["script"], context)
-        else:
-            output, new_vars = self.eval_fn(state["script"], context)
-        new_context = {**existing_context, **new_vars}
+        output, new_vars = await self.eval_fn(state["script"], self.context, timeout=self.sandbox_timeout)
+        self.context = {**self.context, **new_vars}
         return {
-            "messages": [
-            "
-        }
+            "messages": [AIMessageChunk(content=output.strip())],
+            "script": None,
+        }
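The reworked graph above ends the loop when the model's last message contains the TASK_COMPLETE sentinel and only routes to the sandbox when a script was extracted. A standalone sketch of that routing logic, using plain dicts instead of LangGraph message objects (an illustration only, not the package's exact code):

    END = "__end__"  # stand-in for langgraph.graph.END

    def should_run_sandbox(state: dict) -> str:
        """Route to the sandbox only when there is code to run and the task is not finished."""
        last_content = state["messages"][-1]["content"]
        if isinstance(last_content, str) and "TASK_COMPLETE" in last_content:
            return END
        if state.get("script"):
            return "sandbox"
        return END

    # A code block was produced and the task is not done -> run the sandbox.
    print(should_run_sandbox({"messages": [{"content": "running step 1"}], "script": "print('hi')"}))
    # The model declared completion -> stop, even if a script is present.
    print(should_run_sandbox({"messages": [{"content": "All done. TASK_COMPLETE"}], "script": "x = 1"}))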
universal_mcp/agents/codeact/prompts.py
CHANGED
@@ -1,8 +1,8 @@
 import inspect
 import re
-from
+from collections.abc import Sequence
 
-from langchain_core.tools import StructuredTool
+from langchain_core.tools import StructuredTool
 
 
 def make_safe_function_name(name: str) -> str:
@@ -20,7 +20,7 @@ def make_safe_function_name(name: str) -> str:
 
 def create_default_prompt(
     tools: Sequence[StructuredTool],
-    base_prompt:
+    base_prompt: str | None = None,
 ):
     """Create default prompt for the CodeAct agent."""
     prompt = f"{base_prompt}\n\n" if base_prompt else ""
@@ -32,11 +32,7 @@ In addition to the Python Standard Library, you can use the following functions:
 
     for tool in tools:
         # Use coroutine if it exists, otherwise use func
-        tool_callable = (
-            tool.coroutine
-            if hasattr(tool, "coroutine") and tool.coroutine is not None
-            else tool.func
-        )
+        tool_callable = tool.coroutine if hasattr(tool, "coroutine") and tool.coroutine is not None else tool.func
         # Create a safe function name
         safe_name = make_safe_function_name(tool.name)
         # Determine if it's an async function
@@ -44,7 +40,7 @@ In addition to the Python Standard Library, you can use the following functions:
         # Add appropriate function definition
         prompt += f'''\n{"async " if is_async else ""}def {safe_name}{str(inspect.signature(tool_callable))}:
     """{tool.description}"""
-    ...
+    ...
 '''
 
     prompt += """
@@ -66,7 +62,10 @@ This means:
 - Print a sample entry: print(f"Sample: {result[0] if isinstance(result, (list, tuple)) and len(result) > 0 else result}")
 - Then, based on this knowledge, write the code to process/use this data
 
-Reminder: use Python code snippets to call tools
+Reminder: use Python code snippets to call tools
+
+When you have completely finished the task and provided the final answer, you MUST end your response with the exact phrase "TASK_COMPLETE".
+"""
     return prompt
 
 
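create_default_prompt above advertises each tool to the model as a Python stub built from its name, signature, and description. A small sketch of that rendering idea on a hypothetical tool function (the real code takes the callable off a StructuredTool and uses the package's own make_safe_function_name):

    import inspect
    import re

    def make_safe_function_name(name: str) -> str:
        # Simplified stand-in: keep only letters, digits, and underscores.
        return re.sub(r"\W", "_", name)

    async def send_email(to: str, subject: str, body: str) -> dict:
        """Send an email via the connected mail account."""  # hypothetical tool

    safe_name = make_safe_function_name("send_email")
    stub = (
        f"async def {safe_name}{inspect.signature(send_email)}:\n"
        f'    """{send_email.__doc__}"""\n'
        f"    ...\n"
    )
    print(stub)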
universal_mcp/agents/codeact/sandbox.py
CHANGED
@@ -5,7 +5,7 @@ import io
 from typing import Any
 
 
-async def eval_unsafe(code: str, _locals: dict[str, Any]) -> tuple[str, dict[str, Any]]:
+async def eval_unsafe(code: str, _locals: dict[str, Any], timeout: int = 10) -> tuple[str, dict[str, Any]]:
     """
     Execute code in a non-blocking way and return the output and changed variables.
     """
@@ -29,7 +29,10 @@ async def eval_unsafe(code: str, _locals: dict[str, Any]) -> tuple[str, dict[str
             return f"Error during execution: {repr(e)}"
 
     # Run the synchronous exec in a separate thread to avoid blocking the event loop.
-
+    try:
+        output = await asyncio.wait_for(asyncio.to_thread(sync_eval_in_thread), timeout=timeout)
+    except asyncio.TimeoutError:
+        output = f"Error: Code execution timed out after {timeout} seconds."
    result += output
 
     # Identify all variables that are not part of the original builtins
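The sandbox change wraps the threaded exec in asyncio.wait_for so a runaway script cannot block the agent past the configured timeout. A minimal standalone sketch of that standard-library pattern (not the package's sandbox itself):

    import asyncio
    import time

    def slow_task() -> str:
        time.sleep(5)  # simulate long-running, blocking user code
        return "finished"

    async def run_with_timeout(timeout: int = 2) -> str:
        try:
            # Run the blocking call in a worker thread and bound its wall-clock time.
            return await asyncio.wait_for(asyncio.to_thread(slow_task), timeout=timeout)
        except asyncio.TimeoutError:
            return f"Error: Code execution timed out after {timeout} seconds."

    print(asyncio.run(run_with_timeout()))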
universal_mcp/agents/codeact/state.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Any
+from typing import Any
 
 from langgraph.graph import MessagesState
 
@@ -6,7 +6,5 @@ from langgraph.graph import MessagesState
 class CodeActState(MessagesState):
     """State for CodeAct agent."""
 
-    script:
+    script: str | None
     """The Python code script to be executed."""
-    context: dict[str, Any]
-    """Dictionary containing the execution context with available tools and variables."""
universal_mcp/agents/codeact/utils.py
CHANGED
@@ -1,5 +1,7 @@
 import re
 
+from universal_mcp.logger import logger
+
 BACKTICK_PATTERN = r"(?:^|\n)```(.*?)(?:```(?:\n|$))"
 
 
@@ -37,7 +39,12 @@ def extract_and_combine_codeblocks(text: str) -> str:
     """
     # Find all code blocks in the text using regex
     # Pattern matches anything between triple backticks, with or without a language identifier
-
+    try:
+        code_blocks = re.findall(BACKTICK_PATTERN, text, re.DOTALL)
+    except Exception as e:
+        logger.error(f"Error extracting code blocks: {e}")
+        logger.error(f"Text: {text}")
+        return ""
 
     if not code_blocks:
         return ""
@@ -46,15 +53,15 @@ def extract_and_combine_codeblocks(text: str) -> str:
     processed_blocks = []
     for block in code_blocks:
         # Strip leading and trailing whitespace
-
+        cleaned_block = block.strip()
 
         # If the first line looks like a language identifier, remove it
-        lines =
+        lines = cleaned_block.split("\n")
         if lines and (not lines[0].strip() or " " not in lines[0].strip()):
             # First line is empty or likely a language identifier (no spaces)
-
+            cleaned_block = "\n".join(lines[1:])
 
-        processed_blocks.append(
+        processed_blocks.append(cleaned_block)
 
     # Combine all codeblocks with newlines between them
     combined_code = "\n\n".join(processed_blocks)
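The utility keeps the same BACKTICK_PATTERN for pulling fenced code blocks out of a model reply before stripping an optional language identifier. A quick standalone check of that pattern on a made-up reply:

    import re

    BACKTICK_PATTERN = r"(?:^|\n)```(.*?)(?:```(?:\n|$))"

    sample = "Here is the plan.\n```python\nprint('hello')\n```\nDone."
    for block in re.findall(BACKTICK_PATTERN, sample, re.DOTALL):
        lines = block.strip().split("\n")
        # Drop a first line that looks like a language identifier (no spaces).
        if lines and (not lines[0].strip() or " " not in lines[0].strip()):
            lines = lines[1:]
        print("\n".join(lines))  # -> print('hello')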
universal_mcp/agents/hil.py
CHANGED
@@ -63,12 +63,7 @@ def handle_interrupt(interrupt: Interrupt) -> str | bool:
         value = input("Do you accept this? (y/n): " + interrupt.value["question"])
         return value.lower() in ["y", "yes"]
     elif interrupt_type == "choice":
-        value = input(
-            "Enter your choice: "
-            + interrupt.value["question"]
-            + " "
-            + ", ".join(interrupt.value["choices"])
-        )
+        value = input("Enter your choice: " + interrupt.value["question"] + " " + ", ".join(interrupt.value["choices"]))
         if value in interrupt.value["choices"]:
             return value
         else:
universal_mcp/agents/planner/__init__.py
CHANGED
@@ -26,9 +26,7 @@ class PlannerAgent(BaseAgent):
         self.executor_agent_cls = executor_agent_cls
 
     def _build_system_message(self):
-        return DEVELOPER_PROMPT.format(
-            name=self.name, instructions=self.instructions
-        )
+        return DEVELOPER_PROMPT.format(name=self.name, instructions=self.instructions)
 
     async def _build_graph(self):
         return build_graph(
universal_mcp/agents/planner/graph.py
CHANGED
@@ -18,9 +18,7 @@ def build_graph(llm, registry, instructions, model, executor_agent_cls):
         task = state["messages"][-1].content
         logger.info(f"Running tool finder for task: {task}")
         tool_finder_graph = build_tool_node_graph(llm, registry)
-        tool_finder_state = await tool_finder_graph.ainvoke(
-            {"original_task": task, "messages": state["messages"]}
-        )
+        tool_finder_state = await tool_finder_graph.ainvoke({"original_task": task, "messages": state["messages"]})
 
         if not tool_finder_state.get("apps_required"):
             logger.info("Tool finder determined no apps are required.")
universal_mcp/agents/react.py
CHANGED
@@ -4,6 +4,7 @@ from loguru import logger
 from universal_mcp.agentr.registry import AgentrRegistry
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolConfig, ToolFormat
+from rich import print
 
 from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.llm import load_chat_model
@@ -40,7 +41,18 @@ class ReactAgent(BaseAgent):
         self.llm = load_chat_model(model)
         self.tools = tools or {}
         if "ui" not in self.tools:
-            self.tools["ui"] = [
+            self.tools["ui"] = [
+                "create_bar_chart",
+                "create_line_chart",
+                "create_pie_chart",
+                "create_table",
+                "http_get",
+                "http_post",
+                "http_put",
+                "http_delete",
+                "http_patch",
+                "read_file",
+            ]
         self.max_iterations = max_iterations
         self.registry = registry
 
@@ -54,7 +66,6 @@ class ReactAgent(BaseAgent):
         else:
             tools = []
 
-
         logger.debug(f"Initialized ReactAgent: name={self.name}, model={self.model}")
         return create_react_agent(
             self.llm,
@@ -75,10 +86,7 @@ async def main():
         tools={"google-mail": ["send_email"]},
         registry=AgentrRegistry(),
     )
-    result = await agent.invoke(
-        "Send an email with the subject 'testing react agent' to manoj@agentr.dev"
-    )
-    from rich import print
+    result = await agent.invoke("Send an email with the subject 'testing react agent' to manoj@agentr.dev")
 
     print(messages_to_list(result["messages"]))
 
universal_mcp/agents/shared/prompts.py
CHANGED
@@ -6,7 +6,7 @@ You are an expert planner. Your goal is to consolidate a complex user request in
 2. **Focus on Data Handoffs:** A good decomposition often involves one sub-task to *retrieve* information and a subsequent sub-task to *use* that information.
 3. **Assume Internal Capabilities:** Do NOT create sub-tasks for abstract cognitive work like 'summarize' or 'analyze'.
 4. **Simplify Single Actions:** If the user's task is already a single, simple action, the output should be a single sub-task that concisely describes that action. Do not make it identical to the user's input.
-5. **General purpose sub tasks:** You also need to realise that these subtasks are going to be used to search for tools and apps. And the names and description of these tools and apps are going to be general in nature so the sub tasks should not be too specific. The task which you will get may be specific in nature but the sub taks must be general.
+5. **General purpose sub tasks:** You also need to realise that these subtasks are going to be used to search for tools and apps. And the names and description of these tools and apps are going to be general in nature so the sub tasks should not be too specific. The task which you will get may be specific in nature but the sub taks must be general.
 **--- EXAMPLES ---**
 
 **EXAMPLE 1:**
@@ -116,7 +116,7 @@ You are an AI assistant that selects the most appropriate tool(s) from a list to
 **INSTRUCTIONS:**
 1. Carefully review the sub-task to understand the required action.
 2. Examine the list of available tools and their descriptions.
-3. Select the best tool ID that matches the sub-task. You are encouraged to select multiple tools if there are multiple tools with similar capabilties
+3. Select the best tool ID that matches the sub-task. You are encouraged to select multiple tools if there are multiple tools with similar capabilties
 or names. It is always good to have more tools than having insufficent tools.
 4. If no tool is a good fit, return an empty list.
 5. Only return the tool IDs.
@@ -129,4 +129,4 @@ or names. It is always good to have more tools than having insufficent tools.
 {tool_candidates}
 
 **YOUR SELECTED TOOL ID(s):**
-"""
+"""