universal-mcp-agents 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of universal-mcp-agents might be problematic.
- universal_mcp/agents/__init__.py +19 -0
- universal_mcp/agents/autoagent/__init__.py +1 -1
- universal_mcp/agents/autoagent/__main__.py +1 -1
- universal_mcp/agents/autoagent/graph.py +2 -2
- universal_mcp/agents/base.py +24 -13
- universal_mcp/agents/bigtool/__init__.py +10 -8
- universal_mcp/agents/bigtool/__main__.py +6 -7
- universal_mcp/agents/bigtool/graph.py +18 -27
- universal_mcp/agents/bigtool/prompts.py +3 -3
- universal_mcp/agents/bigtool2/__init__.py +16 -3
- universal_mcp/agents/bigtool2/__main__.py +6 -5
- universal_mcp/agents/bigtool2/agent.py +1 -1
- universal_mcp/agents/bigtool2/graph.py +55 -20
- universal_mcp/agents/bigtool2/prompts.py +8 -5
- universal_mcp/agents/bigtoolcache/agent.py +2 -2
- universal_mcp/agents/bigtoolcache/graph.py +5 -6
- universal_mcp/agents/bigtoolcache/prompts.py +1 -2
- universal_mcp/agents/builder.py +47 -14
- universal_mcp/agents/cli.py +19 -5
- universal_mcp/agents/codeact/test.py +2 -1
- universal_mcp/agents/llm.py +7 -3
- universal_mcp/agents/planner/__init__.py +8 -2
- universal_mcp/agents/planner/__main__.py +10 -8
- universal_mcp/agents/planner/graph.py +6 -2
- universal_mcp/agents/planner/prompts.py +14 -1
- universal_mcp/agents/planner/state.py +0 -1
- universal_mcp/agents/react.py +36 -27
- universal_mcp/agents/shared/tool_node.py +2 -3
- universal_mcp/agents/simple.py +19 -3
- universal_mcp/agents/utils.py +36 -36
- universal_mcp/applications/ui/app.py +305 -0
- {universal_mcp_agents-0.1.4.dist-info → universal_mcp_agents-0.1.6.dist-info}/METADATA +2 -2
- universal_mcp_agents-0.1.6.dist-info/RECORD +50 -0
- universal_mcp/agents/autoagent/studio.py +0 -19
- universal_mcp/agents/bigtool/context.py +0 -24
- universal_mcp/agents/bigtool2/context.py +0 -32
- universal_mcp/agents/tools.py +0 -41
- universal_mcp_agents-0.1.4.dist-info/RECORD +0 -53
- {universal_mcp_agents-0.1.4.dist-info → universal_mcp_agents-0.1.6.dist-info}/WHEEL +0 -0
universal_mcp/agents/__init__.py
CHANGED
@@ -7,6 +7,25 @@ from universal_mcp.agents.planner import PlannerAgent
 from universal_mcp.agents.react import ReactAgent
 from universal_mcp.agents.simple import SimpleAgent
 
+
+def get_agent(agent_name: str):
+    if agent_name == "auto":
+        return AutoAgent
+    elif agent_name == "react":
+        return ReactAgent
+    elif agent_name == "simple":
+        return SimpleAgent
+    elif agent_name == "builder":
+        return BuilderAgent
+    elif agent_name == "planner":
+        return PlannerAgent
+    elif agent_name == "bigtool":
+        return BigToolAgent
+    elif agent_name == "bigtool2":
+        return BigToolAgent2
+    else:
+        raise ValueError(f"Unknown agent: {agent_name}. Possible values: auto, react, simple, builder, planner, bigtool, bigtool2")
+
 __all__ = [
     "BaseAgent",
     "ReactAgent",
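The new `get_agent` factory returns the agent class, not an instance; callers construct the agent themselves. A minimal usage sketch, assuming the constructor arguments shown in the `__main__` examples later in this diff (the name and instruction strings are illustrative):

    from universal_mcp.agentr.registry import AgentrRegistry
    from universal_mcp.agents import get_agent

    # Resolve the class by name; unknown names raise ValueError.
    agent_cls = get_agent("bigtool2")
    agent = agent_cls(
        name="Example Agent",  # illustrative
        instructions="You are a helpful assistant.",  # illustrative
        model="azure/gpt-4.1",
        registry=AgentrRegistry(),
    )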
universal_mcp/agents/autoagent/__init__.py
CHANGED
@@ -1,8 +1,8 @@
 from langgraph.checkpoint.base import BaseCheckpointSaver
+from universal_mcp.tools.registry import ToolRegistry
 
 from universal_mcp.agents.autoagent.graph import build_graph
 from universal_mcp.agents.base import BaseAgent
-from universal_mcp.tools.registry import ToolRegistry
 
 
 class AutoAgent(BaseAgent):
universal_mcp/agents/autoagent/graph.py
CHANGED
@@ -6,13 +6,13 @@ from langchain_core.messages import AIMessage, ToolMessage
 from langchain_core.tools import tool
 from langgraph.graph import END, START, StateGraph
 from langgraph.runtime import Runtime
+from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolFormat
 
 from universal_mcp.agents.autoagent.context import Context
 from universal_mcp.agents.autoagent.prompts import SYSTEM_PROMPT
 from universal_mcp.agents.autoagent.state import State
 from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.tools.registry import ToolRegistry
-from universal_mcp.types import ToolFormat
 
 
 async def build_graph(tool_registry: ToolRegistry, instructions: str = ""):
universal_mcp/agents/base.py
CHANGED
@@ -2,9 +2,11 @@
 from typing import cast
 from uuid import uuid4
 
-from langchain_core.messages import AIMessageChunk
+from langchain_core.messages import AIMessage, AIMessageChunk
 from langgraph.checkpoint.base import BaseCheckpointSaver
+from langgraph.graph import StateGraph
 from langgraph.types import Command
+from universal_mcp.logger import logger
 
 from .utils import RichCLI
 
@@ -31,7 +33,7 @@ class BaseAgent:
         self._graph = await self._build_graph()
         self._initialized = True
 
-    async def _build_graph(self):
+    async def _build_graph(self) -> StateGraph:
         raise NotImplementedError("Subclasses must implement this method")
 
     async def stream(self, thread_id: str, user_input: str, metadata: dict = None):
@@ -60,32 +62,40 @@ class BaseAgent:
         ):
             # Only forward assistant token chunks that are not tool-related.
             type_ = type(event)
-            if type_ != AIMessageChunk:
-                continue
-            event = cast(AIMessageChunk, event)
-            aggregate = event if aggregate is None else aggregate + event
             tags = metadata.get("tags", []) if isinstance(metadata, dict) else []
             is_quiet = isinstance(tags, list) and ("quiet" in tags)
-
             if is_quiet:
                 continue
+            # Handle different types of messages
+            if type_ in (AIMessage, AIMessageChunk):
+                # Accumulate billing and aggregate message
+                aggregate = event if aggregate is None else aggregate + event
+                # Ignore intermediate finish messages
                 if "finish_reason" in event.response_metadata:
                     # Got LLM finish reason ignore it
-
+                    logger.error(f"Finish event: {event}, reason: {event.response_metadata['finish_reason']}, Metadata: {metadata}")
                     pass
                 else:
-
+                    logger.debug(f"Event: {event}, Metadata: {metadata}")
                     yield event
         # Send a final finished message
         # The last event would be finish
         event = cast(AIMessageChunk, event)
+        event.usage_metadata = aggregate.usage_metadata
+        logger.debug(f"Usage metadata: {event.usage_metadata}")
         yield event
 
     async def stream_interactive(self, thread_id: str, user_input: str):
         await self.ainit()
         with self.cli.display_agent_response_streaming(self.name) as stream_updater:
             async for event in self.stream(thread_id, user_input):
-
+                if isinstance(event.content, list):
+                    thinking_content = "".join([c.get("thinking", "") for c in event.content])
+                    stream_updater.update(thinking_content, type_="thinking")
+                    content = "".join([c.get("text", "") for c in event.content])
+                    stream_updater.update(content, type_="text")
+                else:
+                    stream_updater.update(event.content, type_="text")
 
     async def invoke(
         self, user_input: str, thread_id: str = str(uuid4()), metadata: dict = None
@@ -106,11 +116,12 @@ class BaseAgent:
             "metadata": run_metadata,
         }
 
-
+        result = await self._graph.ainvoke(
             {"messages": [{"role": "user", "content": user_input}]},
             config=run_config,
             context={"system_prompt": self.instructions, "model": self.model},
         )
+        return result
 
     async def run_interactive(self, thread_id: str = str(uuid4())):
         """Main application loop"""
@@ -145,7 +156,7 @@ class BaseAgent:
                     f"Agent is {self.name}. {self.instructions}"
                 )
                 continue
-            elif command
+            elif command in {"exit", "quit", "q"}:
                 self.cli.display_info("Goodbye! 👋")
                 break
             elif command == "reset":
@@ -170,6 +181,6 @@ class BaseAgent:
                 break
             except Exception as e:
                 import traceback
-
                 traceback.print_exc()
                 self.cli.display_error(f"An error occurred: {str(e)}")
+                break
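`_build_graph` is now annotated as returning a `StateGraph`, and `invoke` returns the graph's result instead of discarding it. In practice the bigtool subclasses below compile the builder with the shared checkpointer before returning, so a conforming subclass might look like this sketch (`EchoState`, `EchoAgent`, and the node body are illustrative, not part of the package):

    from typing import TypedDict

    from langgraph.graph import END, START, StateGraph

    from universal_mcp.agents.base import BaseAgent


    class EchoState(TypedDict):
        messages: list


    class EchoAgent(BaseAgent):
        async def _build_graph(self):
            builder = StateGraph(EchoState)
            # Single pass-through node; real agents add model/tool nodes.
            builder.add_node("echo", lambda state: {"messages": state["messages"]})
            builder.add_edge(START, "echo")
            builder.add_edge("echo", END)
            # Like the bigtool agents, compile with the memory checkpointer.
            return builder.compile(checkpointer=self.memory)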
universal_mcp/agents/bigtool/__init__.py
CHANGED
@@ -1,9 +1,9 @@
 from langgraph.checkpoint.base import BaseCheckpointSaver
+from universal_mcp.logger import logger
+from universal_mcp.tools.registry import ToolRegistry
 
 from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.logger import logger
-from universal_mcp.tools.registry import ToolRegistry
 
 from .graph import build_graph
 from .prompts import SYSTEM_PROMPT
@@ -19,18 +19,20 @@ class BigToolAgent(BaseAgent):
         memory: BaseCheckpointSaver | None = None,
         **kwargs,
     ):
-
-        full_instructions = f"{SYSTEM_PROMPT}\n\n**User Instructions:**\n{instructions}"
-        super().__init__(name, full_instructions, model, memory, **kwargs)
-
+        super().__init__(name, instructions, model, memory, **kwargs)
         self.registry = registry
         self.llm = load_chat_model(self.model)
-        self.tool_selection_llm = load_chat_model("gemini/gemini-2.0-flash-001")
 
         logger.info(
             f"BigToolAgent '{self.name}' initialized with model '{self.model}'."
         )
 
+    def _build_system_message(self):
+        return SYSTEM_PROMPT.format(
+            name=self.name,
+            instructions=self.instructions,
+        )
+
     async def _build_graph(self):
         """Build the bigtool agent graph using the existing create_agent function."""
         logger.info(f"Building graph for BigToolAgent '{self.name}'...")
@@ -38,7 +40,7 @@ class BigToolAgent(BaseAgent):
         graph_builder = build_graph(
             tool_registry=self.registry,
             llm=self.llm,
-
+            system_prompt=self._build_system_message(),
         )
 
         compiled_graph = graph_builder.compile(checkpointer=self.memory)
universal_mcp/agents/bigtool/__main__.py
CHANGED
@@ -1,9 +1,10 @@
 import asyncio
 
 from loguru import logger
-
 from universal_mcp.agentr.registry import AgentrRegistry
+
 from universal_mcp.agents.bigtool import BigToolAgent
+from universal_mcp.agents.utils import messages_to_list
 
 
 async def main():
@@ -13,12 +14,10 @@ async def main():
         model="azure/gpt-4.1",
         registry=AgentrRegistry(),
     )
-
-
-
-    )
-    logger.info(event.content)
-
+    await agent.ainit()
+    output = await agent.invoke(
+        user_input="Send an email to manoj@agentr.dev")
+    logger.info(messages_to_list(output["messages"]))
 
 if __name__ == "__main__":
     asyncio.run(main())
universal_mcp/agents/bigtool/graph.py
CHANGED
@@ -1,28 +1,24 @@
 import json
-from datetime import UTC, datetime
 from typing import Literal, TypedDict, cast
 
-from langchain_anthropic import ChatAnthropic
 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage, ToolMessage
 from langchain_core.tools import tool
 from langgraph.graph import StateGraph
-from langgraph.runtime import Runtime
 from langgraph.types import Command
-
-from universal_mcp.agents.bigtool.context import Context
-from universal_mcp.agents.bigtool.state import State
 from universal_mcp.logger import logger
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolFormat
 
+from universal_mcp.agents.bigtool.state import State
+
 from .prompts import SELECT_TOOL_PROMPT
 
 
 def build_graph(
     tool_registry: ToolRegistry,
     llm: BaseChatModel,
-
+    system_prompt: str,
 ):
     @tool
     async def retrieve_tools(task_query: str) -> list[str]:
@@ -40,7 +36,7 @@
         class ToolSelectionOutput(TypedDict):
             tool_names: list[str]
 
-        model =
+        model = llm
         app_ids = await tool_registry.list_all_apps()
         connections = await tool_registry.list_connected_apps()
         connection_ids = set([connection["app_id"] for connection in connections])
@@ -76,16 +72,14 @@
             logger.error(f"Error retrieving tools: {e}")
             return []
 
+
     async def call_model(
-        state: State
+        state: State
     ) -> Command[Literal["select_tools", "call_tools"]]:
         logger.info("Calling model...")
         try:
-            system_message = runtime.context.system_prompt.format(
-                system_time=datetime.now(tz=UTC).isoformat()
-            )
             messages = [
-                {"role": "system", "content":
+                {"role": "system", "content": system_prompt},
                 *state["messages"],
             ]
 
@@ -98,18 +92,15 @@
         else:
             selected_tools = []
 
-
-
-
-
-
-
-
-
-
-                [retrieve_tools, *selected_tools], tool_choice="auto"
-            )
-            response = cast(AIMessage, await model_with_tools.ainvoke(messages))
+            model_with_tools = llm.bind_tools(
+                [retrieve_tools, *selected_tools], tool_choice="auto"
+            )
+
+
+            response = await model_with_tools.ainvoke(messages)
+            cast(AIMessage, response)
+            logger.debug(f"Response: {response}")
+
 
 
         if response.tool_calls:
             logger.info(
@@ -151,7 +142,7 @@
             raise
 
     async def select_tools(
-        state: State
+        state: State
     ) -> Command[Literal["call_model"]]:
         logger.info("Selecting tools...")
         try:
@@ -210,7 +201,7 @@
             update={"messages": outputs, "selected_tool_ids": recent_tool_ids},
         )
 
-    builder = StateGraph(State
+    builder = StateGraph(State)
 
     builder.add_node(call_model)
     builder.add_node(select_tools)
universal_mcp/agents/bigtool/prompts.py
CHANGED
@@ -1,15 +1,15 @@
 """Default prompts used by the agent."""
 
-SYSTEM_PROMPT = """You are a helpful AI assistant.
+SYSTEM_PROMPT = """You are {name}, a helpful AI assistant.
 
 **Core Directives:**
 1. **Always Use Tools for Tasks:** For any user request that requires an action (e.g., sending an email, searching for information, creating an event), you MUST use a tool. Do not answer from your own knowledge or refuse a task if a tool might exist for it.
 2. **First Step is ALWAYS `retrieve_tools`:** Before you can use any other tool, you MUST first call the `retrieve_tools` function to find the right tool for the user's request. This is your mandatory first action.
 3. **Strictly Follow the Process:** Your only job in your first turn is to analyze the user's request and call `retrieve_tools` with a concise query describing the core task. Do not engage in conversation.
 
-System time: {system_time}
-
 When multiple tools are available for the same task, you must ask the user.
+
+{instructions}
 """
 
 SELECT_TOOL_PROMPT = """You are an AI assistant that helps the user perform tasks using various apps (each app has multiple tools).
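The `{system_time}` placeholder is gone; the prompt is now formatted once by the agent's new `_build_system_message`, which fills `{name}` and `{instructions}`. Roughly (the values here are illustrative):

    # What BigToolAgent._build_system_message now produces:
    system_message = SYSTEM_PROMPT.format(
        name="BigTool Agent",
        instructions="Prefer Google Mail for email tasks.",
    )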
universal_mcp/agents/bigtool2/__init__.py
CHANGED
@@ -4,6 +4,7 @@ from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.llm import load_chat_model
 from universal_mcp.logger import logger
 from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolConfig, ToolFormat
 
 from .graph import build_graph
 from .prompts import SYSTEM_PROMPT
@@ -17,27 +18,39 @@ class BigToolAgent2(BaseAgent):
         model: str,
         registry: ToolRegistry,
         memory: BaseCheckpointSaver | None = None,
+        tools: ToolConfig | None = None,
         **kwargs,
     ):
-
-        full_instructions = f"{SYSTEM_PROMPT}\n\n**User Instructions:**\n{instructions}"
-        super().__init__(name, full_instructions, model, memory, **kwargs)
+        super().__init__(name, instructions, model, memory, **kwargs)
 
         self.registry = registry
         self.llm = load_chat_model(self.model)
         self.recursion_limit = kwargs.get("recursion_limit", 10)
+        self.tools = tools or {}
+        if "ui" not in self.tools:
+            # self.tools["ui"] = ["create_bar_chart", "create_line_chart", "create_pie_chart", "create_table", "http_get", "http_post", "http_put", "http_delete", "http_patch", "read_file"]
+            self.tools["ui"] = ["create_table"]
 
         logger.info(
             f"BigToolAgent '{self.name}' initialized with model '{self.model}'."
         )
 
+    def _build_system_message(self):
+        return SYSTEM_PROMPT.format(
+            name=self.name,
+            instructions=f"**User Instructions:**\n{self.instructions}",
+        )
+
     async def _build_graph(self):
         """Build the bigtool agent graph using the existing create_agent function."""
         logger.info(f"Building graph for BigToolAgent '{self.name}'...")
         try:
+            default_tools = await self.registry.export_tools(self.tools, ToolFormat.LANGCHAIN)
             graph_builder = build_graph(
                 tool_registry=self.registry,
                 llm=self.llm,
+                system_prompt=self._build_system_message(),
+                default_tools=default_tools,
            )
 
            compiled_graph = graph_builder.compile(checkpointer=self.memory)
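`BigToolAgent2` now accepts a `ToolConfig` of tools to pre-load into the graph, and injects a `ui` entry (`create_table`) when none is given. A construction sketch, assuming the keyword arguments shown above (the app and tool ids in `tools` are hypothetical):

    from universal_mcp.agentr.registry import AgentrRegistry
    from universal_mcp.agents.bigtool2 import BigToolAgent2

    agent = BigToolAgent2(
        name="Mail Agent",  # illustrative
        instructions="Handle email workflows.",  # illustrative
        model="azure/gpt-4.1",
        registry=AgentrRegistry(),
        tools={"google_mail": ["send_email"]},  # hypothetical pre-loaded tools
    )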
universal_mcp/agents/bigtool2/__main__.py
CHANGED
@@ -4,6 +4,7 @@ from loguru import logger
 
 from universal_mcp.agentr.registry import AgentrRegistry
 from universal_mcp.agents.bigtool2 import BigToolAgent2
+from universal_mcp.agents.utils import messages_to_list
 
 
 async def main():
@@ -13,11 +14,11 @@
         model="azure/gpt-4.1",
         registry=AgentrRegistry(),
     )
-
-
-
-    )
-
+    await agent.ainit()
+    output = await agent.invoke(
+        user_input="Send an email to manoj@agentr.dev"
+    )
+    logger.info(messages_to_list(output["messages"]))
 
 
 if __name__ == "__main__":
universal_mcp/agents/bigtool2/graph.py
CHANGED
@@ -1,23 +1,22 @@
 import json
 from datetime import UTC, datetime
-from typing import Literal,
+from typing import Literal, cast
 
-from langchain_anthropic import ChatAnthropic
 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage, ToolMessage
-from langchain_core.tools import tool
+from langchain_core.tools import BaseTool, tool
 from langgraph.graph import StateGraph
-from langgraph.runtime import Runtime
 from langgraph.types import Command
 
-from universal_mcp.agents.bigtool2.context import Context
 from universal_mcp.agents.bigtool2.state import State
 from universal_mcp.logger import logger
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolFormat
 
 
-def build_graph(
+def build_graph(
+    tool_registry: ToolRegistry, llm: BaseChatModel, system_prompt: str, default_tools: list[BaseTool]
+):
     @tool
     async def search_tools(queries: list[str]) -> str:
         """Search tools for a given list of queries
@@ -32,22 +31,23 @@ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
         connected_apps = [
             app["id"] for app in app_ids if app["id"] in connection_ids
         ]
-
+        [
             app["id"] for app in app_ids if app["id"] not in connection_ids
         ]
         app_tools = {}
         for task_query in queries:
-
+            apps_list = await tool_registry.search_apps(task_query, limit=5)
+            tools_list = []
+            for app in apps_list:
+                tools_list.extend(await tool_registry.search_tools(task_query, limit=5, app_id=app["id"]))
             tool_candidates = [
                 f"{tool['id']}: {tool['description']}" for tool in tools_list
             ]
             for tool in tool_candidates:
                 app = tool.split("__")[0]
                 if app not in app_tools:
-                    if len(app_tools.keys()) >= 10:
-                        break
                     app_tools[app] = []
-                if len(app_tools[app]) <
+                if len(app_tools[app]) < 5:
                     app_tools[app].append(tool)
         for app in app_tools:
             app_status = "connected" if app in connected_apps else "NOT connected"
@@ -65,15 +65,25 @@ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
 
     @tool
     async def load_tools(tool_ids: list[str]) -> list[str]:
-        """Load the tools for the given tool ids. Returns the tool ids."""
+        """Load the tools for the given tool ids. Returns the tool ids after loading them. Note that tool ids are the complete tool ids, with both the app name and the tool name separated by double underscore (__). e.g. google_mail__send_email"""
         return tool_ids
 
+    @tool
+    async def web_search(query: str) -> str:
+        """Search the web for the given query. Returns the search results."""
+        tool = await tool_registry.export_tools(
+            ["exa__search"], ToolFormat.LANGCHAIN
+        )
+        response = await tool_registry.call_tool("exa__search", {"query": query, "contents": {"summary": True}})
+        return response
+
+
     async def call_model(
-        state: State,
+        state: State,
     ) -> Command[Literal["select_tools", "call_tools"]]:
         logger.info("Calling model...")
         try:
-            system_message =
+            system_message = system_prompt.format(
                 system_time=datetime.now(tz=UTC).isoformat()
             )
             messages = [
@@ -92,8 +102,18 @@ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
 
             model = llm
 
+            tools = [search_tools, load_tools, web_search, *default_tools, *selected_tools]
+            # Remove duplicates based on tool name
+            seen_names = set()
+            unique_tools = []
+            for tool in tools:
+                if tool.name not in seen_names:
+                    seen_names.add(tool.name)
+                    unique_tools.append(tool)
+            tools = unique_tools
             model_with_tools = model.bind_tools(
-
+                tools,
+                tool_choice="auto",
             )
             response = cast(AIMessage, await model_with_tools.ainvoke(messages))
 
@@ -112,7 +132,7 @@ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
             elif tool_call["name"] == load_tools.name:
                 logger.info("Model requested to load tools.")
                 tool_msg = ToolMessage(
-
+                    "Loaded tools.", tool_call_id=tool_call["id"]
                 )
                 selected_tool_ids = tool_call["args"]["tool_ids"]
                 logger.info(f"Loaded tools: {selected_tool_ids}")
@@ -124,6 +144,23 @@ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
                 },
             )
 
+            elif tool_call["name"] == web_search.name:
+                logger.info(f"Tool '{tool_call['name']}' is a web search tool. Proceeding to call.")
+                web_search_result = await web_search.ainvoke(input=tool_call["args"])
+                tool_msg = ToolMessage(
+                    f"Web search result: {web_search_result}", tool_call_id=tool_call["id"]
+                )
+                return Command(goto="call_model", update={"messages": [response, tool_msg]})
+
+            elif "ui_tools" in tool_call["name"]:
+                logger.info(f"Tool '{tool_call['name']}' is a UI tool. Proceeding to call.")
+                ui_tool_result = await ui_tools_dict[tool_call["name"]].ainvoke(input=tool_call["args"])
+                tool_msg = ToolMessage(
+                    f"UI tool result: {ui_tool_result}", tool_call_id=tool_call["id"]
+                )
+                return Command(goto="call_model", update={"messages": [response, tool_msg]})
+
+
             elif tool_call["name"] not in state["selected_tool_ids"]:
                 try:
                     await tool_registry.export_tools(
@@ -151,9 +188,7 @@ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
             logger.error(f"Error in call_model: {e}")
             raise
 
-    async def select_tools(
-        state: State, runtime: Runtime[Context]
-    ) -> Command[Literal["call_model"]]:
+    async def select_tools(state: State) -> Command[Literal["call_model"]]:
         logger.info("Selecting tools...")
         try:
             tool_call = state["messages"][-1].tool_calls[0]
@@ -204,7 +239,7 @@ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
             update={"messages": outputs, "selected_tool_ids": recent_tool_ids},
         )
 
-    builder = StateGraph(State
+    builder = StateGraph(State)
 
     builder.add_node(call_model)
     builder.add_node(select_tools)
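The pre-bind step above deduplicates tools by `name`, since `default_tools` and the dynamically selected tools can now overlap. The same logic in isolation (a standalone restatement for clarity, not a helper the package exports):

    from langchain_core.tools import BaseTool


    def dedupe_by_name(tools: list[BaseTool]) -> list[BaseTool]:
        # Keep the first tool seen for each name, preserving order.
        seen: set[str] = set()
        unique: list[BaseTool] = []
        for t in tools:
            if t.name not in seen:
                seen.add(t.name)
                unique.append(t)
        return unique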
universal_mcp/agents/bigtool2/prompts.py
CHANGED
@@ -3,10 +3,13 @@
 SYSTEM_PROMPT = """You are a helpful AI assistant.
 
 **Core Directives:**
-1. **Always Use Tools for Tasks:** For any user request that requires an action (e.g., sending an email, searching for information, creating an event), you MUST use a tool. Do not
-2. **First Step is ALWAYS `search_tools`:** Before you can use any other tool, you MUST first call the `search_tools` function to find the right tools for the user's request. This is your mandatory first action. You must not use the same/similar query multiple times in the list. The list should have multiple queries only if the task has clearly different sub-tasks.
-3. **Load Tools:** After looking at the output of `search_tools`, you MUST call the `load_tools` function to load only the tools you want to use. Use your judgement to eliminate irrelevant apps that came up just because of semantic similarity. However, sometimes, multiple apps might be relevant for the same task. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must ask the user to choose the app. In case the user asks you to use an app that is not connected, call the apps tools normally. The tool will return a link for connecting that you should pass on to the user.
-3. **Strictly Follow the Process:** Your only job in your first turn is to analyze the user's request and call `search_tools` with a concise query describing the core task. Do not engage in conversation.
+1. **Always Use Tools for Tasks:** For any user request that requires an action (e.g., sending an email, searching for information, creating an event, displaying a chart), you MUST use a tool. Do not refuse a task if a tool might exist for it.
 
-
+2. Check if your existing tools or knowledge can handle the user's request. If they can, use them. If they cannot, you must call the `search_tools` function to find the right tools for the user's request. You must not use the same/similar query multiple times in the list. The list should have multiple queries only if the task has clearly different sub-tasks. If you do not find any specific relevant tools, use the pre-loaded generic tools.
+
+3. **Load Tools:** After looking at the output of `search_tools`, you MUST call the `load_tools` function to load only the tools you want to use. Provide the full tool ids, not just the app names. Use your judgement to eliminate irrelevant apps that came up just because of semantic similarity. However, sometimes, multiple apps might be relevant for the same task. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must ask the user to choose the app. In case the user asks you to use an app that is not connected, call the apps tools normally. The tool will return a link for connecting that you should pass on to the user.
+
+4. **Strictly Follow the Process:** Your only job in your first turn is to analyze the user's request and answer using existing tools/knowledge or `search_tools` with a concise query describing the core task. Do not engage in conversation, or extend the conversation beyond the user's request.
+
+{instructions}
 """
universal_mcp/agents/bigtoolcache/agent.py
CHANGED
@@ -1,10 +1,10 @@
-from universal_mcp.agents.bigtoolcache import BigToolAgentCache
 from universal_mcp.agentr.registry import AgentrRegistry
+from universal_mcp.agents.bigtoolcache import BigToolAgentCache
 
 
 async def agent():
     agent_object = await BigToolAgentCache(
-        name="BigTool Agent
+        name="BigTool Agent Cache version",
         instructions="You are a helpful assistant that can use various tools to complete tasks.",
         model="anthropic/claude-4-sonnet-20250514",
         registry=AgentrRegistry(),