universal-mcp-agents 0.1.9__py3-none-any.whl → 0.1.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of universal-mcp-agents might be problematic.
- universal_mcp/agents/__init__.py +9 -9
- universal_mcp/agents/base.py +13 -18
- universal_mcp/agents/bigtool2/__init__.py +6 -7
- universal_mcp/agents/bigtool2/__main__.py +2 -4
- universal_mcp/agents/bigtool2/agent.py +1 -0
- universal_mcp/agents/bigtool2/graph.py +48 -184
- universal_mcp/agents/bigtool2/meta_tools.py +120 -0
- universal_mcp/agents/bigtoolcache/__init__.py +31 -22
- universal_mcp/agents/bigtoolcache/__main__.py +1 -4
- universal_mcp/agents/bigtoolcache/agent.py +1 -3
- universal_mcp/agents/bigtoolcache/graph.py +101 -191
- universal_mcp/agents/bigtoolcache/prompts.py +7 -31
- universal_mcp/agents/bigtoolcache/tools.py +141 -0
- universal_mcp/agents/builder.py +10 -20
- universal_mcp/agents/cli.py +1 -2
- universal_mcp/agents/codeact/__init__.py +1 -1
- universal_mcp/agents/codeact/__main__.py +15 -5
- universal_mcp/agents/codeact/agent.py +67 -100
- universal_mcp/agents/codeact/prompts.py +32 -42
- universal_mcp/agents/codeact/sandbox.py +30 -39
- universal_mcp/agents/codeact/state.py +3 -6
- universal_mcp/agents/codeact/utils.py +12 -5
- universal_mcp/agents/hil.py +1 -6
- universal_mcp/agents/planner/__init__.py +1 -3
- universal_mcp/agents/planner/graph.py +1 -3
- universal_mcp/agents/react.py +14 -6
- universal_mcp/agents/shared/prompts.py +3 -3
- universal_mcp/agents/shared/tool_node.py +47 -47
- universal_mcp/agents/simple.py +2 -1
- universal_mcp/agents/utils.py +4 -15
- universal_mcp/applications/ui/app.py +5 -15
- {universal_mcp_agents-0.1.9.dist-info → universal_mcp_agents-0.1.11.dist-info}/METADATA +2 -1
- universal_mcp_agents-0.1.11.dist-info/RECORD +42 -0
- universal_mcp/agents/autoagent/__init__.py +0 -30
- universal_mcp/agents/autoagent/__main__.py +0 -25
- universal_mcp/agents/autoagent/context.py +0 -26
- universal_mcp/agents/autoagent/graph.py +0 -170
- universal_mcp/agents/autoagent/prompts.py +0 -9
- universal_mcp/agents/autoagent/state.py +0 -27
- universal_mcp/agents/autoagent/utils.py +0 -13
- universal_mcp/agents/bigtool/__init__.py +0 -58
- universal_mcp/agents/bigtool/__main__.py +0 -23
- universal_mcp/agents/bigtool/graph.py +0 -210
- universal_mcp/agents/bigtool/prompts.py +0 -31
- universal_mcp/agents/bigtool/state.py +0 -27
- universal_mcp/agents/bigtoolcache/tools_all.txt +0 -956
- universal_mcp/agents/bigtoolcache/tools_important.txt +0 -474
- universal_mcp_agents-0.1.9.dist-info/RECORD +0 -54
- {universal_mcp_agents-0.1.9.dist-info → universal_mcp_agents-0.1.11.dist-info}/WHEEL +0 -0
universal_mcp/agents/__init__.py
CHANGED
@@ -1,18 +1,15 @@
-from universal_mcp.agents.autoagent import AutoAgent
 from universal_mcp.agents.base import BaseAgent
-from universal_mcp.agents.bigtool import BigToolAgent
 from universal_mcp.agents.bigtool2 import BigToolAgent2
+from universal_mcp.agents.bigtoolcache import BigToolAgentCache
 from universal_mcp.agents.builder import BuilderAgent
+from universal_mcp.agents.codeact import CodeActAgent
 from universal_mcp.agents.planner import PlannerAgent
 from universal_mcp.agents.react import ReactAgent
 from universal_mcp.agents.simple import SimpleAgent
-from universal_mcp.agents.codeact import CodeActAgent


 def get_agent(agent_name: str):
-    if agent_name == "
-        return AutoAgent
-    elif agent_name == "react":
+    if agent_name == "react":
         return ReactAgent
     elif agent_name == "simple":
         return SimpleAgent
@@ -20,14 +17,17 @@ def get_agent(agent_name: str):
         return BuilderAgent
     elif agent_name == "planner":
         return PlannerAgent
-    elif agent_name == "bigtool":
-        return BigToolAgent
     elif agent_name == "bigtool2":
         return BigToolAgent2
+    elif agent_name == "bigtoolcache":
+        return BigToolAgentCache
     elif agent_name == "codeact":
         return CodeActAgent
     else:
-        raise ValueError(
+        raise ValueError(
+            f"Unknown agent: {agent_name}. Possible values: react, simple, builder, planner, bigtoolcache, bigtool2, codeact"
+        )
+

 __all__ = [
     "BaseAgent",
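
Usage sketch (not part of the diff): with this change, get_agent() no longer accepts "autoagent" or "bigtool" and now resolves "bigtoolcache"; any unknown name raises the new ValueError listing the supported agents.

from universal_mcp.agents import get_agent

AgentClass = get_agent("bigtoolcache")  # returns BigToolAgentCache as of 0.1.11

try:
    get_agent("autoagent")  # removed in this release
except ValueError as exc:
    print(exc)  # message lists the supported agent names
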
universal_mcp/agents/base.py
CHANGED
@@ -36,7 +36,7 @@ class BaseAgent:
     async def _build_graph(self) -> StateGraph:
         raise NotImplementedError("Subclasses must implement this method")

-    async def stream(self,
+    async def stream(self, user_input: str, thread_id: str = str(uuid4()), metadata: dict = None):
         await self.ainit()
         aggregate = None

@@ -53,7 +53,7 @@
             "metadata": run_metadata,
         }

-        async for event,
+        async for event, meta in self._graph.astream(
             {"messages": [{"role": "user", "content": user_input}]},
             config=run_config,
             context={"system_prompt": self.instructions, "model": self.model},
@@ -62,21 +62,23 @@
         ):
             # Only forward assistant token chunks that are not tool-related.
             type_ = type(event)
-            tags =
+            tags = meta.get("tags", []) if isinstance(meta, dict) else []
             is_quiet = isinstance(tags, list) and ("quiet" in tags)
             if is_quiet:
                 continue
             # Handle different types of messages
-            if type_
+            if type_ == AIMessageChunk:
                 # Accumulate billing and aggregate message
                 aggregate = event if aggregate is None else aggregate + event
                 # Ignore intermeddite finish messages
                 if "finish_reason" in event.response_metadata:
                     # Got LLM finish reason ignore it
-                    logger.
+                    logger.debug(
+                        f"Finish event: {event}, reason: {event.response_metadata['finish_reason']}, Metadata: {meta}"
+                    )
                     pass
                 else:
-                    logger.debug(f"Event: {event}, Metadata: {
+                    logger.debug(f"Event: {event}, Metadata: {meta}")
                     yield event
         # Send a final finished message
         # The last event would be finish
@@ -97,9 +99,7 @@
             else:
                 stream_updater.update(event.content, type_="text")

-    async def invoke(
-        self, user_input: str, thread_id: str = str(uuid4()), metadata: dict = None
-    ):
+    async def invoke(self, user_input: str, thread_id: str = str(uuid4()), metadata: dict = None):
         """Run the agent"""
         await self.ainit()

@@ -133,9 +133,7 @@
         # Main loop
         while True:
             try:
-                state = self._graph.get_state(
-                    config={"configurable": {"thread_id": thread_id}}
-                )
+                state = self._graph.get_state(config={"configurable": {"thread_id": thread_id}})
                 if state.interrupts:
                     value = self.cli.handle_interrupt(state.interrupts[0])
                     self._graph.invoke(
@@ -152,9 +150,7 @@
                 if user_input.startswith("/"):
                     command = user_input.lower().lstrip("/")
                     if command == "about":
-                        self.cli.display_info(
-                            f"Agent is {self.name}. {self.instructions}"
-                        )
+                        self.cli.display_info(f"Agent is {self.name}. {self.instructions}")
                         continue
                     elif command in {"exit", "quit", "q"}:
                         self.cli.display_info("Goodbye! 👋")
@@ -165,9 +161,7 @@
                         thread_id = str(uuid4())
                         continue
                     elif command == "help":
-                        self.cli.display_info(
-                            "Available commands: /about, /exit, /quit, /q, /reset"
-                        )
+                        self.cli.display_info("Available commands: /about, /exit, /quit, /q, /reset")
                         continue
                     else:
                         self.cli.display_error(f"Unknown command: {command}")
@@ -181,6 +175,7 @@
                 break
             except Exception as e:
                 import traceback
+
                 traceback.print_exc()
                 self.cli.display_error(f"An error occurred: {str(e)}")
                 break
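
Usage sketch (not part of the diff): a minimal async caller for the reworked stream()/invoke() signatures above; the agent instance and the prompt text are assumptions, since any concrete BaseAgent subclass would do.

from uuid import uuid4


async def run(agent) -> None:  # `agent`: any concrete BaseAgent subclass (assumed)
    thread_id = str(uuid4())

    # stream() now takes user_input/thread_id/metadata directly and yields
    # AIMessageChunk events, skipping anything tagged "quiet".
    async for chunk in agent.stream("Summarise my inbox", thread_id=thread_id):
        print(getattr(chunk, "content", ""), end="", flush=True)

    # invoke() keeps the same arguments; per the bigtool2 __main__ diff below,
    # the result exposes the conversation under result["messages"].
    result = await agent.invoke("Summarise my inbox", thread_id=thread_id)
    print(result["messages"])
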

universal_mcp/agents/bigtool2/__init__.py
CHANGED
@@ -1,12 +1,13 @@
 from langgraph.checkpoint.base import BaseCheckpointSaver
-
-from universal_mcp.agents.base import BaseAgent
-from universal_mcp.agents.llm import load_chat_model
 from universal_mcp.logger import logger
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolConfig, ToolFormat

+from universal_mcp.agents.base import BaseAgent
+from universal_mcp.agents.llm import load_chat_model
+
 from .graph import build_graph
+from .meta_tools import create_meta_tools
 from .prompts import SYSTEM_PROMPT


@@ -31,9 +32,7 @@ class BigToolAgent2(BaseAgent):
         # self.tools["ui"] = ["create_bar_chart", "create_line_chart", "create_pie_chart", "create_table", "http_get", "http_post", "http_put", "http_delete", "http_patch", "read_file"]
         self.tools["ui"] = ["create_table"]

-        logger.info(
-            f"BigToolAgent '{self.name}' initialized with model '{self.model}'."
-        )
+        logger.info(f"BigToolAgent '{self.name}' initialized with model '{self.model}'.")

     def _build_system_message(self):
         return SYSTEM_PROMPT.format(
@@ -65,4 +64,4 @@ class BigToolAgent2(BaseAgent):
         return self._graph


-__all__ = ["BigToolAgent2"]
+__all__ = ["BigToolAgent2", "create_meta_tools"]

universal_mcp/agents/bigtool2/__main__.py
CHANGED
@@ -1,8 +1,8 @@
 import asyncio

 from loguru import logger
-
 from universal_mcp.agentr.registry import AgentrRegistry
+
 from universal_mcp.agents.bigtool2 import BigToolAgent2
 from universal_mcp.agents.utils import messages_to_list

@@ -15,9 +15,7 @@ async def main():
         registry=AgentrRegistry(),
     )
     await agent.ainit()
-    output = await agent.invoke(
-        user_input="Send an email to manoj@agentr.dev"
-    )
+    output = await agent.invoke(user_input="Send an email to manoj@agentr.dev")
     logger.info(messages_to_list(output["messages"]))


universal_mcp/agents/bigtool2/graph.py
CHANGED
@@ -1,134 +1,30 @@
 import json
 from datetime import UTC, datetime
 from typing import Literal, cast
-import asyncio

 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage, ToolMessage
-from langchain_core.tools import BaseTool
+from langchain_core.tools import BaseTool
 from langgraph.graph import StateGraph
 from langgraph.types import Command
-
-from universal_mcp.agents.bigtool2.state import State
 from universal_mcp.logger import logger
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolFormat

+from universal_mcp.agents.bigtool2.meta_tools import create_meta_tools
+from universal_mcp.agents.bigtool2.state import State

-def build_graph(
-    tool_registry: ToolRegistry, llm: BaseChatModel, system_prompt: str, default_tools: list[BaseTool]
-):
-    @tool
-    async def search_tools(queries: list[str]) -> str:
-        """Search tools for a given list of queries
-        Each single query should be atomic (doable with a single tool).
-        For tasks requiring multiple tools, add separate queries for each subtask"""
-        logger.info(f"Searching tools for queries: '{queries}'")
-        try:
-            all_tool_candidates = ""
-            app_ids = await tool_registry.list_all_apps()
-            connections = await tool_registry.list_connected_apps()
-            connection_ids = set([connection["app_id"] for connection in connections])
-            connected_apps = [
-                app["id"] for app in app_ids if app["id"] in connection_ids
-            ]
-            [
-                app["id"] for app in app_ids if app["id"] not in connection_ids
-            ]
-            app_tools = {}
-            for task_query in queries:
-                apps_list = await tool_registry.search_apps(task_query, limit=5)
-                tools_list = []
-                for app in apps_list:
-                    tools_list.extend(await tool_registry.search_tools(task_query, limit=5, app_id=app["id"]))
-                tool_candidates = [
-                    f"{tool['id']}: {tool['description']}" for tool in tools_list
-                ]
-                for tool in tool_candidates:
-                    app = tool.split("__")[0]
-                    if app not in app_tools:
-                        app_tools[app] = []
-                    if len(app_tools[app]) < 5:
-                        app_tools[app].append(tool)
-            for app in app_tools:
-                app_status = "connected" if app in connected_apps else "NOT connected"
-                all_tool_candidates += (
-                    f"Tools from {app} (status: {app_status} by user):\n"
-                )
-                for tool in app_tools[app]:
-                    all_tool_candidates += f" - {tool}\n"
-                all_tool_candidates += "\n"
-
-            return all_tool_candidates
-        except Exception as e:
-            logger.error(f"Error retrieving tools: {e}")
-            return "Error: " + str(e)
-
-    @tool
-    async def load_tools(tool_ids: list[str]) -> list[str]:
-        """
-        Load the tools for the given tool ids. Returns the valid tool ids after loading.
-        Tool ids are of form 'appid__toolid'. Example: 'google_mail__send_email'
-        """
-        correct, incorrect = [], []
-        app_tool_list: dict[str, list[str]] = {}
-
-        # Group tool_ids by app for fewer registry calls
-        app_to_tools: dict[str, list[str]] = {}
-        for tool_id in tool_ids:
-            if "__" not in tool_id:
-                incorrect.append(tool_id)
-                continue
-            app, tool = tool_id.split("__", 1)
-            app_to_tools.setdefault(app, []).append((tool_id, tool))
-
-        # Fetch all apps concurrently
-        async def fetch_tools(app: str):
-            try:
-                tools_dict = await tool_registry.list_tools(app)
-                return app, {tool_unit["name"] for tool_unit in tools_dict}
-            except Exception as e:
-                return app, None
-
-        results = await asyncio.gather(*(fetch_tools(app) for app in app_to_tools))
-
-        # Build map of available tools per app
-        for app, tools in results:
-            if tools is not None:
-                app_tool_list[app] = tools
-
-        # Validate tool_ids
-        for app, tool_entries in app_to_tools.items():
-            available = app_tool_list.get(app)
-            if available is None:
-                incorrect.extend(tool_id for tool_id, _ in tool_entries)
-                continue
-            for tool_id, tool in tool_entries:
-                if tool in available:
-                    correct.append(tool_id)
-                else:
-                    incorrect.append(tool_id)
-
-        return correct
-
-    @tool
-    async def web_search(query: str) -> str:
-        """Search the web for the given query. Returns the search results. Do not use for app-specific searches (for example, reddit or linkedin searches should be done using the app's tools)"""
-        tool = await tool_registry.export_tools(
-            ["exa__search_with_filters"], ToolFormat.LANGCHAIN
-        )
-        response = await tool_registry.call_tool("exa__search_with_filters", {"query": query, "contents": {"summary": True}})
-        return response

+def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel, system_prompt: str, default_tools: list[BaseTool]):
+    # Instantiate meta tools (search, load, web_search)
+    search_tools, load_tools, web_search = create_meta_tools(tool_registry)

     async def call_model(
         state: State,
     ) -> Command[Literal["select_tools", "call_tools"]]:
         logger.info("Calling model...")
         try:
-            system_message = system_prompt.format(
-                system_time=datetime.now(tz=UTC).isoformat()
-            )
+            system_message = system_prompt.format(system_time=datetime.now(tz=UTC).isoformat())
             messages = [
                 {"role": "system", "content": system_message},
                 *state["messages"],
@@ -161,68 +57,7 @@ def build_graph(
             response = cast(AIMessage, await model_with_tools.ainvoke(messages))

             if response.tool_calls:
-                logger.info(
-                    f"Model responded with {len(response.tool_calls)} tool calls."
-                )
-                if len(response.tool_calls) > 1:
-                    raise Exception(
-                        "Not possible in Claude with llm.bind_tools(tools=tools, tool_choice='auto')"
-                    )
-                tool_call = response.tool_calls[0]
-                if tool_call["name"] == search_tools.name:
-                    logger.info("Model requested to select tools.")
-                    return Command(goto="select_tools", update={"messages": [response]})
-                elif tool_call["name"] == load_tools.name:
-                    logger.info("Model requested to load tools.")
-                    selected_tool_ids = await load_tools.ainvoke(tool_call["args"])
-                    tool_msg = ToolMessage(
-                        f"Loaded tools- {selected_tool_ids}", tool_call_id=tool_call["id"]
-                    )
-                    logger.info(f"Loaded tools: {selected_tool_ids}")
-                    return Command(
-                        goto="call_model",
-                        update={
-                            "messages": [response, tool_msg],
-                            "selected_tool_ids": selected_tool_ids,
-                        },
-                    )
-
-                elif tool_call["name"] == web_search.name:
-                    logger.info(f"Tool '{tool_call['name']}' is a web search tool. Proceeding to call.")
-                    web_search_result = await web_search.ainvoke(input=tool_call["args"])
-                    tool_msg = ToolMessage(
-                        f"Web search result: {web_search_result}", tool_call_id=tool_call["id"]
-                    )
-                    return Command(goto="call_model", update={"messages": [response, tool_msg]})
-
-                elif "ui_tools" in tool_call["name"]:
-                    logger.info(f"Tool '{tool_call['name']}' is a UI tool. Proceeding to call.")
-                    ui_tool_result = await ui_tools_dict[tool_call["name"]].ainvoke(input=tool_call["args"])
-                    tool_msg = ToolMessage(
-                        f"UI tool result: {ui_tool_result}", tool_call_id=tool_call["id"]
-                    )
-                    return Command(goto="call_model", update={"messages": [response, tool_msg]})
-
-
-                elif tool_call["name"] not in state["selected_tool_ids"]:
-                    try:
-                        await tool_registry.export_tools(
-                            [tool_call["name"]], ToolFormat.LANGCHAIN
-                        )
-                        logger.info(
-                            f"Tool '{tool_call['name']}' not in selected tools, but available. Proceeding to call."
-                        )
-                        return Command(
-                            goto="call_tools", update={"messages": [response]}
-                        )
-                    except Exception as e:
-                        logger.error(
-                            f"Unexpected tool call: {tool_call['name']}. Error: {e}"
-                        )
-                        raise Exception(
-                            f"Unexpected tool call: {tool_call['name']}. Available tools: {state['selected_tool_ids']}"
-                        ) from e
-                logger.info(f"Proceeding to call tool: {tool_call['name']}")
+                logger.info(f"Model responded with {len(response.tool_calls)} tool calls.")
                 return Command(goto="call_tools", update={"messages": [response]})
             else:
                 logger.info("Model responded with a message, ending execution.")
@@ -237,7 +72,8 @@ def build_graph(
             tool_call = state["messages"][-1].tool_calls[0]
             searched_tools = await search_tools.ainvoke(input=tool_call["args"])
             tool_msg = ToolMessage(
-                f"Available tool_ids: {searched_tools}. Call load_tools to select the required tools only.",
+                f"Available tool_ids: {searched_tools}. Call load_tools to select the required tools only.",
+                tool_call_id=tool_call["id"],
             )
             return Command(goto="call_model", update={"messages": [tool_msg]})
         except Exception as e:
@@ -249,17 +85,45 @@ def build_graph(
         outputs = []
         recent_tool_ids = []
         for tool_call in state["messages"][-1].tool_calls:
-            logger.info(
-                f"Executing tool: {tool_call['name']} with args: {tool_call['args']}"
-            )
             try:
-
-
-
-
-
-
-
+                # Handle special tools internally (no export needed)
+                if tool_call["name"] == search_tools.name:
+                    search_result = await search_tools.ainvoke(input=tool_call["args"])
+                    outputs.append(
+                        ToolMessage(
+                            content=search_result,
+                            name=tool_call["name"],
+                            tool_call_id=tool_call["id"],
+                        )
+                    )
+                    continue
+
+                if tool_call["name"] == load_tools.name:
+                    selected_tool_ids = await load_tools.ainvoke(tool_call["args"])
+                    outputs.append(
+                        ToolMessage(
+                            content=json.dumps(f"Loaded tools- {selected_tool_ids}"),
+                            name=tool_call["name"],
+                            tool_call_id=tool_call["id"],
+                        )
+                    )
+                    recent_tool_ids = selected_tool_ids
+                    continue
+
+                if tool_call["name"] == web_search.name:
+                    web_search_result = await web_search.ainvoke(input=tool_call["args"])
+                    outputs.append(
+                        ToolMessage(
+                            content=json.dumps(f"Web search result: {web_search_result}"),
+                            name=tool_call["name"],
+                            tool_call_id=tool_call["id"],
+                        )
+                    )
+                    continue
+
+                # For other tools: export and call via registry
+                await tool_registry.export_tools([tool_call["name"]], ToolFormat.LANGCHAIN)
+                tool_result = await tool_registry.call_tool(tool_call["name"], tool_call["args"])
                 outputs.append(
                     ToolMessage(
                         content=json.dumps(tool_result),
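
Usage sketch (not part of the diff): the rewritten call_tools branch above handles the three meta tools in-process and, for everything else, exports the tool in LangChain format and then calls it by id through the registry. A stripped-down version of that round-trip, with the argument dict left as a placeholder:

from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.types import ToolFormat


async def call_one(tool_id: str, args: dict):
    registry = AgentrRegistry()
    # Mirrors the two lines added in the hunk above: export first, then call.
    await registry.export_tools([tool_id], ToolFormat.LANGCHAIN)
    return await registry.call_tool(tool_id, args)

# e.g. await call_one("google_mail__send_email", {...})  # argument shape depends on the tool
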

universal_mcp/agents/bigtool2/meta_tools.py
ADDED
@@ -0,0 +1,120 @@
+import asyncio
+from typing import Any
+
+from langchain_core.tools import BaseTool, tool
+from universal_mcp.logger import logger
+from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolFormat
+
+
+def create_meta_tools(tool_registry: ToolRegistry) -> list[BaseTool]:
+    @tool
+    async def search_tools(queries: list[str]) -> str:
+        """Search tools for a given list of queries
+        Each single query should be atomic (doable with a single tool).
+        For tasks requiring multiple tools, add separate queries for each subtask"""
+        logger.info(f"Searching tools for queries: '{queries}'")
+        try:
+            all_tool_candidates = ""
+
+            async def fetch_app_and_connection_metadata():
+                return await asyncio.gather(
+                    tool_registry.list_all_apps(),
+                    tool_registry.list_connected_apps(),
+                )
+
+            app_ids, connections = await fetch_app_and_connection_metadata()
+            connection_ids = set([connection["app_id"] for connection in connections])
+            connected_apps = [app["id"] for app in app_ids if app["id"] in connection_ids]
+            app_tools: dict[str, list[str]] = {}
+
+            async def find_tools_for_app(task_query: str, app_id: str) -> list[dict[str, Any]]:
+                return await tool_registry.search_tools(task_query, limit=5, app_id=app_id)
+
+            async def find_tools_for_query(task_query: str) -> list[str]:
+                apps_list = await tool_registry.search_apps(task_query, limit=5)
+                per_app_tool_lists = await asyncio.gather(
+                    *(find_tools_for_app(task_query, app_entry["id"]) for app_entry in apps_list)
+                )
+                tools_flat = [tool for sublist in per_app_tool_lists for tool in sublist]
+                return [f"{tool['id']}: {tool['description']}" for tool in tools_flat]
+
+            # Run all queries concurrently
+            query_results = await asyncio.gather(*(find_tools_for_query(q) for q in queries))
+
+            # Aggregate per-app with cap of 5 per app across all queries
+            for tool_desc in [tool for result in query_results for tool in result]:
+                app = tool_desc.split("__")[0]
+                if app not in app_tools:
+                    app_tools[app] = []
+                if len(app_tools[app]) < 5 and tool_desc not in app_tools[app]:
+                    app_tools[app].append(tool_desc)
+            for app in app_tools:
+                app_status = "connected" if app in connected_apps else "NOT connected"
+                all_tool_candidates += f"Tools from {app} (status: {app_status} by user):\n"
+                for tool in app_tools[app]:
+                    all_tool_candidates += f" - {tool}\n"
+                all_tool_candidates += "\n"
+
+            return all_tool_candidates
+        except Exception as e:
+            logger.error(f"Error retrieving tools: {e}")
+            return "Error: " + str(e)
+
+    @tool
+    async def load_tools(tool_ids: list[str]) -> list[str]:
+        """
+        Load the tools for the given tool ids. Returns the valid tool ids after loading.
+        Tool ids are of form 'appid__toolid'. Example: 'google_mail__send_email'
+        """
+        correct, incorrect = [], []
+        app_tool_list: dict[str, list[str]] = {}
+
+        # Group tool_ids by app for fewer registry calls
+        app_to_tools: dict[str, list[str]] = {}
+        for tool_id in tool_ids:
+            if "__" not in tool_id:
+                incorrect.append(tool_id)
+                continue
+            app, tool = tool_id.split("__", 1)
+            app_to_tools.setdefault(app, []).append((tool_id, tool))
+
+        # Fetch all apps concurrently
+        async def fetch_tools(app: str):
+            try:
+                tools_dict = await tool_registry.list_tools(app)
+                return app, {tool_unit["name"] for tool_unit in tools_dict}
+            except Exception:
+                return app, None
+
+        results = await asyncio.gather(*(fetch_tools(app) for app in app_to_tools))
+
+        # Build map of available tools per app
+        for app, tools in results:
+            if tools is not None:
+                app_tool_list[app] = tools
+
+        # Validate tool_ids
+        for app, tool_entries in app_to_tools.items():
+            available = app_tool_list.get(app)
+            if available is None:
+                incorrect.extend(tool_id for tool_id, _ in tool_entries)
+                continue
+            for tool_id, tool in tool_entries:
+                if tool in available:
+                    correct.append(tool_id)
+                else:
+                    incorrect.append(tool_id)
+
+        return correct
+
+    @tool
+    async def web_search(query: str) -> str:
+        """Search the web for the given query. Returns the search results. Do not use for app-specific searches (for example, reddit or linkedin searches should be done using the app's tools)"""
+        await tool_registry.export_tools(["exa__search_with_filters"], ToolFormat.LANGCHAIN)
+        response = await tool_registry.call_tool(
+            "exa__search_with_filters", {"query": query, "contents": {"summary": True}}
+        )
+        return response
+
+    return [search_tools, load_tools, web_search]