universal-mcp-agents 0.1.9__py3-none-any.whl → 0.1.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of universal-mcp-agents might be problematic.
- universal_mcp/agents/__init__.py +9 -9
- universal_mcp/agents/base.py +13 -18
- universal_mcp/agents/bigtool2/__init__.py +6 -7
- universal_mcp/agents/bigtool2/__main__.py +2 -4
- universal_mcp/agents/bigtool2/agent.py +1 -0
- universal_mcp/agents/bigtool2/graph.py +48 -184
- universal_mcp/agents/bigtool2/meta_tools.py +120 -0
- universal_mcp/agents/bigtoolcache/__init__.py +31 -22
- universal_mcp/agents/bigtoolcache/__main__.py +1 -4
- universal_mcp/agents/bigtoolcache/agent.py +1 -3
- universal_mcp/agents/bigtoolcache/graph.py +101 -191
- universal_mcp/agents/bigtoolcache/prompts.py +7 -31
- universal_mcp/agents/bigtoolcache/tools.py +141 -0
- universal_mcp/agents/builder.py +10 -20
- universal_mcp/agents/cli.py +1 -2
- universal_mcp/agents/codeact/__init__.py +1 -1
- universal_mcp/agents/codeact/__main__.py +15 -5
- universal_mcp/agents/codeact/agent.py +55 -66
- universal_mcp/agents/codeact/prompts.py +9 -10
- universal_mcp/agents/codeact/sandbox.py +5 -2
- universal_mcp/agents/codeact/state.py +2 -4
- universal_mcp/agents/codeact/utils.py +12 -5
- universal_mcp/agents/hil.py +1 -6
- universal_mcp/agents/planner/__init__.py +1 -3
- universal_mcp/agents/planner/graph.py +1 -3
- universal_mcp/agents/react.py +14 -6
- universal_mcp/agents/shared/prompts.py +3 -3
- universal_mcp/agents/shared/tool_node.py +47 -47
- universal_mcp/agents/simple.py +2 -1
- universal_mcp/agents/utils.py +4 -15
- universal_mcp/applications/ui/app.py +5 -15
- {universal_mcp_agents-0.1.9.dist-info → universal_mcp_agents-0.1.10.dist-info}/METADATA +2 -1
- universal_mcp_agents-0.1.10.dist-info/RECORD +42 -0
- universal_mcp/agents/autoagent/__init__.py +0 -30
- universal_mcp/agents/autoagent/__main__.py +0 -25
- universal_mcp/agents/autoagent/context.py +0 -26
- universal_mcp/agents/autoagent/graph.py +0 -170
- universal_mcp/agents/autoagent/prompts.py +0 -9
- universal_mcp/agents/autoagent/state.py +0 -27
- universal_mcp/agents/autoagent/utils.py +0 -13
- universal_mcp/agents/bigtool/__init__.py +0 -58
- universal_mcp/agents/bigtool/__main__.py +0 -23
- universal_mcp/agents/bigtool/graph.py +0 -210
- universal_mcp/agents/bigtool/prompts.py +0 -31
- universal_mcp/agents/bigtool/state.py +0 -27
- universal_mcp/agents/bigtoolcache/tools_all.txt +0 -956
- universal_mcp/agents/bigtoolcache/tools_important.txt +0 -474
- universal_mcp_agents-0.1.9.dist-info/RECORD +0 -54
- {universal_mcp_agents-0.1.9.dist-info → universal_mcp_agents-0.1.10.dist-info}/WHEEL +0 -0
universal_mcp/agents/bigtoolcache/__init__.py

```diff
@@ -1,53 +1,62 @@
+from datetime import UTC, datetime
+
 from langgraph.checkpoint.base import BaseCheckpointSaver
+from loguru import logger
+from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolConfig, ToolFormat
 
 from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.logger import logger
-from universal_mcp.tools.registry import ToolRegistry
 
 from .graph import build_graph
 from .prompts import SYSTEM_PROMPT
+from .tools import create_meta_tools
 
 
 class BigToolAgentCache(BaseAgent):
     def __init__(
         self,
-        name: str,
-        instructions: str,
-        model: str,
         registry: ToolRegistry,
+        name: str = "Wingman",
+        instructions: str = "",
+        model: str = "anthropic/claude-4-sonnet-20250514",
+        tools: ToolConfig | None = None,
         memory: BaseCheckpointSaver | None = None,
         **kwargs,
     ):
-
-        full_instructions = f"{SYSTEM_PROMPT}\n\n**User Instructions:**\n{instructions}"
-        super().__init__(name, full_instructions, model, memory, **kwargs)
+        super().__init__(name, instructions, model, memory, **kwargs)
 
-        self.
-        self.
+        self._tool_registry = registry
+        self._tools = tools or {}
+        if "ui" not in self._tools:
+            self._tools["ui"] = ["create_table"]
         self.recursion_limit = kwargs.get("recursion_limit", 10)
 
-        logger.info(
-
+        logger.info(f"BigToolAgent '{self.name}' initialized with model '{self.model}'.")
+
+    def _build_system_message(self):
+        return SYSTEM_PROMPT.format(
+            name=self.name,
+            instructions=f"**User Instructions:**\n{self.instructions}" if len(self.instructions) > 0 else "",
+            system_time=datetime.now(tz=UTC).isoformat(),
         )
 
     async def _build_graph(self):
-        """Build the
-        logger.info(f"Building graph for BigToolAgent '{self.name}'...")
+        """Build the LangGraph workflow"""
         try:
+            default_tools = await self._tool_registry.export_tools(self._tools, ToolFormat.LANGCHAIN)
+            meta_tools = create_meta_tools(self._tool_registry)
             graph_builder = build_graph(
-
-
+                registry=self._tool_registry,
+                base_model=load_chat_model(self.model),
+                system_prompt=self._build_system_message(),
+                default_tools=default_tools,
+                meta_tools=meta_tools,
             )
-
             compiled_graph = graph_builder.compile(checkpointer=self.memory)
-            logger.info("Graph built and compiled successfully.")
             return compiled_graph
         except Exception as e:
-
-                f"Error building graph for BigToolAgentCache '{self.name}': {e}"
-            )
-            raise
+            raise e
 
     @property
     def graph(self):
```
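Net effect: every constructor argument except `registry` now has a default, and the system prompt is rendered per call via `_build_system_message` instead of being baked into the instructions at construction time. A minimal construction sketch, using the `AgentrRegistry` that appears in the `__main__.py` hunk below:

```python
from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.agents.bigtoolcache import BigToolAgentCache

# Only the registry is required now; name/instructions/model fall back to
# "Wingman" / "" / "anthropic/claude-4-sonnet-20250514", and a "ui" entry
# (["create_table"]) is injected when no tools config is passed.
agent = BigToolAgentCache(registry=AgentrRegistry())
```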
universal_mcp/agents/bigtoolcache/__main__.py

```diff
@@ -1,16 +1,13 @@
 import asyncio
 
 from loguru import logger
-
 from universal_mcp.agentr.registry import AgentrRegistry
+
 from universal_mcp.agents.bigtoolcache import BigToolAgentCache
 
 
 async def main():
     agent = BigToolAgentCache(
-        name="bigtoolcache",
-        instructions="You are a helpful assistant that can use tools to help the user.",
-        model="azure/gpt-4.1",
         registry=AgentrRegistry(),
     )
     async for event in agent.stream(
```
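With those defaults, the example entrypoint shrinks to a registry-only construction. A hedged sketch of running it end to end; the `stream(...)` arguments are truncated in the diff above, so the ones below are illustrative assumptions:

```python
import asyncio

from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.agents.bigtoolcache import BigToolAgentCache


async def main():
    agent = BigToolAgentCache(registry=AgentrRegistry())
    # Hypothetical arguments; the released stream(...) call is cut off above.
    async for event in agent.stream(thread_id="demo", user_input="What meetings do I have today?"):
        print(event)


if __name__ == "__main__":
    asyncio.run(main())
```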
universal_mcp/agents/bigtoolcache/agent.py

```diff
@@ -1,12 +1,10 @@
 from universal_mcp.agentr.registry import AgentrRegistry
+
 from universal_mcp.agents.bigtoolcache import BigToolAgentCache
 
 
 async def agent():
     agent_object = await BigToolAgentCache(
-        name="BigTool Agent Cache version",
-        instructions="You are a helpful assistant that can use various tools to complete tasks.",
-        model="anthropic/claude-4-sonnet-20250514",
         registry=AgentrRegistry(),
     )._build_graph()
     return agent_object
```
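`agent.py` remains a tiny graph factory: awaiting `agent()` builds the agent with the new defaults and returns the compiled graph from `_build_graph()`, the shape langgraph-style deployments typically expect. A usage sketch under that assumption:

```python
import asyncio

from universal_mcp.agents.bigtoolcache.agent import agent


async def demo():
    graph = await agent()  # compiled LangGraph per _build_graph() above
    print(type(graph).__name__)


asyncio.run(demo())
```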
universal_mcp/agents/bigtoolcache/graph.py

```diff
@@ -1,204 +1,114 @@
 import json
-from
-from typing import Any, Literal, TypedDict, cast
+from typing import Literal, cast
 
+from dotenv import load_dotenv
 from langchain_anthropic import ChatAnthropic
 from langchain_core.language_models import BaseChatModel
-from langchain_core.messages import AIMessage, ToolMessage
-from langchain_core.tools import
+from langchain_core.messages import AIMessage, SystemMessage, ToolMessage
+from langchain_core.tools import BaseTool
 from langgraph.graph import StateGraph
-from langgraph.runtime import Runtime
 from langgraph.types import Command
-
-from universal_mcp.agents.bigtoolcache.context import Context
-from universal_mcp.agents.bigtoolcache.prompts import TOOLS_LIST
-from universal_mcp.agents.bigtoolcache.state import State
-from universal_mcp.logger import logger
+from loguru import logger
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolFormat
 
-
-
-
-
-
-
-def build_graph(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        tool_details.append(
-            {
-                "name": tool.name,
-                "description": tool.description,
-                "parameters_schema": tool.parameters,
-                "output_schema": tool.output_schema,
-            }
+from .state import State
+from .tools import get_valid_tools
+
+load_dotenv()
+
+
+def build_graph(
+    registry: ToolRegistry,
+    base_model: BaseChatModel,
+    system_prompt: str,
+    default_tools: list[BaseTool],
+    meta_tools: dict[str, BaseTool],
+):
+    """Build the LangGraph workflow"""
+
+    async def agent_node(state: State) -> Command[Literal["execute_tools"]]:
+        """Main agent reasoning node"""
+
+        # Combine meta tools with currently loaded tools
+        if len(state["selected_tool_ids"]) > 0:
+            current_tools = await registry.export_tools(tools=state["selected_tool_ids"], format=ToolFormat.LANGCHAIN)
+        else:
+            current_tools = []
+        all_tools = [meta_tools["search_tools"], meta_tools["load_tools"], meta_tools.get("web_search")] + default_tools + current_tools
+
+        # Remove duplicates based on tool name
+        seen_names = set()
+        unique_tools = []
+        for tool in all_tools:
+            if tool.name not in seen_names:
+                seen_names.add(tool.name)
+                unique_tools.append(tool)
+
+        if isinstance(base_model, ChatAnthropic):
+            model_with_tools = base_model.bind_tools(
+                unique_tools,
+                tool_choice="auto",
+                parallel_tool_calls=False,
+                cache_control={"type": "ephemeral", "ttl": "1h"},
             )
-
-
-
-
-
-        return await tool_registry.call_tool(tool_id, tool_args)
-
-    async def call_model(
-        state: State, runtime: Runtime[Context]
-    ) -> Command[Literal["select_tools", "call_tools"]]:
-        logger.info("Calling model...")
-        try:
-            system_message = runtime.context.system_prompt.format(
-                system_time=datetime.now(tz=UTC).isoformat()
+        else:
+            model_with_tools = base_model.bind_tools(
+                unique_tools,
+                tool_choice="auto",
+                parallel_tool_calls=False,
             )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            )
-
-
-
-            )
-
-
-            if response.tool_calls:
-                logger.info(
-                    f"Model responded with {len(response.tool_calls)} tool calls."
-                )
-                if len(response.tool_calls) > 1:
-                    raise Exception(
-                        "Not possible in Claude with llm.bind_tools(tools=tools, tool_choice='auto')"
-                    )
-                tool_call = response.tool_calls[0]
-                if tool_call["name"] == search_tools.name:
-                    logger.info("Model requested to select tools.")
-                    return Command(goto="select_tools", update={"messages": [response]})
-                elif tool_call["name"] == load_tools.name:
-                    logger.info("Model requested to load tools.")
-                    tool_details = await load_tools.ainvoke(input=tool_call["args"])
-                    tool_msg = ToolMessage(
-                        f"Loaded tools. {tool_details}", tool_call_id=tool_call["id"]
-                    )
-                    selected_tool_ids = tool_call["args"]["tool_ids"]
-                    logger.info(f"Loaded tools: {selected_tool_ids}")
-                    return Command(
-                        goto="call_model",
-                        update={
-                            "messages": [response, tool_msg],
-                            "selected_tool_ids": selected_tool_ids,
-                        },
-                    )
-                elif tool_call["name"] == call_tool.name:
-                    logger.info("Model requested to call tool.")
-                    return Command(goto="call_tools", update={"messages": [response]})
-            return Command(goto="call_tools", update={"messages": [response]})
+
+        # Get response from model
+        messages = [SystemMessage(content=system_prompt), *state["messages"]]
+        response = cast(AIMessage, await model_with_tools.ainvoke(messages))
+
+        if response.tool_calls:
+            return Command(goto="execute_tools", update={"messages": [response]})
+        else:
+            return Command(update={"messages": [response], "model_with_tools": model_with_tools})
+
+    async def execute_tools_node(state: State) -> Command[Literal["agent"]]:
+        """Execute tool calls"""
+        last_message = state["messages"][-1]
+        tool_calls = last_message.tool_calls if isinstance(last_message, AIMessage) else []
+
+        tool_messages = []
+        new_tool_ids = []
+
+        for tool_call in tool_calls:
+            if tool_call["name"] == "load_tools":  # Handle load_tools separately
+                valid_tools = await get_valid_tools(tool_ids=tool_call["args"]["tool_ids"], registry=registry)
+                new_tool_ids.extend(valid_tools)
+                # Create tool message response
+                tool_result = f"Successfully loaded {len(valid_tools)} tools: {valid_tools}"
+            elif tool_call["name"] == "search_tools":
+                tool_result = await meta_tools["search_tools"].ainvoke(tool_call["args"])
+            elif tool_call["name"] == "web_search":
+                tool_result = await meta_tools["web_search"].ainvoke(tool_call["args"])
             else:
-
-
-
-
-
-
-
-        state: State, runtime: Runtime[Context]
-    ) -> Command[Literal["call_model"]]:
-        logger.info("Selecting tools...")
-        try:
-            tool_call = state["messages"][-1].tool_calls[0]
-            searched_tools = await search_tools.ainvoke(input=tool_call["args"])
-            tool_msg = ToolMessage(
-                f"Available tools: {searched_tools}", tool_call_id=tool_call["id"]
+                # Load tools first
+                await registry.export_tools([tool_call["name"]], ToolFormat.LANGCHAIN)
+                tool_result = await registry.call_tool(tool_call["name"], tool_call["args"])
+            tool_message = ToolMessage(
+                content=json.dumps(tool_result),
+                name=tool_call["name"],
+                tool_call_id=tool_call["id"],
             )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            )
-            logger.info(f"Tool '{tool_id}' executed successfully.")
-            outputs.append(
-                ToolMessage(
-                    content=json.dumps(tool_result),
-                    name=tool_id,
-                    tool_call_id=tool_call["id"],
-                ))
-            recent_tool_ids.append(tool_call["name"])
-        except Exception as e:
-            logger.error(f"Error executing tool '{tool_call['name']}': {e}")
-            outputs.append(
-                ToolMessage(
-                    content=json.dumps("Error: " + str(e)),
-                    name=tool_call["name"],
-                    tool_call_id=tool_call["id"],
-                )
-            )
-        return Command(
-            goto="call_model",
-            update={"messages": outputs, "selected_tool_ids": recent_tool_ids},
-        )
-
-    builder = StateGraph(State, context_schema=Context)
-
-    builder.add_node(call_model)
-    builder.add_node(select_tools)
-    builder.add_node(call_tools)
-    builder.set_entry_point("call_model")
-    return builder
+            tool_messages.append(tool_message)
+
+        return Command(goto="agent", update={"messages": tool_messages, "selected_tool_ids": new_tool_ids})
+
+
+
+    # Define the graph
+    workflow = StateGraph(State)
+
+    # Add nodes
+    workflow.add_node("agent", agent_node)
+    workflow.add_node("execute_tools", execute_tools_node)
+
+    # Set entry point
+    workflow.set_entry_point("agent")
+
+    return workflow
```
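The rewrite replaces the old `call_model` / `select_tools` / `call_tools` triangle with a two-node loop: `agent` binds the meta tools plus whatever is currently loaded, and `execute_tools` dispatches each call (`load_tools`, `search_tools`, `web_search`, or a concrete app tool) before handing control back to `agent`. A hedged invocation sketch; the state keys come from the node code above, while the message-tuple shorthand and model id are assumptions:

```python
from universal_mcp.agents.bigtoolcache.graph import build_graph
from universal_mcp.agents.bigtoolcache.tools import create_meta_tools
from universal_mcp.agents.llm import load_chat_model


async def run_once(registry, system_prompt: str, default_tools: list):
    # Wire build_graph the same way _build_graph does in the __init__.py hunk.
    workflow = build_graph(
        registry=registry,
        base_model=load_chat_model("anthropic/claude-4-sonnet-20250514"),
        system_prompt=system_prompt,
        default_tools=default_tools,
        meta_tools=create_meta_tools(registry),
    )
    graph = workflow.compile()
    # Initial state mirrors the nodes: messages plus selected_tool_ids.
    return await graph.ainvoke(
        {"messages": [("user", "Find my latest unread emails")], "selected_tool_ids": []}
    )
```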
universal_mcp/agents/bigtoolcache/prompts.py

```diff
@@ -1,41 +1,17 @@
 """Default prompts used by the agent."""
 
-
-
-
-def load_tools_from_file():
-    """Load tools from the generated text file."""
-    # Get the directory where this file is located
-    current_dir = Path(__file__).parent
-
-    tools_file = current_dir / "tools_important.txt"
-    if not tools_file.exists():
-        tools_file = current_dir / "tools_all.txt"
-
-    if tools_file.exists():
-        with open(tools_file, encoding="utf-8") as f:
-            return f.read()
-    else:
-        return "No tools file found. Please run tool_retrieve.py to generate the tools list."
-
-
-SYSTEM_PROMPT = """You are a helpful AI assistant.
+SYSTEM_PROMPT = """You are a helpful AI assistant, called {name}.
 
 **Core Directives:**
-1. **Always Use Tools for Tasks:** For any user request that requires an action (e.g., sending an email, searching for information, creating an event), you MUST use a tool. Do not
-2. **First Step is ALWAYS `search_tools`:** Before you can use any other tool, you MUST first call the `search_tools` function to find the right tools for the user's request. This is your mandatory first action. You must not use the same/similar query multiple times in the list. The list should have multiple queries only if the task has clearly different sub-tasks.
-3. **Load Tools:** After looking at the output of `search_tools`, you MUST call the `load_tools` function to load only the tools you want to use. Use your judgement to eliminate irrelevant apps that came up just because of semantic similarity. However, sometimes, multiple apps might be relevant for the same task. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must ask the user to choose the app. In case the user asks you to use an app that is not connected, call the apps tools normally. The tool will return a link for connecting that you should pass on to the user.
-4. **Call Tools:** After loading the tools, you MUST call the `call_tool` function to call the tools you want to use. You must call the tool with the correct arguments. You can only call the tool once you have loaded it.
-5. **Strictly Follow the Process:** Your only job in your first turn is to analyze the user's request and call `search_tools` with a concise query describing the core task. Do not engage in conversation.
+1. **Always Use Tools for Tasks:** For any user request that requires an action (e.g., sending an email, searching for information, creating an event, displaying a chart), you MUST use a tool. Do not refuse a task if a tool might exist for it.
 
-
-"""
+2. Check if your existing tools or knowledge can handle the user's request. If they can, use them. If they cannot, you must call the `search_tools` function to find the right tools for the user's request. You must not use the same/similar query multiple times in the list. The list should have multiple queries only if the task has clearly different sub-tasks. If you do not find any specific relevant tools, use the pre-loaded generic tools.
 
+3. **Load Tools:** After looking at the output of `search_tools`, you MUST call the `load_tools` function to load only the tools you want to use. Provide the full tool ids, not just the app names. Use your judgement to eliminate irrelevant apps that came up just because of semantic similarity. However, sometimes, multiple apps might be relevant for the same task. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must ask the user to choose the app. In case the user asks you to use an app that is not connected, call the apps tools normally. The tool will return a link for connecting that you should pass on to the user.
 
-
-{load_tools_from_file()}
+4. **Strictly Follow the Process:** Your only job in your first turn is to analyze the user's request and answer using existing tools/knowledge or `search_tools` with a concise query describing the core task. Do not engage in conversation, or extend the conversation beyond the user's request.
 
-
+{instructions}
 
-
+System time: {system_time}
 """
```
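Since the module no longer reads tool lists from disk, `SYSTEM_PROMPT` is now a plain template with three placeholders, filled by `_build_system_message` in the `__init__.py` hunk above. A minimal rendering sketch:

```python
from datetime import UTC, datetime

from universal_mcp.agents.bigtoolcache.prompts import SYSTEM_PROMPT

# Mirrors _build_system_message: instructions collapse to "" when unset.
rendered = SYSTEM_PROMPT.format(
    name="Wingman",
    instructions="**User Instructions:**\nBe concise.",  # illustrative user instructions
    system_time=datetime.now(tz=UTC).isoformat(),
)
print(rendered)
```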
universal_mcp/agents/bigtoolcache/tools.py (new file)

```diff
@@ -0,0 +1,141 @@
+import asyncio
+from collections import defaultdict
+from typing import Any
+
+from langchain_core.tools import tool
+from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolFormat
+
+
+def create_meta_tools(tool_registry: ToolRegistry) -> dict[str, Any]:
+    """Create the meta tools for searching and loading tools"""
+
+    @tool
+    async def search_tools(queries: list[str]) -> str:
+        """Search for relevant tools given list of queries.
+        Each single query should be atomic (doable with a single tool).
+        For tasks requiring multiple tools, add separate queries for each subtask"""
+        try:
+            # Fetch all connections
+            connections = await tool_registry.list_connected_apps()
+            connected_apps = {connection["app_id"] for connection in connections}
+
+            # Use defaultdict to avoid key existence checks
+            app_tools = defaultdict(list)
+
+            # Process all queries concurrently
+            search_tasks = []
+            for query in queries:
+                search_tasks.append(_search_query_tools(query))
+
+            query_results = await asyncio.gather(*search_tasks)
+
+            # Aggregate results with limit per app
+            for tools_list in query_results:
+                for tool in tools_list:
+                    app = tool["id"].split("__")[0]
+                    if len(app_tools[app]) < 5:
+                        app_tools[app].append(f"{tool['id']}: {tool['description']}")
+
+            # Build result string efficiently
+            result_parts = []
+            for app, tools in app_tools.items():
+                app_status = "connected" if app in connected_apps else "NOT connected"
+                result_parts.append(f"Tools from {app} (status: {app_status} by user):")
+                for tool in tools:
+                    result_parts.append(f"  - {tool}")
+                result_parts.append("")  # Empty line between apps
+
+            result_parts.append("Call load_tools to select the required tools only.")
+            return "\n".join(result_parts)
+
+        except Exception as e:
+            return f"Error: {e}"
+
+    async def _search_query_tools(query: str) -> list[dict]:
+        """Helper function to search apps and tools for a single query."""
+        # Start both searches concurrently
+        tools_search_task = tool_registry.search_tools(query, limit=10)
+        apps_search_task = tool_registry.search_apps(query, limit=4)
+
+        # Wait for both to complete
+        tools_from_general_search, apps_list = await asyncio.gather(tools_search_task, apps_search_task)
+
+        # Create tasks for searching tools from each app
+        app_tool_tasks = [tool_registry.search_tools(query, limit=5, app_id=app["id"]) for app in apps_list]
+
+        # Wait for all app-specific tool searches to complete
+        app_tools_results = await asyncio.gather(*app_tool_tasks)
+
+        # Combine all results
+        tools_list = list(tools_from_general_search)
+        for app_tools in app_tools_results:
+            tools_list.extend(app_tools)
+
+        return tools_list
+
+    @tool
+    async def load_tools(tool_ids: list[str]) -> str:
+        """Load specific tools by their IDs for use in subsequent steps.
+
+        Args:
+            tool_ids: Tool ids in the form 'app__tool'. Example: 'google_mail__send_email'
+
+        Returns:
+            Confirmation message about loaded tools
+        """
+        return f"Successfully loaded {len(tool_ids)} tools: {tool_ids}"
+
+    @tool
+    async def web_search(query: str) -> str:
+        """Search the web for the given query. Returns the search results. Do not use for app-specific searches (for example, reddit or linkedin searches should be done using the app's tools)"""
+        await tool_registry.export_tools(["exa__search_with_filters"], ToolFormat.LANGCHAIN)
+        response = await tool_registry.call_tool(
+            "exa__search_with_filters", {"query": query, "contents": {"summary": True}}
+        )
+        return response
+
+    return {"search_tools": search_tools, "load_tools": load_tools, "web_search": web_search}
+
+
+async def get_valid_tools(tool_ids: list[str], registry: ToolRegistry) -> list[str]:
+    correct, incorrect = [], []
+    app_tool_list: dict[str, set[str]] = {}
+
+    # Group tool_ids by app for fewer registry calls
+    app_to_tools: dict[str, list[tuple[str, str]]] = {}
+    for tool_id in tool_ids:
+        if "__" not in tool_id:
+            incorrect.append(tool_id)
+            continue
+        app, tool_name = tool_id.split("__", 1)
+        app_to_tools.setdefault(app, []).append((tool_id, tool_name))
+
+    # Fetch all apps concurrently
+    async def fetch_tools(app: str):
+        try:
+            tools_dict = await registry.list_tools(app)
+            return app, {tool_unit["name"] for tool_unit in tools_dict}
+        except Exception:
+            return app, None
+
+    results = await asyncio.gather(*(fetch_tools(app) for app in app_to_tools))
+
+    # Build map of available tools per app
+    for app, tools in results:
+        if tools is not None:
+            app_tool_list[app] = tools
+
+    # Validate tool_ids
+    for app, tool_entries in app_to_tools.items():
+        available = app_tool_list.get(app)
+        if available is None:
+            incorrect.extend(tool_id for tool_id, _ in tool_entries)
+            continue
+        for tool_id, tool_name in tool_entries:
+            if tool_name in available:
+                correct.append(tool_id)
+            else:
+                incorrect.append(tool_id)
+
+    return correct
```