universal-mcp-agents 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/agents/__init__.py +19 -0
- universal_mcp/agents/autoagent/__init__.py +1 -1
- universal_mcp/agents/autoagent/__main__.py +1 -1
- universal_mcp/agents/autoagent/graph.py +32 -13
- universal_mcp/agents/autoagent/studio.py +3 -8
- universal_mcp/agents/base.py +80 -22
- universal_mcp/agents/bigtool/__init__.py +13 -9
- universal_mcp/agents/bigtool/__main__.py +6 -7
- universal_mcp/agents/bigtool/graph.py +84 -40
- universal_mcp/agents/bigtool/prompts.py +3 -3
- universal_mcp/agents/bigtool2/__init__.py +16 -6
- universal_mcp/agents/bigtool2/__main__.py +7 -6
- universal_mcp/agents/bigtool2/agent.py +4 -2
- universal_mcp/agents/bigtool2/graph.py +78 -36
- universal_mcp/agents/bigtool2/prompts.py +1 -1
- universal_mcp/agents/bigtoolcache/__init__.py +8 -4
- universal_mcp/agents/bigtoolcache/__main__.py +1 -1
- universal_mcp/agents/bigtoolcache/agent.py +5 -3
- universal_mcp/agents/bigtoolcache/context.py +0 -1
- universal_mcp/agents/bigtoolcache/graph.py +99 -69
- universal_mcp/agents/bigtoolcache/prompts.py +28 -0
- universal_mcp/agents/bigtoolcache/tools_all.txt +956 -0
- universal_mcp/agents/bigtoolcache/tools_important.txt +474 -0
- universal_mcp/agents/builder.py +62 -20
- universal_mcp/agents/cli.py +19 -5
- universal_mcp/agents/codeact/__init__.py +16 -4
- universal_mcp/agents/codeact/test.py +2 -1
- universal_mcp/agents/hil.py +16 -4
- universal_mcp/agents/llm.py +12 -4
- universal_mcp/agents/planner/__init__.py +14 -4
- universal_mcp/agents/planner/__main__.py +10 -6
- universal_mcp/agents/planner/graph.py +9 -3
- universal_mcp/agents/planner/prompts.py +14 -1
- universal_mcp/agents/planner/state.py +0 -1
- universal_mcp/agents/react.py +36 -22
- universal_mcp/agents/shared/tool_node.py +26 -11
- universal_mcp/agents/simple.py +27 -4
- universal_mcp/agents/tools.py +9 -4
- universal_mcp/agents/ui_tools.py +305 -0
- universal_mcp/agents/utils.py +55 -17
- {universal_mcp_agents-0.1.3.dist-info → universal_mcp_agents-0.1.5.dist-info}/METADATA +3 -2
- universal_mcp_agents-0.1.5.dist-info/RECORD +52 -0
- universal_mcp/agents/bigtool/context.py +0 -24
- universal_mcp/agents/bigtool2/context.py +0 -33
- universal_mcp_agents-0.1.3.dist-info/RECORD +0 -51
- {universal_mcp_agents-0.1.3.dist-info → universal_mcp_agents-0.1.5.dist-info}/WHEEL +0 -0
universal_mcp/agents/bigtoolcache/graph.py

```diff
@@ -1,6 +1,6 @@
 import json
 from datetime import UTC, datetime
-from typing import
+from typing import Literal, TypedDict, cast
 
 from langchain_anthropic import ChatAnthropic
 from langchain_core.language_models import BaseChatModel
@@ -15,94 +15,107 @@ from universal_mcp.agents.bigtoolcache.state import State
 from universal_mcp.logger import logger
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolFormat
+from universal_mcp.agents.bigtoolcache.prompts import TOOLS_LIST
 
 
 
-… (4 removed lines not captured in this rendering)
+class ToolSelectionOutput(TypedDict):
+    connected_tool_ids: list[str]
+    unconnected_tool_ids: list[str]
+
+
+def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
     @tool
     async def search_tools(queries: list[str]) -> str:
         """Search tools for a given list of queries
         Each single query should be atomic (doable with a single tool).
         For tasks requiring multiple tools, add separate queries for each subtask"""
         logger.info(f"Searching tools for queries: '{queries}'")
-… (17 removed lines not captured in this rendering)
-                if tool["id"] not in app_tools[app]:
-                    app_tools[app][tool["id"]] = tool["description"]
-            for app in app_tools:
-                app_status = "connected" if app in connected_apps else "NOT connected"
-                all_tool_candidates += f"Tools from {app} (status: {app_status} by user):\n"
-                for tool in app_tools[app]:
-                    all_tool_candidates += f"  - {tool}: {app_tools[app][tool]}\n"
-                all_tool_candidates += "\n"
-
-
-            return all_tool_candidates
-        except Exception as e:
-            logger.error(f"Error retrieving tools: {e}")
-            return "Error: " + str(e)
-
+        messages = [
+            {
+                "role": "system",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": TOOLS_LIST,
+                        "cache_control": {"type": "ephemeral", "ttl": "1h"},
+                    }
+                ],
+            },
+            {"role": "user", "content": str(queries)},
+        ]
+        response = llm.with_structured_output(ToolSelectionOutput).invoke(messages)
+        response_text = f"Connected tools: {response['connected_tool_ids']}\nUnconnected tools: {response['unconnected_tool_ids']}"
+        return response_text
+
     @tool
     async def load_tools(tool_ids: list[str]) -> list[dict[str, Any]]:
-        """Load the tools for the given tool ids. Returns the tool name, description, parameters schema, and output schema."""
+        """Load the tools for the given tool ids. Returns the tool name, description, parameters schema, and output schema. A tool id is made up using the app_id and the tool_name, attached by double underscore (__). e.g. google_mail__send_email"""
         temp_manager = tool_registry.tool_manager
         temp_manager.clear_tools()
         await tool_registry.export_tools(tool_ids, format=ToolFormat.NATIVE)
         tool_details = []
         for tool_id in tool_ids:
             tool = temp_manager.get_tool(tool_id)
-            tool_details.append(
-… (5 removed lines not captured in this rendering)
+            tool_details.append(
+                {
+                    "name": tool.name,
+                    "description": tool.description,
+                    "parameters_schema": tool.parameters,
+                    "output_schema": tool.output_schema,
+                }
+            )
         return tool_details
-
+
     @tool
     async def call_tool(tool_id: str, tool_args: dict[str, Any]) -> Any:
         """Call the tool with the given id and arguments."""
         return await tool_registry.call_tool(tool_id, tool_args)
 
-… (2 removed lines not captured in this rendering)
+    async def call_model(
+        state: State, runtime: Runtime[Context]
+    ) -> Command[Literal["select_tools", "call_tools"]]:
         logger.info("Calling model...")
         try:
-            system_message = runtime.context.system_prompt.format(
-… (1 removed line not captured in this rendering)
+            system_message = runtime.context.system_prompt.format(
+                system_time=datetime.now(tz=UTC).isoformat()
+            )
+            messages = [
+                {
+                    "role": "system",
+                    "content": [
+                        {
+                            "type": "text",
+                            "text": system_message,
+                            "cache_control": {"type": "ephemeral", "ttl": "1h"},
+                        }
+                    ],
+                },
+                *state["messages"],
+            ]
 
             model = llm
 
             if isinstance(model, ChatAnthropic):
                 model_with_tools = model.bind_tools(
-                    [search_tools, load_tools, call_tool],
+                    [search_tools, load_tools, call_tool],
+                    tool_choice="auto",
+                    cache_control={"type": "ephemeral", "ttl": "1h"},
                 )
             else:
-                model_with_tools = model.bind_tools(
+                model_with_tools = model.bind_tools(
+                    [search_tools, load_tools, call_tool], tool_choice="auto"
+                )
             response = cast(AIMessage, await model_with_tools.ainvoke(messages))
 
             if response.tool_calls:
-                logger.info(
+                logger.info(
+                    f"Model responded with {len(response.tool_calls)} tool calls."
+                )
                 if len(response.tool_calls) > 1:
-                    raise Exception(
+                    raise Exception(
+                        "Not possible in Claude with llm.bind_tools(tools=tools, tool_choice='auto')"
+                    )
                 tool_call = response.tool_calls[0]
                 if tool_call["name"] == search_tools.name:
                     logger.info("Model requested to select tools.")
```
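The rewritten `search_tools` no longer walks the registry in Python; it asks the LLM itself to pick tool ids from a cached copy of the full tools list (`TOOLS_LIST`) and return them as structured output. Below is a minimal, self-contained sketch of that pattern; the model id and the one-line tools list are assumptions for illustration, and the 1-hour `ttl` on `cache_control` is Anthropic's extended prompt caching, which may require a beta flag depending on API version.

```python
from typing import TypedDict

from langchain_anthropic import ChatAnthropic


class ToolSelectionOutput(TypedDict):
    connected_tool_ids: list[str]
    unconnected_tool_ids: list[str]


llm = ChatAnthropic(model="claude-3-5-sonnet-latest")  # assumed model id
TOOLS_LIST = "google_mail__send_email: Send an email (connected)"  # stand-in

messages = [
    {
        "role": "system",
        "content": [
            {
                "type": "text",
                "text": TOOLS_LIST,
                # The large, static tools list is marked cacheable so repeated
                # searches hit the server-side cache instead of re-sending it.
                "cache_control": {"type": "ephemeral", "ttl": "1h"},
            }
        ],
    },
    {"role": "user", "content": str(["send an email to alice"])},
]

# with_structured_output coerces the reply into the TypedDict shape above.
response = llm.with_structured_output(ToolSelectionOutput).invoke(messages)
print(response["connected_tool_ids"], response["unconnected_tool_ids"])
```

One detail worth noting: the released code calls the synchronous `invoke` inside an `async` tool; an `ainvoke` variant would avoid blocking the event loop, but the sketch mirrors the diff as shipped.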
universal_mcp/agents/bigtoolcache/graph.py (continued)

```diff
@@ -110,10 +123,18 @@ def build_graph(
                 elif tool_call["name"] == load_tools.name:
                     logger.info("Model requested to load tools.")
                     tool_details = await load_tools.ainvoke(input=tool_call["args"])
-                    tool_msg = ToolMessage(
+                    tool_msg = ToolMessage(
+                        f"Loaded tools. {tool_details}", tool_call_id=tool_call["id"]
+                    )
                     selected_tool_ids = tool_call["args"]["tool_ids"]
                     logger.info(f"Loaded tools: {selected_tool_ids}")
-                    return Command(
+                    return Command(
+                        goto="call_model",
+                        update={
+                            "messages": [response, tool_msg],
+                            "selected_tool_ids": selected_tool_ids,
+                        },
+                    )
                 elif tool_call["name"] == call_tool.name:
                     logger.info("Model requested to call tool.")
                     return Command(goto="call_tools", update={"messages": [response]})
```
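`call_model` routes by returning LangGraph `Command` objects, which bundle a state update with the name of the next node; the `Literal` return annotation tells the graph which destinations are possible. A hedged sketch of the idiom (the node name and update key follow the diff, the function body is illustrative):

```python
from typing import Literal

from langgraph.types import Command


def after_load_tools(state: dict) -> Command[Literal["call_model"]]:
    # One return value both updates the shared state and names the next hop,
    # replacing a separately registered conditional edge.
    return Command(
        goto="call_model",
        update={"selected_tool_ids": ["google_mail__send_email"]},  # illustrative id
    )
```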
```diff
@@ -125,12 +146,16 @@ def build_graph(
             logger.error(f"Error in call_model: {e}")
             raise
 
-    async def select_tools(
+    async def select_tools(
+        state: State, runtime: Runtime[Context]
+    ) -> Command[Literal["call_model"]]:
         logger.info("Selecting tools...")
         try:
             tool_call = state["messages"][-1].tool_calls[0]
-            searched_tools= await search_tools.ainvoke(input=tool_call["args"])
-            tool_msg = ToolMessage(
+            searched_tools = await search_tools.ainvoke(input=tool_call["args"])
+            tool_msg = ToolMessage(
+                f"Available tools: {searched_tools}", tool_call_id=tool_call["id"]
+            )
             return Command(goto="call_model", update={"messages": [tool_msg]})
         except Exception as e:
             logger.error(f"Error in select_tools: {e}")
```
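`select_tools` (like `call_model`) now takes an explicit `runtime: Runtime[Context]` parameter, LangGraph's mechanism for passing run-scoped configuration such as the system prompt into a node. A small sketch of that mechanism, assuming langgraph >= 0.6; the `Context` field is inferred from the diff, not taken from the package source:

```python
from dataclasses import dataclass

from langgraph.runtime import Runtime


@dataclass
class Context:
    system_prompt: str = "You are a helpful AI assistant.\nSystem time: {system_time}"


def some_node(state: dict, runtime: Runtime[Context]) -> dict:
    # runtime.context carries per-run configuration, so nodes need neither
    # globals nor closures to see the system prompt.
    prompt = runtime.context.system_prompt.format(system_time="2024-01-01T00:00:00Z")
    return {"messages": [prompt]}
```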
```diff
@@ -146,7 +171,9 @@ def build_graph(
             logger.info(f"Executing tool: {tool_id} with args: {tool_args}")
             try:
                 await tool_registry.export_tools([tool_id], ToolFormat.LANGCHAIN)
-                tool_result = await call_tool.ainvoke(
+                tool_result = await call_tool.ainvoke(
+                    input={"tool_id": tool_id, "tool_args": tool_args}
+                )
                 logger.info(f"Tool '{tool_id}' executed successfully.")
                 outputs.append(
                     ToolMessage(
@@ -154,18 +181,21 @@ def build_graph(
                         name=tool_id,
                         tool_call_id=tool_call["id"],
                     )
-… (9 removed lines not captured in this rendering)
+                recent_tool_ids.append(tool_call["name"])
+            except Exception as e:
+                logger.error(f"Error executing tool '{tool_call['name']}': {e}")
+                outputs.append(
+                    ToolMessage(
+                        content=json.dumps("Error: " + str(e)),
+                        name=tool_call["name"],
+                        tool_call_id=tool_call["id"],
+                    )
                 )
             )
-        return Command(
+        return Command(
+            goto="call_model",
+            update={"messages": outputs, "selected_tool_ids": recent_tool_ids},
+        )
 
     builder = StateGraph(State, context_schema=Context)
 
```
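The hunk ends where the graph is assembled. For orientation, here is a hedged sketch of how a `StateGraph` with a `context_schema` is typically wired and compiled; the static edges stand in for the `Command(goto=...)` hops the real nodes perform, and the node bodies are stubs:

```python
from dataclasses import dataclass
from typing import TypedDict

from langgraph.graph import END, START, StateGraph


class State(TypedDict):
    messages: list
    selected_tool_ids: list[str]


@dataclass
class Context:
    system_prompt: str = "System time: {system_time}"


def call_model(state: State) -> State:
    return state  # stub for the model node defined in the diff


def select_tools(state: State) -> State:
    return state  # stub for the tool-search node


def call_tools(state: State) -> State:
    return state  # stub for the tool-execution node


builder = StateGraph(State, context_schema=Context)
builder.add_node("call_model", call_model)
builder.add_node("select_tools", select_tools)
builder.add_node("call_tools", call_tools)
builder.add_edge(START, "call_model")
builder.add_edge("call_model", "select_tools")  # real graph: Command(goto=...)
builder.add_edge("select_tools", "call_tools")
builder.add_edge("call_tools", END)
graph = builder.compile()
```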
universal_mcp/agents/bigtoolcache/prompts.py

```diff
@@ -1,5 +1,24 @@
 """Default prompts used by the agent."""
 
+from pathlib import Path
+
+
+def load_tools_from_file():
+    """Load tools from the generated text file."""
+    # Get the directory where this file is located
+    current_dir = Path(__file__).parent
+
+    tools_file = current_dir / "tools_important.txt"
+    if not tools_file.exists():
+        tools_file = current_dir / "tools_all.txt"
+
+    if tools_file.exists():
+        with open(tools_file, encoding="utf-8") as f:
+            return f.read()
+    else:
+        return "No tools file found. Please run tool_retrieve.py to generate the tools list."
+
+
 SYSTEM_PROMPT = """You are a helpful AI assistant.
 
 **Core Directives:**
@@ -11,3 +30,12 @@ SYSTEM_PROMPT = """You are a helpful AI assistant.
 
 System time: {system_time}
 """
+
+
+TOOLS_LIST = f""" This is the list of all the tools available to you:
+{load_tools_from_file()}
+
+You will be provided a list of queries (which may be similar or different from each other). Your job is to select the relavent tools for the user's request. sometimes, multiple apps might be relevant for the same task. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must return both apps tools. If the query specifically asks you to use an app that is not connected, return the tools for that app, they can still be connected by the user.
+
+You have to return the tool_ids by constructing the tool_id from the app_id and the tool_name, attached by double underscore (__). e.g. google_mail__send_email
+"""
```
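Both the `load_tools` docstring and the `TOOLS_LIST` prompt rely on the same id convention: `app_id` and `tool_name` joined by a double underscore. A tiny hypothetical helper pair making that convention explicit (these functions are not part of the package):

```python
def make_tool_id(app_id: str, tool_name: str) -> str:
    """Construct a tool id per the prompt's convention: app_id + '__' + tool_name."""
    return f"{app_id}__{tool_name}"


def split_tool_id(tool_id: str) -> tuple[str, str]:
    """Invert make_tool_id; partition splits on the first double underscore."""
    app_id, _, tool_name = tool_id.partition("__")
    return app_id, tool_name


assert make_tool_id("google_mail", "send_email") == "google_mail__send_email"
assert split_tool_id("google_mail__send_email") == ("google_mail", "send_email")
```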