universal-mcp-agents 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of universal-mcp-agents might be problematic. Click here for more details.

Files changed (32)
  1. universal_mcp/agents/autoagent/graph.py +30 -11
  2. universal_mcp/agents/autoagent/studio.py +1 -7
  3. universal_mcp/agents/base.py +55 -9
  4. universal_mcp/agents/bigtool/__init__.py +3 -1
  5. universal_mcp/agents/bigtool/graph.py +78 -25
  6. universal_mcp/agents/bigtool2/__init__.py +3 -1
  7. universal_mcp/agents/bigtool2/agent.py +2 -1
  8. universal_mcp/agents/bigtool2/context.py +0 -1
  9. universal_mcp/agents/bigtool2/graph.py +76 -32
  10. universal_mcp/agents/bigtoolcache/__init__.py +6 -2
  11. universal_mcp/agents/bigtoolcache/agent.py +2 -1
  12. universal_mcp/agents/bigtoolcache/context.py +0 -1
  13. universal_mcp/agents/bigtoolcache/graph.py +88 -59
  14. universal_mcp/agents/bigtoolcache/prompts.py +29 -0
  15. universal_mcp/agents/bigtoolcache/tools_all.txt +956 -0
  16. universal_mcp/agents/bigtoolcache/tools_important.txt +474 -0
  17. universal_mcp/agents/builder.py +19 -5
  18. universal_mcp/agents/codeact/__init__.py +16 -4
  19. universal_mcp/agents/hil.py +16 -4
  20. universal_mcp/agents/llm.py +5 -1
  21. universal_mcp/agents/planner/__init__.py +7 -3
  22. universal_mcp/agents/planner/__main__.py +3 -1
  23. universal_mcp/agents/planner/graph.py +3 -1
  24. universal_mcp/agents/react.py +5 -1
  25. universal_mcp/agents/shared/tool_node.py +24 -8
  26. universal_mcp/agents/simple.py +8 -1
  27. universal_mcp/agents/tools.py +9 -3
  28. universal_mcp/agents/utils.py +35 -7
  29. {universal_mcp_agents-0.1.2.dist-info → universal_mcp_agents-0.1.4.dist-info}/METADATA +4 -4
  30. universal_mcp_agents-0.1.4.dist-info/RECORD +53 -0
  31. universal_mcp_agents-0.1.2.dist-info/RECORD +0 -51
  32. {universal_mcp_agents-0.1.2.dist-info → universal_mcp_agents-0.1.4.dist-info}/WHEEL +0 -0
@@ -27,7 +27,9 @@ class BigToolAgentCache(BaseAgent):
27
27
  self.llm = load_chat_model(self.model)
28
28
  self.recursion_limit = kwargs.get("recursion_limit", 10)
29
29
 
30
- logger.info(f"BigToolAgent '{self.name}' initialized with model '{self.model}'.")
30
+ logger.info(
31
+ f"BigToolAgent '{self.name}' initialized with model '{self.model}'."
32
+ )
31
33
 
32
34
  async def _build_graph(self):
33
35
  """Build the bigtool agent graph using the existing create_agent function."""
@@ -42,7 +44,9 @@ class BigToolAgentCache(BaseAgent):
42
44
  logger.info("Graph built and compiled successfully.")
43
45
  return compiled_graph
44
46
  except Exception as e:
45
- logger.error(f"Error building graph for BigToolAgentCache '{self.name}': {e}")
47
+ logger.error(
48
+ f"Error building graph for BigToolAgentCache '{self.name}': {e}"
49
+ )
46
50
  raise
47
51
 
48
52
  @property
@@ -1,6 +1,7 @@
1
1
  from universal_mcp.agents.bigtoolcache import BigToolAgentCache
2
2
  from universal_mcp.agentr.registry import AgentrRegistry
3
3
 
4
+
4
5
  async def agent():
5
6
  agent_object = await BigToolAgentCache(
6
7
  name="BigTool Agent 2",
@@ -8,4 +9,4 @@ async def agent():
8
9
  model="anthropic/claude-4-sonnet-20250514",
9
10
  registry=AgentrRegistry(),
10
11
  )._build_graph()
11
- return agent_object
12
+ return agent_object
@@ -30,4 +30,3 @@ class Context:
30
30
  "This is to prevent infinite recursion."
31
31
  },
32
32
  )
33
-
@@ -15,94 +15,106 @@ from universal_mcp.agents.bigtoolcache.state import State
15
15
  from universal_mcp.logger import logger
16
16
  from universal_mcp.tools.registry import ToolRegistry
17
17
  from universal_mcp.types import ToolFormat
18
+ from universal_mcp.agents.bigtoolcache.prompts import TOOLS_LIST
18
19
 
19
20
 
21
+ class ToolSelectionOutput(TypedDict):
22
+ connected_tool_ids: list[str]
23
+ unconnected_tool_ids: list[str]
20
24
 
21
- def build_graph(
22
- tool_registry: ToolRegistry,
23
- llm: BaseChatModel
24
- ):
25
+
26
+ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
25
27
  @tool
26
28
  async def search_tools(queries: list[str]) -> str:
27
29
  """Search tools for a given list of queries
28
30
  Each single query should be atomic (doable with a single tool).
29
31
  For tasks requiring multiple tools, add separate queries for each subtask"""
30
32
  logger.info(f"Searching tools for queries: '{queries}'")
31
- try:
32
- all_tool_candidates = ""
33
- app_ids = await tool_registry.list_all_apps()
34
- connections = await tool_registry.list_connected_apps()
35
- connection_ids = set([connection["app_id"] for connection in connections])
36
- connected_apps = [app["id"] for app in app_ids if app["id"] in connection_ids]
37
- unconnected_apps = [app["id"] for app in app_ids if app["id"] not in connection_ids]
38
- app_tools = {}
39
- for task_query in queries:
40
- tools_list = await tool_registry.search_tools(task_query, limit=40)
41
- for tool in tools_list:
42
- app = tool["id"].split("__")[0]
43
- if app not in app_tools:
44
- if len(app_tools.keys()) >= 10:
45
- break
46
- app_tools[app] = {}
47
- if len(app_tools[app]) < 3:
48
- if tool["id"] not in app_tools[app]:
49
- app_tools[app][tool["id"]] = tool["description"]
50
- for app in app_tools:
51
- app_status = "connected" if app in connected_apps else "NOT connected"
52
- all_tool_candidates += f"Tools from {app} (status: {app_status} by user):\n"
53
- for tool in app_tools[app]:
54
- all_tool_candidates += f" - {tool}: {app_tools[app][tool]}\n"
55
- all_tool_candidates += "\n"
56
-
57
-
58
- return all_tool_candidates
59
- except Exception as e:
60
- logger.error(f"Error retrieving tools: {e}")
61
- return "Error: " + str(e)
62
-
33
+ messages = [
34
+ {
35
+ "role": "system",
36
+ "content": [
37
+ {
38
+ "type": "text",
39
+ "text": TOOLS_LIST,
40
+ "cache_control": {"type": "ephemeral", "ttl": "1h"},
41
+ }
42
+ ],
43
+ },
44
+ {"role": "user", "content": str(queries)},
45
+ ]
46
+ response = llm.with_structured_output(ToolSelectionOutput).invoke(messages)
47
+ response_text = f"Connected tools: {response['connected_tool_ids']}\nUnconnected tools: {response['unconnected_tool_ids']}"
48
+ return response_text
49
+
63
50
  @tool
64
51
  async def load_tools(tool_ids: list[str]) -> list[dict[str, Any]]:
65
- """Load the tools for the given tool ids. Returns the tool name, description, parameters schema, and output schema."""
52
+ """Load the tools for the given tool ids. Returns the tool name, description, parameters schema, and output schema. A tool id is made up using the app_id and the tool_name, attached by double underscore (__). e.g. google_mail__send_email"""
66
53
  temp_manager = tool_registry.tool_manager
67
54
  temp_manager.clear_tools()
68
55
  await tool_registry.export_tools(tool_ids, format=ToolFormat.NATIVE)
69
56
  tool_details = []
70
57
  for tool_id in tool_ids:
71
58
  tool = temp_manager.get_tool(tool_id)
72
- tool_details.append({
73
- "name": tool.name,
74
- "description": tool.description,
75
- "parameters_schema": tool.parameters,
76
- "output_schema": tool.output_schema,
77
- })
59
+ tool_details.append(
60
+ {
61
+ "name": tool.name,
62
+ "description": tool.description,
63
+ "parameters_schema": tool.parameters,
64
+ "output_schema": tool.output_schema,
65
+ }
66
+ )
78
67
  return tool_details
79
-
68
+
80
69
  @tool
81
70
  async def call_tool(tool_id: str, tool_args: dict[str, Any]) -> Any:
82
71
  """Call the tool with the given id and arguments."""
83
72
  return await tool_registry.call_tool(tool_id, tool_args)
84
73
 
85
-
86
- async def call_model(state: State, runtime: Runtime[Context]) -> Command[Literal["select_tools", "call_tools"]]:
74
+ async def call_model(
75
+ state: State, runtime: Runtime[Context]
76
+ ) -> Command[Literal["select_tools", "call_tools"]]:
87
77
  logger.info("Calling model...")
88
78
  try:
89
- system_message = runtime.context.system_prompt.format(system_time=datetime.now(tz=UTC).isoformat())
90
- messages = [{"role": "system", "content": system_message}, *state["messages"]]
79
+ system_message = runtime.context.system_prompt.format(
80
+ system_time=datetime.now(tz=UTC).isoformat()
81
+ )
82
+ messages = [
83
+ {
84
+ "role": "system",
85
+ "content": [
86
+ {
87
+ "type": "text",
88
+ "text": system_message,
89
+ "cache_control": {"type": "ephemeral", "ttl": "1h"},
90
+ }
91
+ ],
92
+ },
93
+ *state["messages"],
94
+ ]
91
95
 
92
96
  model = llm
93
97
 
94
98
  if isinstance(model, ChatAnthropic):
95
99
  model_with_tools = model.bind_tools(
96
- [search_tools, load_tools, call_tool], tool_choice="auto", cache_control={"type": "ephemeral"}
100
+ [search_tools, load_tools, call_tool],
101
+ tool_choice="auto",
102
+ cache_control={"type": "ephemeral", "ttl": "1h"},
97
103
  )
98
104
  else:
99
- model_with_tools = model.bind_tools([search_tools, load_tools, call_tool], tool_choice="auto")
105
+ model_with_tools = model.bind_tools(
106
+ [search_tools, load_tools, call_tool], tool_choice="auto"
107
+ )
100
108
  response = cast(AIMessage, await model_with_tools.ainvoke(messages))
101
109
 
102
110
  if response.tool_calls:
103
- logger.info(f"Model responded with {len(response.tool_calls)} tool calls.")
111
+ logger.info(
112
+ f"Model responded with {len(response.tool_calls)} tool calls."
113
+ )
104
114
  if len(response.tool_calls) > 1:
105
- raise Exception("Not possible in Claude with llm.bind_tools(tools=tools, tool_choice='auto')")
115
+ raise Exception(
116
+ "Not possible in Claude with llm.bind_tools(tools=tools, tool_choice='auto')"
117
+ )
106
118
  tool_call = response.tool_calls[0]
107
119
  if tool_call["name"] == search_tools.name:
108
120
  logger.info("Model requested to select tools.")
@@ -110,10 +122,18 @@ def build_graph(
110
122
  elif tool_call["name"] == load_tools.name:
111
123
  logger.info("Model requested to load tools.")
112
124
  tool_details = await load_tools.ainvoke(input=tool_call["args"])
113
- tool_msg = ToolMessage(f"Loaded tools. {tool_details}", tool_call_id=tool_call["id"])
125
+ tool_msg = ToolMessage(
126
+ f"Loaded tools. {tool_details}", tool_call_id=tool_call["id"]
127
+ )
114
128
  selected_tool_ids = tool_call["args"]["tool_ids"]
115
129
  logger.info(f"Loaded tools: {selected_tool_ids}")
116
- return Command(goto="call_model", update={ "messages": [response, tool_msg], "selected_tool_ids": selected_tool_ids})
130
+ return Command(
131
+ goto="call_model",
132
+ update={
133
+ "messages": [response, tool_msg],
134
+ "selected_tool_ids": selected_tool_ids,
135
+ },
136
+ )
117
137
  elif tool_call["name"] == call_tool.name:
118
138
  logger.info("Model requested to call tool.")
119
139
  return Command(goto="call_tools", update={"messages": [response]})
@@ -125,12 +145,16 @@ def build_graph(
125
145
  logger.error(f"Error in call_model: {e}")
126
146
  raise
127
147
 
128
- async def select_tools(state: State, runtime: Runtime[Context]) -> Command[Literal["call_model"]]:
148
+ async def select_tools(
149
+ state: State, runtime: Runtime[Context]
150
+ ) -> Command[Literal["call_model"]]:
129
151
  logger.info("Selecting tools...")
130
152
  try:
131
153
  tool_call = state["messages"][-1].tool_calls[0]
132
- searched_tools= await search_tools.ainvoke(input=tool_call["args"])
133
- tool_msg = ToolMessage(f"Available tools: {searched_tools}", tool_call_id=tool_call["id"])
154
+ searched_tools = await search_tools.ainvoke(input=tool_call["args"])
155
+ tool_msg = ToolMessage(
156
+ f"Available tools: {searched_tools}", tool_call_id=tool_call["id"]
157
+ )
134
158
  return Command(goto="call_model", update={"messages": [tool_msg]})
135
159
  except Exception as e:
136
160
  logger.error(f"Error in select_tools: {e}")
@@ -146,7 +170,9 @@ def build_graph(
146
170
  logger.info(f"Executing tool: {tool_id} with args: {tool_args}")
147
171
  try:
148
172
  await tool_registry.export_tools([tool_id], ToolFormat.LANGCHAIN)
149
- tool_result = await call_tool.ainvoke(input={"tool_id": tool_id, "tool_args": tool_args})
173
+ tool_result = await call_tool.ainvoke(
174
+ input={"tool_id": tool_id, "tool_args": tool_args}
175
+ )
150
176
  logger.info(f"Tool '{tool_id}' executed successfully.")
151
177
  outputs.append(
152
178
  ToolMessage(
@@ -165,7 +191,10 @@ def build_graph(
165
191
  tool_call_id=tool_call["id"],
166
192
  )
167
193
  )
168
- return Command(goto="call_model", update={"messages": outputs, "selected_tool_ids": recent_tool_ids})
194
+ return Command(
195
+ goto="call_model",
196
+ update={"messages": outputs, "selected_tool_ids": recent_tool_ids},
197
+ )
169
198
 
170
199
  builder = StateGraph(State, context_schema=Context)
171
200
 
@@ -1,5 +1,25 @@
1
1
  """Default prompts used by the agent."""
2
2
 
3
+ import os
4
+ from pathlib import Path
5
+
6
+
7
+ def load_tools_from_file():
8
+ """Load tools from the generated text file."""
9
+ # Get the directory where this file is located
10
+ current_dir = Path(__file__).parent
11
+
12
+ tools_file = current_dir / "tools_important.txt"
13
+ if not tools_file.exists():
14
+ tools_file = current_dir / "tools_all.txt"
15
+
16
+ if tools_file.exists():
17
+ with open(tools_file, "r", encoding="utf-8") as f:
18
+ return f.read()
19
+ else:
20
+ return "No tools file found. Please run tool_retrieve.py to generate the tools list."
21
+
22
+
3
23
  SYSTEM_PROMPT = """You are a helpful AI assistant.
4
24
 
5
25
  **Core Directives:**
@@ -11,3 +31,12 @@ SYSTEM_PROMPT = """You are a helpful AI assistant.
11
31
 
12
32
  System time: {system_time}
13
33
  """
34
+
35
+
36
+ TOOLS_LIST = f""" This is the list of all the tools available to you:
37
+ {load_tools_from_file()}
38
+
39
+ You will be provided a list of queries (which may be similar or different from each other). Your job is to select the relevant tools for the user's request. Sometimes, multiple apps might be relevant for the same task. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must return both apps' tools. If the query specifically asks you to use an app that is not connected, return the tools for that app, they can still be connected by the user.
40
+
41
+ You have to return the tool_ids by constructing the tool_id from the app_id and the tool_name, attached by double underscore (__). e.g. google_mail__send_email
42
+ """