universal-mcp-agents 0.1.4__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. universal_mcp/agents/__init__.py +19 -0
  2. universal_mcp/agents/autoagent/__init__.py +1 -1
  3. universal_mcp/agents/autoagent/__main__.py +1 -1
  4. universal_mcp/agents/autoagent/graph.py +2 -2
  5. universal_mcp/agents/autoagent/studio.py +2 -1
  6. universal_mcp/agents/base.py +25 -13
  7. universal_mcp/agents/bigtool/__init__.py +10 -8
  8. universal_mcp/agents/bigtool/__main__.py +6 -7
  9. universal_mcp/agents/bigtool/graph.py +18 -27
  10. universal_mcp/agents/bigtool/prompts.py +3 -3
  11. universal_mcp/agents/bigtool2/__init__.py +13 -5
  12. universal_mcp/agents/bigtool2/__main__.py +7 -6
  13. universal_mcp/agents/bigtool2/agent.py +2 -1
  14. universal_mcp/agents/bigtool2/graph.py +14 -16
  15. universal_mcp/agents/bigtool2/prompts.py +1 -1
  16. universal_mcp/agents/bigtoolcache/__init__.py +2 -2
  17. universal_mcp/agents/bigtoolcache/__main__.py +1 -1
  18. universal_mcp/agents/bigtoolcache/agent.py +3 -2
  19. universal_mcp/agents/bigtoolcache/graph.py +11 -10
  20. universal_mcp/agents/bigtoolcache/prompts.py +1 -2
  21. universal_mcp/agents/builder.py +43 -15
  22. universal_mcp/agents/cli.py +19 -5
  23. universal_mcp/agents/codeact/test.py +2 -1
  24. universal_mcp/agents/llm.py +7 -3
  25. universal_mcp/agents/planner/__init__.py +8 -2
  26. universal_mcp/agents/planner/__main__.py +10 -8
  27. universal_mcp/agents/planner/graph.py +6 -2
  28. universal_mcp/agents/planner/prompts.py +14 -1
  29. universal_mcp/agents/planner/state.py +0 -1
  30. universal_mcp/agents/react.py +35 -25
  31. universal_mcp/agents/shared/tool_node.py +2 -3
  32. universal_mcp/agents/simple.py +19 -3
  33. universal_mcp/agents/tools.py +0 -1
  34. universal_mcp/agents/ui_tools.py +305 -0
  35. universal_mcp/agents/utils.py +46 -36
  36. {universal_mcp_agents-0.1.4.dist-info → universal_mcp_agents-0.1.5.dist-info}/METADATA +2 -1
  37. universal_mcp_agents-0.1.5.dist-info/RECORD +52 -0
  38. universal_mcp/agents/bigtool/context.py +0 -24
  39. universal_mcp/agents/bigtool2/context.py +0 -32
  40. universal_mcp_agents-0.1.4.dist-info/RECORD +0 -53
  41. {universal_mcp_agents-0.1.4.dist-info → universal_mcp_agents-0.1.5.dist-info}/WHEEL +0 -0

universal_mcp/agents/__init__.py
@@ -7,6 +7,25 @@ from universal_mcp.agents.planner import PlannerAgent
 from universal_mcp.agents.react import ReactAgent
 from universal_mcp.agents.simple import SimpleAgent
 
+
+def get_agent(agent_name: str):
+    if agent_name == "auto":
+        return AutoAgent
+    elif agent_name == "react":
+        return ReactAgent
+    elif agent_name == "simple":
+        return SimpleAgent
+    elif agent_name == "builder":
+        return BuilderAgent
+    elif agent_name == "planner":
+        return PlannerAgent
+    elif agent_name == "bigtool":
+        return BigToolAgent
+    elif agent_name == "bigtool2":
+        return BigToolAgent2
+    else:
+        raise ValueError(f"Unknown agent: {agent_name}. Possible values: auto, react, simple, builder, planner, bigtool, bigtool2")
+
 __all__ = [
     "BaseAgent",
     "ReactAgent",

universal_mcp/agents/autoagent/__init__.py
@@ -1,8 +1,8 @@
 from langgraph.checkpoint.base import BaseCheckpointSaver
+from universal_mcp.tools.registry import ToolRegistry
 
 from universal_mcp.agents.autoagent.graph import build_graph
 from universal_mcp.agents.base import BaseAgent
-from universal_mcp.tools.registry import ToolRegistry
 
 
 class AutoAgent(BaseAgent):

universal_mcp/agents/autoagent/__main__.py
@@ -1,8 +1,8 @@
 import asyncio
 
 from loguru import logger
-
 from universal_mcp.agentr.registry import AgentrRegistry
+
 from universal_mcp.agents.autoagent import AutoAgent
 
 

universal_mcp/agents/autoagent/graph.py
@@ -6,13 +6,13 @@ from langchain_core.messages import AIMessage, ToolMessage
 from langchain_core.tools import tool
 from langgraph.graph import END, START, StateGraph
 from langgraph.runtime import Runtime
+from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolFormat
 
 from universal_mcp.agents.autoagent.context import Context
 from universal_mcp.agents.autoagent.prompts import SYSTEM_PROMPT
 from universal_mcp.agents.autoagent.state import State
 from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.tools.registry import ToolRegistry
-from universal_mcp.types import ToolFormat
 
 
 async def build_graph(tool_registry: ToolRegistry, instructions: str = ""):

universal_mcp/agents/autoagent/studio.py
@@ -1,9 +1,10 @@
 import asyncio
 
 from universal_mcp.agentr.registry import AgentrRegistry
-from universal_mcp.agents.autoagent import build_graph
 from universal_mcp.tools import ToolManager
 
+from universal_mcp.agents.autoagent import build_graph
+
 tool_registry = AgentrRegistry()
 tool_manager = ToolManager()
 

universal_mcp/agents/base.py
@@ -2,9 +2,11 @@
 from typing import cast
 from uuid import uuid4
 
-from langchain_core.messages import AIMessageChunk
+from langchain_core.messages import AIMessage, AIMessageChunk
 from langgraph.checkpoint.base import BaseCheckpointSaver
+from langgraph.graph import StateGraph
 from langgraph.types import Command
+from universal_mcp.logger import logger
 
 from .utils import RichCLI
 
@@ -31,7 +33,7 @@ class BaseAgent:
         self._graph = await self._build_graph()
         self._initialized = True
 
-    async def _build_graph(self):
+    async def _build_graph(self) -> StateGraph:
         raise NotImplementedError("Subclasses must implement this method")
 
     async def stream(self, thread_id: str, user_input: str, metadata: dict = None):
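
The _build_graph override contract: BaseAgent.ainit() calls it once and caches the result on self._graph. A sketch of a subclass, assuming it stores a registry and a build_graph helper like the agents below (the annotation is loose; the agents in this package actually return the compiled graph):

    from langgraph.graph import StateGraph

    class MyAgent(BaseAgent):
        async def _build_graph(self) -> StateGraph:
            # Compiled with the shared checkpointer so threads persist.
            graph_builder = build_graph(tool_registry=self.registry)
            return graph_builder.compile(checkpointer=self.memory)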
@@ -60,32 +62,41 @@ class BaseAgent:
         ):
             # Only forward assistant token chunks that are not tool-related.
             type_ = type(event)
-            if type_ != AIMessageChunk:
-                continue
-            event = cast(AIMessageChunk, event)
-            aggregate = event if aggregate is None else aggregate + event
             tags = metadata.get("tags", []) if isinstance(metadata, dict) else []
             is_quiet = isinstance(tags, list) and ("quiet" in tags)
-
             if is_quiet:
                 continue
+            # Handle different types of messages
+            if type_ in (AIMessage, AIMessageChunk):
+                # Accumulate billing and aggregate message
+                aggregate = event if aggregate is None else aggregate + event
+            # Ignore intermediate finish messages
             if "finish_reason" in event.response_metadata:
                 # Got LLM finish reason ignore it
-                # logger.debug(f"Finish event: {event}, Metadata: {metadata}")
+                logger.debug(f"Finish event: {event}, Metadata: {metadata}")
                 pass
             else:
-                # logger.debug(f"Event: {event}, Metadata: {metadata}")
+                logger.debug(f"Event: {event}, Metadata: {metadata}")
                 yield event
         # Send a final finished message
         # The last event would be finish
         event = cast(AIMessageChunk, event)
+        event.usage_metadata = aggregate.usage_metadata
+        logger.debug(f"Usage metadata: {event.usage_metadata}")
         yield event
 
     async def stream_interactive(self, thread_id: str, user_input: str):
         await self.ainit()
         with self.cli.display_agent_response_streaming(self.name) as stream_updater:
             async for event in self.stream(thread_id, user_input):
-                stream_updater.update(event.content)
+
+                if isinstance(event.content, list):
+                    thinking_content = "".join([c.get("thinking", "") for c in event.content])
+                    stream_updater.update(thinking_content, type_="thinking")
+                    content = "".join([c.get("text", "") for c in event.content])
+                    stream_updater.update(content, type_="text")
+                else:
+                    stream_updater.update(event.content, type_="text")
 
     async def invoke(
         self, user_input: str, thread_id: str = str(uuid4()), metadata: dict = None
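
The reworked stream relies on AIMessageChunk addition to total token usage: in recent langchain-core, chunk + chunk concatenates content and sums usage_metadata, which is what makes the final event's usage_metadata correct above. A sketch with illustrative values:

    from langchain_core.messages import AIMessageChunk

    a = AIMessageChunk(
        content="Hel",
        usage_metadata={"input_tokens": 5, "output_tokens": 1, "total_tokens": 6},
    )
    b = AIMessageChunk(
        content="lo",
        usage_metadata={"input_tokens": 0, "output_tokens": 1, "total_tokens": 1},
    )
    merged = a + b
    assert merged.content == "Hello"
    assert merged.usage_metadata["total_tokens"] == 7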
@@ -106,11 +117,12 @@ class BaseAgent:
             "metadata": run_metadata,
         }
 
-        return await self._graph.ainvoke(
+        result = await self._graph.ainvoke(
             {"messages": [{"role": "user", "content": user_input}]},
             config=run_config,
             context={"system_prompt": self.instructions, "model": self.model},
         )
+        return result
 
     async def run_interactive(self, thread_id: str = str(uuid4())):
         """Main application loop"""
@@ -145,7 +157,7 @@ class BaseAgent:
                        f"Agent is {self.name}. {self.instructions}"
                    )
                    continue
-                elif command == "exit" or command == "quit" or command == "q":
+                elif command in {"exit", "quit", "q"}:
                    self.cli.display_info("Goodbye! 👋")
                    break
                elif command == "reset":
@@ -170,6 +182,6 @@ class BaseAgent:
                    break
            except Exception as e:
                import traceback
-
                traceback.print_exc()
                self.cli.display_error(f"An error occurred: {str(e)}")
+                break

universal_mcp/agents/bigtool/__init__.py
@@ -1,9 +1,9 @@
 from langgraph.checkpoint.base import BaseCheckpointSaver
+from universal_mcp.logger import logger
+from universal_mcp.tools.registry import ToolRegistry
 
 from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.logger import logger
-from universal_mcp.tools.registry import ToolRegistry
 
 from .graph import build_graph
 from .prompts import SYSTEM_PROMPT
@@ -19,18 +19,20 @@ class BigToolAgent(BaseAgent):
         memory: BaseCheckpointSaver | None = None,
         **kwargs,
     ):
-        # Combine the base system prompt with agent-specific instructions
-        full_instructions = f"{SYSTEM_PROMPT}\n\n**User Instructions:**\n{instructions}"
-        super().__init__(name, full_instructions, model, memory, **kwargs)
-
+        super().__init__(name, instructions, model, memory, **kwargs)
         self.registry = registry
         self.llm = load_chat_model(self.model)
-        self.tool_selection_llm = load_chat_model("gemini/gemini-2.0-flash-001")
 
         logger.info(
             f"BigToolAgent '{self.name}' initialized with model '{self.model}'."
         )
 
+    def _build_system_message(self):
+        return SYSTEM_PROMPT.format(
+            name=self.name,
+            instructions=self.instructions,
+        )
+
     async def _build_graph(self):
         """Build the bigtool agent graph using the existing create_agent function."""
         logger.info(f"Building graph for BigToolAgent '{self.name}'...")
@@ -38,7 +40,7 @@ class BigToolAgent(BaseAgent):
         graph_builder = build_graph(
             tool_registry=self.registry,
             llm=self.llm,
-            tool_selection_llm=self.tool_selection_llm,
+            system_prompt=self._build_system_message(),
         )
 
         compiled_graph = graph_builder.compile(checkpointer=self.memory)

universal_mcp/agents/bigtool/__main__.py
@@ -1,9 +1,10 @@
 import asyncio
 
 from loguru import logger
-
 from universal_mcp.agentr.registry import AgentrRegistry
+
 from universal_mcp.agents.bigtool import BigToolAgent
+from universal_mcp.agents.utils import messages_to_list
 
 
 async def main():
@@ -13,12 +14,10 @@ async def main():
         model="azure/gpt-4.1",
         registry=AgentrRegistry(),
     )
-    async for event in agent.stream(
-        user_input="Send an email to manoj@agentr.dev",
-        thread_id="test123",
-    ):
-        logger.info(event.content)
-
+    await agent.ainit()
+    output = await agent.invoke(
+        user_input="Send an email to manoj@agentr.dev")
+    logger.info(messages_to_list(output["messages"]))
 
 if __name__ == "__main__":
     asyncio.run(main())

universal_mcp/agents/bigtool/graph.py
@@ -1,28 +1,24 @@
 import json
-from datetime import UTC, datetime
 from typing import Literal, TypedDict, cast
 
-from langchain_anthropic import ChatAnthropic
 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage, ToolMessage
 from langchain_core.tools import tool
 from langgraph.graph import StateGraph
-from langgraph.runtime import Runtime
 from langgraph.types import Command
-
-from universal_mcp.agents.bigtool.context import Context
-from universal_mcp.agents.bigtool.state import State
 from universal_mcp.logger import logger
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolFormat
 
+from universal_mcp.agents.bigtool.state import State
+
 from .prompts import SELECT_TOOL_PROMPT
 
 
 def build_graph(
     tool_registry: ToolRegistry,
     llm: BaseChatModel,
-    tool_selection_llm: BaseChatModel,
+    system_prompt: str,
 ):
     @tool
     async def retrieve_tools(task_query: str) -> list[str]:
@@ -40,7 +36,7 @@ def build_graph(
         class ToolSelectionOutput(TypedDict):
             tool_names: list[str]
 
-        model = tool_selection_llm
+        model = llm
         app_ids = await tool_registry.list_all_apps()
         connections = await tool_registry.list_connected_apps()
         connection_ids = set([connection["app_id"] for connection in connections])
@@ -76,16 +72,14 @@ def build_graph(
             logger.error(f"Error retrieving tools: {e}")
             return []
 
+
     async def call_model(
-        state: State, runtime: Runtime[Context]
+        state: State
     ) -> Command[Literal["select_tools", "call_tools"]]:
         logger.info("Calling model...")
         try:
-            system_message = runtime.context.system_prompt.format(
-                system_time=datetime.now(tz=UTC).isoformat()
-            )
             messages = [
-                {"role": "system", "content": system_message},
+                {"role": "system", "content": system_prompt},
                 *state["messages"],
             ]
 
@@ -98,18 +92,15 @@ def build_graph(
             else:
                 selected_tools = []
 
-            model = llm
-            if isinstance(model, ChatAnthropic):
-                model_with_tools = model.bind_tools(
-                    [retrieve_tools, *selected_tools],
-                    tool_choice="auto",
-                    cache_control={"type": "ephemeral"},
-                )
-            else:
-                model_with_tools = model.bind_tools(
-                    [retrieve_tools, *selected_tools], tool_choice="auto"
-                )
-            response = cast(AIMessage, await model_with_tools.ainvoke(messages))
+            model_with_tools = llm.bind_tools(
+                [retrieve_tools, *selected_tools], tool_choice="auto"
+            )
+
+
+            response = await model_with_tools.ainvoke(messages)
+            cast(AIMessage, response)
+            logger.debug(f"Response: {response}")
+
 
             if response.tool_calls:
                 logger.info(
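
The ChatAnthropic branch (with ephemeral cache_control) is gone; every model now takes the same bind_tools path. A self-contained sketch of the pattern, with the chat model passed in since any bind_tools-capable model will do (provider support for the tool_choice kwarg varies):

    from langchain_core.tools import tool

    @tool
    async def retrieve_tools(task_query: str) -> list[str]:
        """Find tool ids relevant to a task."""
        return []

    async def ask(llm, user_input: str):
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": user_input},
        ]
        # "auto" lets the model decide between answering and calling a tool.
        model_with_tools = llm.bind_tools([retrieve_tools], tool_choice="auto")
        return await model_with_tools.ainvoke(messages)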
@@ -151,7 +142,7 @@ def build_graph(
             raise
 
     async def select_tools(
-        state: State, runtime: Runtime[Context]
+        state: State
     ) -> Command[Literal["call_model"]]:
         logger.info("Selecting tools...")
         try:
@@ -210,7 +201,7 @@ def build_graph(
             update={"messages": outputs, "selected_tool_ids": recent_tool_ids},
         )
 
-    builder = StateGraph(State, context_schema=Context)
+    builder = StateGraph(State)
 
     builder.add_node(call_model)
     builder.add_node(select_tools)
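
Dropping context_schema=Context means nodes no longer receive a Runtime[Context] argument; they take only state, and control flow is expressed with Command. A minimal two-node sketch of the pattern, assuming a recent langgraph:

    from typing import Literal, TypedDict

    from langgraph.graph import END, START, StateGraph
    from langgraph.types import Command

    class State(TypedDict):
        messages: list

    async def call_model(state: State) -> Command[Literal["select_tools", "__end__"]]:
        if not state["messages"]:
            return Command(goto="select_tools", update={"messages": ["selected"]})
        return Command(goto=END)

    async def select_tools(state: State) -> Command[Literal["call_model"]]:
        return Command(goto="call_model")

    builder = StateGraph(State)
    builder.add_node(call_model)   # node name inferred from the function name
    builder.add_node(select_tools)
    builder.add_edge(START, "call_model")
    graph = builder.compile()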

universal_mcp/agents/bigtool/prompts.py
@@ -1,15 +1,15 @@
 """Default prompts used by the agent."""
 
-SYSTEM_PROMPT = """You are a helpful AI assistant.
+SYSTEM_PROMPT = """You are {name}, a helpful AI assistant.
 
 **Core Directives:**
 1. **Always Use Tools for Tasks:** For any user request that requires an action (e.g., sending an email, searching for information, creating an event), you MUST use a tool. Do not answer from your own knowledge or refuse a task if a tool might exist for it.
 2. **First Step is ALWAYS `retrieve_tools`:** Before you can use any other tool, you MUST first call the `retrieve_tools` function to find the right tool for the user's request. This is your mandatory first action.
 3. **Strictly Follow the Process:** Your only job in your first turn is to analyze the user's request and call `retrieve_tools` with a concise query describing the core task. Do not engage in conversation.
 
-System time: {system_time}
-
 When multiple tools are available for the same task, you must ask the user.
+
+{instructions}
 """
 
 SELECT_TOOL_PROMPT = """You are an AI assistant that helps the user perform tasks using various apps (each app has multiple tools).

universal_mcp/agents/bigtool2/__init__.py
@@ -1,9 +1,10 @@
 from langgraph.checkpoint.base import BaseCheckpointSaver
+from universal_mcp.logger import logger
+from universal_mcp.tools.registry import ToolRegistry
 
 from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.logger import logger
-from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.agents.utils import initialize_ui_tools
 
 from .graph import build_graph
 from .prompts import SYSTEM_PROMPT
@@ -19,18 +20,23 @@ class BigToolAgent2(BaseAgent):
         memory: BaseCheckpointSaver | None = None,
         **kwargs,
     ):
-        # Combine the base system prompt with agent-specific instructions
-        full_instructions = f"{SYSTEM_PROMPT}\n\n**User Instructions:**\n{instructions}"
-        super().__init__(name, full_instructions, model, memory, **kwargs)
+        super().__init__(name, instructions, model, memory, **kwargs)
 
         self.registry = registry
         self.llm = load_chat_model(self.model)
         self.recursion_limit = kwargs.get("recursion_limit", 10)
+        self.ui_tools = initialize_ui_tools()
 
         logger.info(
             f"BigToolAgent '{self.name}' initialized with model '{self.model}'."
         )
 
+    def _build_system_message(self):
+        return SYSTEM_PROMPT.format(
+            name=self.name,
+            instructions=f"**User Instructions:**\n{self.instructions}",
+        )
+
     async def _build_graph(self):
         """Build the bigtool agent graph using the existing create_agent function."""
         logger.info(f"Building graph for BigToolAgent '{self.name}'...")
@@ -38,6 +44,8 @@ class BigToolAgent2(BaseAgent):
         graph_builder = build_graph(
             tool_registry=self.registry,
             llm=self.llm,
+            system_prompt=self._build_system_message(),
+            ui_tools=self.ui_tools,
         )
 
         compiled_graph = graph_builder.compile(checkpointer=self.memory)

universal_mcp/agents/bigtool2/__main__.py
@@ -1,9 +1,10 @@
 import asyncio
 
 from loguru import logger
-
 from universal_mcp.agentr.registry import AgentrRegistry
+
 from universal_mcp.agents.bigtool2 import BigToolAgent2
+from universal_mcp.agents.utils import messages_to_list
 
 
 async def main():
@@ -13,11 +14,11 @@ async def main():
         model="azure/gpt-4.1",
         registry=AgentrRegistry(),
     )
-    async for event in agent.stream(
-        user_input="Send an email to manoj@agentr.dev",
-        thread_id="test123",
-    ):
-        logger.info(event.content)
+    await agent.ainit()
+    output = await agent.invoke(
+        user_input="Send an email to manoj@agentr.dev"
+    )
+    logger.info(messages_to_list(output["messages"]))
 
 
 if __name__ == "__main__":

universal_mcp/agents/bigtool2/agent.py
@@ -1,6 +1,7 @@
-from universal_mcp.agents.bigtool2 import BigToolAgent2
 from universal_mcp.agentr.registry import AgentrRegistry
 
+from universal_mcp.agents.bigtool2 import BigToolAgent2
+
 
 async def agent():
     agent_object = await BigToolAgent2(

universal_mcp/agents/bigtool2/graph.py
@@ -1,23 +1,22 @@
 import json
 from datetime import UTC, datetime
-from typing import Literal, TypedDict, cast
+from typing import Literal, cast
 
-from langchain_anthropic import ChatAnthropic
 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage, ToolMessage
 from langchain_core.tools import tool
 from langgraph.graph import StateGraph
-from langgraph.runtime import Runtime
 from langgraph.types import Command
-
-from universal_mcp.agents.bigtool2.context import Context
-from universal_mcp.agents.bigtool2.state import State
 from universal_mcp.logger import logger
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolFormat
 
+from universal_mcp.agents.bigtool2.state import State
+
 
-def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
+def build_graph(
+    tool_registry: ToolRegistry, llm: BaseChatModel, system_prompt: str, ui_tools: list
+):
     @tool
     async def search_tools(queries: list[str]) -> str:
         """Search tools for a given list of queries
@@ -32,7 +31,7 @@ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
         connected_apps = [
             app["id"] for app in app_ids if app["id"] in connection_ids
         ]
-        unconnected_apps = [
+        [
             app["id"] for app in app_ids if app["id"] not in connection_ids
         ]
         app_tools = {}
@@ -69,11 +68,11 @@ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
         return tool_ids
 
     async def call_model(
-        state: State, runtime: Runtime[Context]
+        state: State,
     ) -> Command[Literal["select_tools", "call_tools"]]:
         logger.info("Calling model...")
         try:
-            system_message = runtime.context.system_prompt.format(
+            system_message = system_prompt.format(
                 system_time=datetime.now(tz=UTC).isoformat()
             )
             messages = [
@@ -93,7 +92,8 @@ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
             model = llm
 
             model_with_tools = model.bind_tools(
-                [search_tools, load_tools, *selected_tools], tool_choice="auto"
+                [search_tools, load_tools, *selected_tools, *ui_tools],
+                tool_choice="auto",
             )
             response = cast(AIMessage, await model_with_tools.ainvoke(messages))
@@ -112,7 +112,7 @@ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
                 elif tool_call["name"] == load_tools.name:
                     logger.info("Model requested to load tools.")
                     tool_msg = ToolMessage(
-                        f"Loaded tools.", tool_call_id=tool_call["id"]
+                        "Loaded tools.", tool_call_id=tool_call["id"]
                     )
                     selected_tool_ids = tool_call["args"]["tool_ids"]
                     logger.info(f"Loaded tools: {selected_tool_ids}")
@@ -151,9 +151,7 @@ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
             logger.error(f"Error in call_model: {e}")
             raise
 
-    async def select_tools(
-        state: State, runtime: Runtime[Context]
-    ) -> Command[Literal["call_model"]]:
+    async def select_tools(state: State) -> Command[Literal["call_model"]]:
         logger.info("Selecting tools...")
         try:
             tool_call = state["messages"][-1].tool_calls[0]
@@ -204,7 +202,7 @@ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
             update={"messages": outputs, "selected_tool_ids": recent_tool_ids},
         )
 
-    builder = StateGraph(State, context_schema=Context)
+    builder = StateGraph(State)
 
     builder.add_node(call_model)
     builder.add_node(select_tools)

universal_mcp/agents/bigtool2/prompts.py
@@ -8,5 +8,5 @@ SYSTEM_PROMPT = """You are a helpful AI assistant.
 3. **Load Tools:** After looking at the output of `search_tools`, you MUST call the `load_tools` function to load only the tools you want to use. Use your judgement to eliminate irrelevant apps that came up just because of semantic similarity. However, sometimes, multiple apps might be relevant for the same task. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must ask the user to choose the app. In case the user asks you to use an app that is not connected, call the apps tools normally. The tool will return a link for connecting that you should pass on to the user.
 3. **Strictly Follow the Process:** Your only job in your first turn is to analyze the user's request and call `search_tools` with a concise query describing the core task. Do not engage in conversation.
 
-System time: {system_time}
+{instructions}
 """

universal_mcp/agents/bigtoolcache/__init__.py
@@ -1,9 +1,9 @@
 from langgraph.checkpoint.base import BaseCheckpointSaver
+from universal_mcp.logger import logger
+from universal_mcp.tools.registry import ToolRegistry
 
 from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.logger import logger
-from universal_mcp.tools.registry import ToolRegistry
 
 from .graph import build_graph
 from .prompts import SYSTEM_PROMPT

universal_mcp/agents/bigtoolcache/__main__.py
@@ -1,8 +1,8 @@
 import asyncio
 
 from loguru import logger
-
 from universal_mcp.agentr.registry import AgentrRegistry
+
 from universal_mcp.agents.bigtoolcache import BigToolAgentCache
 
 

universal_mcp/agents/bigtoolcache/agent.py
@@ -1,10 +1,11 @@
-from universal_mcp.agents.bigtoolcache import BigToolAgentCache
 from universal_mcp.agentr.registry import AgentrRegistry
 
+from universal_mcp.agents.bigtoolcache import BigToolAgentCache
+
 
 async def agent():
     agent_object = await BigToolAgentCache(
-        name="BigTool Agent 2",
+        name="BigTool Agent Cache version",
         instructions="You are a helpful assistant that can use various tools to complete tasks.",
         model="anthropic/claude-4-sonnet-20250514",
         registry=AgentrRegistry(),

universal_mcp/agents/bigtoolcache/graph.py
@@ -1,6 +1,6 @@
 import json
 from datetime import UTC, datetime
-from typing import Any, Literal, TypedDict, cast
+from typing import Literal, TypedDict, cast
 
 from langchain_anthropic import ChatAnthropic
 from langchain_core.language_models import BaseChatModel
@@ -18,6 +18,7 @@ from universal_mcp.types import ToolFormat
 from universal_mcp.agents.bigtoolcache.prompts import TOOLS_LIST
 
 
+
 class ToolSelectionOutput(TypedDict):
     connected_tool_ids: list[str]
     unconnected_tool_ids: list[str]
@@ -180,15 +181,15 @@ def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel):
                         name=tool_id,
                         tool_call_id=tool_call["id"],
                     )
-                )
-                recent_tool_ids.append(tool_id)
-            except Exception as e:
-                logger.error(f"Error executing tool '{tool_id}': {e}")
-                outputs.append(
-                    ToolMessage(
-                        content=json.dumps("Error: " + str(e)),
-                        name=tool_id,
-                        tool_call_id=tool_call["id"],
+                recent_tool_ids.append(tool_call["name"])
+            except Exception as e:
+                logger.error(f"Error executing tool '{tool_call['name']}': {e}")
+                outputs.append(
+                    ToolMessage(
+                        content=json.dumps("Error: " + str(e)),
+                        name=tool_call["name"],
+                        tool_call_id=tool_call["id"],
+                    )
                 )
             )
         return Command(
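
The error path now takes the tool name from the tool call itself rather than the tool_id loop variable, so the ToolMessage matches the call that failed. The shape of the emitted message, as a sketch:

    import json

    from langchain_core.messages import ToolMessage

    tool_call = {"name": "send_email", "id": "call_123", "args": {}}
    try:
        raise RuntimeError("connection refused")  # stand-in for a failing tool
    except Exception as e:
        error_msg = ToolMessage(
            content=json.dumps("Error: " + str(e)),
            name=tool_call["name"],
            tool_call_id=tool_call["id"],
        )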

universal_mcp/agents/bigtoolcache/prompts.py
@@ -1,6 +1,5 @@
 """Default prompts used by the agent."""
 
-import os
 from pathlib import Path
 
 
@@ -14,7 +13,7 @@ def load_tools_from_file():
     tools_file = current_dir / "tools_all.txt"
 
     if tools_file.exists():
-        with open(tools_file, "r", encoding="utf-8") as f:
+        with open(tools_file, encoding="utf-8") as f:
             return f.read()
     else:
         return "No tools file found. Please run tool_retrieve.py to generate the tools list."