universal-mcp 0.1.24rc11__py3-none-any.whl → 0.1.24rc13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. universal_mcp/agentr/registry.py +6 -0
  2. universal_mcp/agents/__init__.py +4 -1
  3. universal_mcp/agents/autoagent/__init__.py +1 -2
  4. universal_mcp/agents/autoagent/__main__.py +8 -5
  5. universal_mcp/agents/autoagent/graph.py +4 -7
  6. universal_mcp/agents/autoagent/prompts.py +1 -1
  7. universal_mcp/agents/base.py +13 -15
  8. universal_mcp/agents/bigtool/__init__.py +54 -0
  9. universal_mcp/agents/bigtool/__main__.py +24 -0
  10. universal_mcp/agents/bigtool/context.py +24 -0
  11. universal_mcp/agents/bigtool/graph.py +166 -0
  12. universal_mcp/agents/bigtool/prompts.py +31 -0
  13. universal_mcp/agents/bigtool/state.py +27 -0
  14. universal_mcp/agents/builder.py +80 -0
  15. universal_mcp/agents/llm.py +25 -12
  16. universal_mcp/agents/planner/__init__.py +37 -0
  17. universal_mcp/agents/planner/__main__.py +24 -0
  18. universal_mcp/agents/planner/graph.py +82 -0
  19. universal_mcp/agents/planner/prompts.py +1 -0
  20. universal_mcp/agents/planner/state.py +12 -0
  21. universal_mcp/agents/shared/agent_node.py +34 -0
  22. universal_mcp/agents/shared/tool_node.py +235 -0
  23. universal_mcp/tools/registry.py +5 -0
  24. universal_mcp/types.py +5 -2
  25. {universal_mcp-0.1.24rc11.dist-info → universal_mcp-0.1.24rc13.dist-info}/METADATA +4 -1
  26. {universal_mcp-0.1.24rc11.dist-info → universal_mcp-0.1.24rc13.dist-info}/RECORD +29 -16
  27. universal_mcp/agents/auto.py +0 -575
  28. {universal_mcp-0.1.24rc11.dist-info → universal_mcp-0.1.24rc13.dist-info}/WHEEL +0 -0
  29. {universal_mcp-0.1.24rc11.dist-info → universal_mcp-0.1.24rc13.dist-info}/entry_points.txt +0 -0
  30. {universal_mcp-0.1.24rc11.dist-info → universal_mcp-0.1.24rc13.dist-info}/licenses/LICENSE +0 -0
universal_mcp/agentr/registry.py
@@ -31,6 +31,8 @@ class AgentrRegistry(ToolRegistry):
             raise ValueError("Client is not initialized")
         try:
             all_apps = self.client.list_all_apps()
+            all_apps = [{"id": app["id"], "name": app["name"], "description": app["description"]} for app in all_apps]
+            # logger.debug(f"All apps: {all_apps}")
             return all_apps
         except Exception as e:
             logger.error(f"Error fetching apps from AgentR: {e}")
@@ -180,3 +182,7 @@ class AgentrRegistry(ToolRegistry):
     async def call_tool(self, tool_name: str, tool_args: dict[str, Any]) -> dict[str, Any]:
         """Call a tool with the given name and arguments."""
         return await self.tool_manager.call_tool(tool_name, tool_args)
+
+    async def list_connected_apps(self) -> list[str]:
+        """List all apps that the user has connected."""
+        return self.client.list_my_connections()
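The new public wrapper spares callers from reaching into the registry's private `client`. A minimal sketch of the call-site change (hypothetical snippet, assuming an initialized `AgentrRegistry` inside an async context):

    registry = AgentrRegistry()
    # before: connections = registry.client.list_my_connections()
    connections = await registry.list_connected_apps()
    connected_ids = {connection["app_id"] for connection in connections}

The change to universal_mcp/agents/autoagent/graph.py below adopts exactly this pattern.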
universal_mcp/agents/__init__.py
@@ -1,6 +1,9 @@
 from universal_mcp.agents.autoagent import AutoAgent
 from universal_mcp.agents.base import BaseAgent
+from universal_mcp.agents.bigtool import BigToolAgent
+from universal_mcp.agents.builder import BuilderAgent
+from universal_mcp.agents.planner import PlannerAgent
 from universal_mcp.agents.react import ReactAgent
 from universal_mcp.agents.simple import SimpleAgent
 
-__all__ = ["BaseAgent", "ReactAgent", "SimpleAgent", "AutoAgent"]
+__all__ = ["BaseAgent", "ReactAgent", "SimpleAgent", "AutoAgent", "BigToolAgent", "PlannerAgent", "BuilderAgent"]
universal_mcp/agents/autoagent/__init__.py
@@ -1,6 +1,5 @@
 from langgraph.checkpoint.base import BaseCheckpointSaver
 
-from universal_mcp.agentr.registry import AgentrRegistry
 from universal_mcp.agents.autoagent.graph import build_graph
 from universal_mcp.agents.base import BaseAgent
 from universal_mcp.tools.registry import ToolRegistry
@@ -17,7 +16,7 @@ class AutoAgent(BaseAgent):
         **kwargs,
     ):
         super().__init__(name, instructions, model, memory, **kwargs)
-        self.tool_registry = registry or AgentrRegistry()
+        self.tool_registry = registry
 
     async def _build_graph(self):
         builder = await build_graph(self.tool_registry, self.instructions)
universal_mcp/agents/autoagent/__main__.py
@@ -1,5 +1,7 @@
 import asyncio
 
+from loguru import logger
+
 from universal_mcp.agentr.registry import AgentrRegistry
 from universal_mcp.agents.autoagent import AutoAgent
 
@@ -9,12 +11,13 @@ async def main():
         name="autoagent",
         instructions="You are a helpful assistant that can use tools to help the user.",
         model="azure/gpt-4.1",
-        tool_registry=AgentrRegistry(),
-    )
-    await agent.invoke(
-        user_input="Please send the email from google-mail to manoj@agentr.dev, with subject hello and body hello from auto",
-        thread_id="12345",
+        registry=AgentrRegistry(),
     )
+    async for event in agent.stream(
+        user_input="Send an email to manoj@agentr.dev",
+        thread_id="test123",
+    ):
+        logger.info(event.content)
     # from loguru import logger; logger.debug(result)
 
 
universal_mcp/agents/autoagent/graph.py
@@ -45,7 +45,7 @@ async def build_graph(tool_registry: ToolRegistry, instructions: str = ""):
     ):
         system_prompt = SYSTEM_PROMPT
        app_ids = await tool_registry.list_all_apps()
-        connections = tool_registry.client.list_my_connections()
+        connections = await tool_registry.list_connected_apps()
         connection_ids = set([connection["app_id"] for connection in connections])
         connected_apps = [app["id"] for app in app_ids if app["id"] in connection_ids]
         unconnected_apps = [app["id"] for app in app_ids if app["id"] not in connection_ids]
@@ -61,7 +61,6 @@ async def build_graph(tool_registry: ToolRegistry, instructions: str = ""):
 
         messages = [{"role": "system", "content": system_prompt + "\n" + instructions}, *state["messages"]]
         model = load_chat_model(runtime.context.model)
-        # Load tools from tool registry
         loaded_tools = await tool_registry.export_tools(tools=state["selected_tool_ids"], format=ToolFormat.LANGCHAIN)
         model_with_tools = model.bind_tools([search_tools, ask_user, load_tools, *loaded_tools], tool_choice="auto")
         response_raw = model_with_tools.invoke(messages)
@@ -80,10 +79,10 @@ async def build_graph(tool_registry: ToolRegistry, instructions: str = ""):
 
     def tool_router(state: State):
         last_message = state["messages"][-1]
-        if isinstance(last_message, ToolMessage):
-            return "agent"
-        else:
+        if isinstance(last_message, ToolMessage) and last_message.name == ask_user.name:
             return END
+        else:
+            return "agent"
 
     async def tool_node(state: State):
         outputs = []
@@ -99,8 +98,6 @@ async def build_graph(tool_registry: ToolRegistry, instructions: str = ""):
                         tool_call_id=tool_call["id"],
                     )
                 )
-                ai_message = AIMessage(content=tool_call["args"]["question"])
-                outputs.append(ai_message)
             elif tool_call["name"] == search_tools.name:
                 tools = await search_tools.ainvoke(tool_call["args"])
                 outputs.append(
universal_mcp/agents/autoagent/prompts.py
@@ -5,5 +5,5 @@ SYSTEM_PROMPT = """You are a helpful AI assistant. When you lack tools for any t
 System time: {system_time}
 These are the list of apps available to you:
 {app_ids}
-Note that when multiple apps seem relevant for a task, you MUST ask the user to choose the app. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must ask the user to choose the app.
+Note that when multiple apps seem relevant for a task, you MUST ask the user to choose the app. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must ask the user to choose the app. In case the user asks you to use an app that is not connected, call the apps tools normally. You will be provided a link for connection that you should pass on to the user.
 """
universal_mcp/agents/base.py
@@ -4,7 +4,6 @@ from uuid import uuid4
 
 from langchain_core.messages import AIMessageChunk
 from langgraph.checkpoint.base import BaseCheckpointSaver
-from langgraph.checkpoint.memory import MemorySaver
 from langgraph.types import Command
 
 from .utils import RichCLI
@@ -15,7 +14,7 @@ class BaseAgent:
         self.name = name
         self.instructions = instructions
         self.model = model
-        self.memory = memory or MemorySaver()
+        self.memory = memory
         self._graph = None
         self._initialized = False
         self.cli = RichCLI()
@@ -30,6 +29,7 @@ class BaseAgent:
 
     async def stream(self, thread_id: str, user_input: str):
         await self.ainit()
+        aggregate = None
         async for event, metadata in self._graph.astream(
             {"messages": [{"role": "user", "content": user_input}]},
             config={"configurable": {"thread_id": thread_id}},
@@ -38,25 +38,23 @@
             stream_usage=True,
         ):
             # Only forward assistant token chunks that are not tool-related.
-            event = cast(AIMessageChunk, event)
-            if "finish_reason" in event.response_metadata:
-                # Got LLM finish reason ignore it
-                # logger.debug(f"Finish event: {event}")
+            type_ = type(event)
+            if type_ != AIMessageChunk:
                 continue
-
-            # Skip chunks that correspond to tool calls or tool execution phases
-            # - tool_call_chunks present => model is emitting tool call(s)
-            # - metadata tags or node names may indicate tool/quiet phases
+            event = cast(AIMessageChunk, event)
+            aggregate = event if aggregate is None else aggregate + event
             tags = metadata.get("tags", []) if isinstance(metadata, dict) else []
-
             is_quiet = isinstance(tags, list) and ("quiet" in tags)
 
             if is_quiet:
                 continue
-
-            # Emit only the token chunks for the final assistant message.
-            # logger.debug(f"Event: {event}, Metadata: {metadata}")
-            yield event
+            if "finish_reason" in event.response_metadata:
+                # Got LLM finish reason ignore it
+                # logger.debug(f"Finish event: {event}, Metadata: {metadata}")
+                pass
+            else:
+                # logger.debug(f"Event: {event}, Metadata: {metadata}")
+                yield event
         # Send a final finished message
         # The last event would be finish
         event = cast(AIMessageChunk, event)
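The reworked loop leans on LangChain's chunk semantics: `AIMessageChunk` supports `+`, so `aggregate + event` merges streamed chunks into one running message. A standalone sketch of that accumulation pattern (illustrative only, outside the agent):

    from langchain_core.messages import AIMessageChunk

    aggregate = None
    for event in [AIMessageChunk(content="Hel"), AIMessageChunk(content="lo")]:
        # same accumulation as BaseAgent.stream above
        aggregate = event if aggregate is None else aggregate + event
    assert aggregate.content == "Hello"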
universal_mcp/agents/bigtool/__init__.py
@@ -0,0 +1,54 @@
+from langgraph.checkpoint.base import BaseCheckpointSaver
+
+from universal_mcp.agents.base import BaseAgent
+from universal_mcp.agents.llm import load_chat_model
+from universal_mcp.logger import logger
+from universal_mcp.tools.registry import ToolRegistry
+
+from .graph import build_graph
+from .prompts import SYSTEM_PROMPT
+
+
+class BigToolAgent(BaseAgent):
+    def __init__(
+        self,
+        name: str,
+        instructions: str,
+        model: str,
+        registry: ToolRegistry,
+        memory: BaseCheckpointSaver | None = None,
+        **kwargs,
+    ):
+        # Combine the base system prompt with agent-specific instructions
+        full_instructions = f"{SYSTEM_PROMPT}\n\n**User Instructions:**\n{instructions}"
+        super().__init__(name, full_instructions, model, memory, **kwargs)
+
+        self.registry = registry
+        self.llm = load_chat_model(self.model)
+        self.tool_selection_llm = load_chat_model("gemini/gemini-2.0-flash-001")
+
+        logger.info(f"BigToolAgent '{self.name}' initialized with model '{self.model}'.")
+
+    async def _build_graph(self):
+        """Build the bigtool agent graph using the existing create_agent function."""
+        logger.info(f"Building graph for BigToolAgent '{self.name}'...")
+        try:
+            graph_builder = build_graph(
+                tool_registry=self.registry,
+                llm=self.llm,
+                tool_selection_llm=self.tool_selection_llm,
+            )
+
+            compiled_graph = graph_builder.compile(checkpointer=self.memory)
+            logger.info("Graph built and compiled successfully.")
+            return compiled_graph
+        except Exception as e:
+            logger.error(f"Error building graph for BigToolAgent '{self.name}': {e}")
+            raise
+
+    @property
+    def graph(self):
+        return self._graph
+
+
+__all__ = ["BigToolAgent"]
universal_mcp/agents/bigtool/__main__.py
@@ -0,0 +1,24 @@
+import asyncio
+
+from loguru import logger
+
+from universal_mcp.agentr.registry import AgentrRegistry
+from universal_mcp.agents.bigtool import BigToolAgent
+
+
+async def main():
+    agent = BigToolAgent(
+        name="bigtool",
+        instructions="You are a helpful assistant that can use tools to help the user.",
+        model="azure/gpt-4.1",
+        registry=AgentrRegistry(),
+    )
+    async for event in agent.stream(
+        user_input="Send an email to manoj@agentr.dev",
+        thread_id="test123",
+    ):
+        logger.info(event.content)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
universal_mcp/agents/bigtool/context.py
@@ -0,0 +1,24 @@
+from dataclasses import dataclass, field
+
+from .prompts import SYSTEM_PROMPT
+
+
+@dataclass(kw_only=True)
+class Context:
+    """The context for the agent."""
+
+    system_prompt: str = field(
+        default=SYSTEM_PROMPT,
+        metadata={
+            "description": "The system prompt to use for the agent's interactions. "
+            "This prompt sets the context and behavior for the agent."
+        },
+    )
+
+    model: str = field(
+        default="anthropic/claude-4-sonnet-20250514",
+        metadata={
+            "description": "The name of the language model to use for the agent's main interactions. "
+            "Should be in the form: provider/model-name."
+        },
+    )
universal_mcp/agents/bigtool/graph.py
@@ -0,0 +1,166 @@
+import json
+from datetime import UTC, datetime
+from typing import Literal, TypedDict, cast
+
+from langchain_anthropic import ChatAnthropic
+from langchain_core.language_models import BaseChatModel
+from langchain_core.messages import AIMessage, ToolMessage
+from langchain_core.tools import tool
+from langgraph.graph import StateGraph
+from langgraph.runtime import Runtime
+from langgraph.types import Command
+
+from universal_mcp.agents.bigtool.context import Context
+from universal_mcp.agents.bigtool.state import State
+from universal_mcp.logger import logger
+from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolFormat
+
+from .prompts import SELECT_TOOL_PROMPT
+
+
+def build_graph(
+    tool_registry: ToolRegistry,
+    llm: BaseChatModel,
+    tool_selection_llm: BaseChatModel,
+):
+    @tool
+    async def retrieve_tools(task_query: str) -> list[str]:
+        """Retrieve tools for a given task.
+        Task query should be atomic (doable with a single tool).
+        For tasks requiring multiple tools, call this tool multiple times for each subtask."""
+        logger.info(f"Retrieving tools for task: '{task_query}'")
+        try:
+            tools_list = await tool_registry.search_tools(task_query, limit=10)
+            tool_candidates = [f"{tool['id']}: {tool['description']}" for tool in tools_list]
+            logger.info(f"Found {len(tool_candidates)} candidate tools.")
+
+            class ToolSelectionOutput(TypedDict):
+                tool_names: list[str]
+
+            model = tool_selection_llm
+            app_ids = await tool_registry.list_all_apps()
+            connections = await tool_registry.list_connected_apps()
+            connection_ids = set([connection["app_id"] for connection in connections])
+            connected_apps = [app["id"] for app in app_ids if app["id"] in connection_ids]
+            unconnected_apps = [app["id"] for app in app_ids if app["id"] not in connection_ids]
+            app_id_descriptions = "These are the apps connected to the user's account:\n" + "\n".join(
+                [f"{app}" for app in connected_apps]
+            )
+            if unconnected_apps:
+                app_id_descriptions += "\n\nOther (not connected) apps: " + "\n".join(
+                    [f"{app}" for app in unconnected_apps]
+                )
+
+            response = await model.with_structured_output(schema=ToolSelectionOutput, method="json_mode").ainvoke(
+                SELECT_TOOL_PROMPT.format(
+                    app_ids=app_id_descriptions, tool_candidates="\n - ".join(tool_candidates), task=task_query
+                )
+            )
+
+            selected_tool_names = cast(ToolSelectionOutput, response)["tool_names"]
+            logger.info(f"Selected tools: {selected_tool_names}")
+            return selected_tool_names
+        except Exception as e:
+            logger.error(f"Error retrieving tools: {e}")
+            return []
+
+    async def call_model(state: State, runtime: Runtime[Context]) -> Command[Literal["select_tools", "call_tools"]]:
+        logger.info("Calling model...")
+        try:
+            system_message = runtime.context.system_prompt.format(system_time=datetime.now(tz=UTC).isoformat())
+            messages = [{"role": "system", "content": system_message}, *state["messages"]]
+
+            logger.info(f"Selected tool IDs: {state['selected_tool_ids']}")
+            selected_tools = await tool_registry.export_tools(
+                tools=state["selected_tool_ids"],
+                format=ToolFormat.LANGCHAIN,
+            )
+            logger.info(f"Exported {len(selected_tools)} tools for model.")
+
+            model = llm
+            if isinstance(model, ChatAnthropic):
+                model_with_tools = model.bind_tools(
+                    [retrieve_tools, *selected_tools], tool_choice="auto", cache_control={"type": "ephemeral"}
+                )
+            else:
+                model_with_tools = model.bind_tools([retrieve_tools, *selected_tools], tool_choice="auto")
+            response = cast(AIMessage, await model_with_tools.ainvoke(messages))
+
+            if response.tool_calls:
+                logger.info(f"Model responded with {len(response.tool_calls)} tool calls.")
+                if len(response.tool_calls) > 1:
+                    raise Exception("Not possible in Claude with llm.bind_tools(tools=tools, tool_choice='auto')")
+                tool_call = response.tool_calls[0]
+                if tool_call["name"] == retrieve_tools.name:
+                    logger.info("Model requested to select tools.")
+                    return Command(goto="select_tools", update={"messages": [response]})
+                elif tool_call["name"] not in state["selected_tool_ids"]:
+                    try:
+                        await tool_registry.export_tools([tool_call["name"]], ToolFormat.LANGCHAIN)
+                        logger.info(
+                            f"Tool '{tool_call['name']}' not in selected tools, but available. Proceeding to call."
+                        )
+                        return Command(goto="call_tools", update={"messages": [response]})
+                    except Exception as e:
+                        logger.error(f"Unexpected tool call: {tool_call['name']}. Error: {e}")
+                        raise Exception(
+                            f"Unexpected tool call: {tool_call['name']}. Available tools: {state['selected_tool_ids']}"
+                        ) from e
+                logger.info(f"Proceeding to call tool: {tool_call['name']}")
+                return Command(goto="call_tools", update={"messages": [response]})
+            else:
+                logger.info("Model responded with a message, ending execution.")
+                return Command(update={"messages": [response]})
+        except Exception as e:
+            logger.error(f"Error in call_model: {e}")
+            raise
+
+    async def select_tools(state: State, runtime: Runtime[Context]) -> Command[Literal["call_model"]]:
+        logger.info("Selecting tools...")
+        try:
+            tool_call = state["messages"][-1].tool_calls[0]
+            selected_tool_names = await retrieve_tools.ainvoke(input=tool_call["args"])
+            tool_msg = ToolMessage(f"Available tools: {selected_tool_names}", tool_call_id=tool_call["id"])
+            logger.info(f"Tools selected: {selected_tool_names}")
+            return Command(goto="call_model", update={"messages": [tool_msg], "selected_tool_ids": selected_tool_names})
+        except Exception as e:
+            logger.error(f"Error in select_tools: {e}")
+            raise
+
+    async def call_tools(state: State) -> Command[Literal["call_model"]]:
+        logger.info("Calling tools...")
+        outputs = []
+        recent_tool_ids = []
+        for tool_call in state["messages"][-1].tool_calls:
+            logger.info(f"Executing tool: {tool_call['name']} with args: {tool_call['args']}")
+            try:
+                await tool_registry.export_tools([tool_call["name"]], ToolFormat.LANGCHAIN)
+                tool_result = await tool_registry.call_tool(tool_call["name"], tool_call["args"])
+                logger.info(f"Tool '{tool_call['name']}' executed successfully.")
+                outputs.append(
+                    ToolMessage(
+                        content=json.dumps(tool_result),
+                        name=tool_call["name"],
+                        tool_call_id=tool_call["id"],
+                    )
+                )
+                recent_tool_ids.append(tool_call["name"])
+            except Exception as e:
+                logger.error(f"Error executing tool '{tool_call['name']}': {e}")
+                outputs.append(
+                    ToolMessage(
+                        content=json.dumps("Error: " + str(e)),
+                        name=tool_call["name"],
+                        tool_call_id=tool_call["id"],
+                    )
+                )
+        return Command(goto="call_model", update={"messages": outputs, "selected_tool_ids": recent_tool_ids})
+
+    builder = StateGraph(State, context_schema=Context)
+
+    builder.add_node(call_model)
+    builder.add_node(select_tools)
+    builder.add_node(call_tools)
+    builder.set_entry_point("call_model")
+    return builder
universal_mcp/agents/bigtool/prompts.py
@@ -0,0 +1,31 @@
+"""Default prompts used by the agent."""
+
+SYSTEM_PROMPT = """You are a helpful AI assistant.
+
+**Core Directives:**
+1. **Always Use Tools for Tasks:** For any user request that requires an action (e.g., sending an email, searching for information, creating an event), you MUST use a tool. Do not answer from your own knowledge or refuse a task if a tool might exist for it.
+2. **First Step is ALWAYS `retrieve_tools`:** Before you can use any other tool, you MUST first call the `retrieve_tools` function to find the right tool for the user's request. This is your mandatory first action.
+3. **Strictly Follow the Process:** Your only job in your first turn is to analyze the user's request and call `retrieve_tools` with a concise query describing the core task. Do not engage in conversation.
+
+System time: {system_time}
+
+When multiple tools are available for the same task, you must ask the user.
+"""
+
+SELECT_TOOL_PROMPT = """You are an AI assistant that helps the user perform tasks using various apps (each app has multiple tools).
+You will be provided with a task and a list of tools which might be relevant for this task.
+
+Your goal is to select the most appropriate tool for the given task.
+<task>
+{task}
+</task>
+
+These are the list of apps available to you:
+{app_ids}
+Note that when multiple apps seem relevant for a task, prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must choose both apps tools. In case the user specifically asks you to use an app that is not connected, select the tool.
+
+<tool_candidates>
+ - {tool_candidates}
+</tool_candidates>
+
+"""
universal_mcp/agents/bigtool/state.py
@@ -0,0 +1,27 @@
+from typing import Annotated
+
+from langgraph.prebuilt.chat_agent_executor import AgentState
+
+
+def _enqueue(left: list, right: list) -> list:
+    """Treat left as a FIFO queue, append new items from right (preserve order),
+    keep items unique, and cap total size to 20 (drop oldest items)."""
+    max_size = 30
+    preferred_size = 20
+    if len(right) > preferred_size:
+        preferred_size = min(max_size, len(right))
+    queue = list(left or [])
+
+    for item in right[:preferred_size] or []:
+        if item in queue:
+            queue.remove(item)
+        queue.append(item)
+
+    if len(queue) > preferred_size:
+        queue = queue[-preferred_size:]
+
+    return queue
+
+
+class State(AgentState):
+    selected_tool_ids: Annotated[list[str], _enqueue]
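Because `selected_tool_ids` is annotated with `_enqueue`, LangGraph merges successive state updates through the reducer instead of overwriting: new ids are appended, a re-selected id moves to the back of the queue, and the oldest entries are dropped once the cap is exceeded. A quick illustration of the reducer in isolation:

    # re-selecting "b" moves it to the back; "d" is appended; nothing is evicted yet
    assert _enqueue(["a", "b", "c"], ["b", "d"]) == ["a", "c", "b", "d"]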
universal_mcp/agents/builder.py
@@ -0,0 +1,80 @@
+import asyncio
+from collections.abc import Sequence
+from typing import Annotated, TypedDict
+
+from langchain_core.language_models import BaseChatModel
+from langchain_core.messages import BaseMessage
+from langgraph.checkpoint.base import BaseCheckpointSaver
+from langgraph.graph import END, START, StateGraph
+from langgraph.graph.message import add_messages
+
+from universal_mcp.agents.base import BaseAgent
+from universal_mcp.agents.llm import load_chat_model
+from universal_mcp.agents.shared.agent_node import Agent, generate_agent
+from universal_mcp.agents.shared.tool_node import build_tool_node_graph
+from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolConfig
+
+
+class BuilderState(TypedDict):
+    messages: Annotated[Sequence[BaseMessage], add_messages]
+    generated_agent: Agent | None
+    tool_config: ToolConfig | None
+
+
+class BuilderAgent(BaseAgent):
+    def __init__(
+        self,
+        name: str,
+        instructions: str,
+        model: str,
+        registry: ToolRegistry,
+        memory: BaseCheckpointSaver | None = None,
+        **kwargs,
+    ):
+        super().__init__(name, instructions, model, memory, **kwargs)
+        self.registry = registry
+        self.llm: BaseChatModel = load_chat_model(model)
+
+    async def _create_agent(self, state: BuilderState):
+        last_message = state["messages"][-1]
+        generated_agent = await generate_agent(self.llm, last_message.content)
+        return {"generated_agent": generated_agent}
+
+    async def _create_tool_config(self, state: BuilderState):
+        last_message = state["messages"][-1]
+        tool_finder_graph = build_tool_node_graph(self.llm, self.registry)
+        tool_config = await tool_finder_graph.ainvoke({"task": last_message.content, "messages": [last_message]})
+        tool_config = tool_config.get("apps_with_tools", {})
+        return {"tool_config": tool_config}
+
+    async def _build_graph(self):
+        builder = StateGraph(BuilderState)
+        builder.add_node("create_agent", self._create_agent)
+        builder.add_node("create_tool_config", self._create_tool_config)
+        builder.add_edge(START, "create_agent")
+        builder.add_edge("create_agent", "create_tool_config")
+        builder.add_edge("create_tool_config", END)
+        return builder.compile()
+
+
+async def main():
+    from universal_mcp.agentr.registry import AgentrRegistry
+
+    registry = AgentrRegistry()
+    agent = BuilderAgent(
+        name="Builder Agent",
+        instructions="You are a builder agent that creates other agents.",
+        model="gemini/gemini-1.5-pro",
+        registry=registry,
+    )
+    result = await agent.invoke(
+        "Send a daily email to manoj@agentr.dev with daily agenda of the day",
+    )
+    print(result.model_dump_json(indent=2))
+    # print(f"Agent: {result['generated_agent'].model_dump_json(indent=2)}")
+    # print(f"Tool Config: {result['tool_config'].model_dump_json(indent=2)}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
universal_mcp/agents/llm.py
@@ -1,26 +1,37 @@
 from langchain_anthropic import ChatAnthropic
 from langchain_core.language_models import BaseChatModel
-from langchain_google_vertexai import ChatVertexAI
-from langchain_google_vertexai.model_garden import ChatAnthropicVertex
+from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain_openai import AzureChatOpenAI
 
 
-def load_chat_model(fully_specified_name: str, tags: list[str] | None = None) -> BaseChatModel:
+def load_chat_model(
+    fully_specified_name: str, temperature: float = 1.0, tags: list[str] | None = None
+) -> BaseChatModel:
     """Load a chat model from a fully specified name.
 
     Args:
         fully_specified_name (str): String in the format 'provider/model'.
     """
     provider, model = fully_specified_name.split("/", maxsplit=1)
-    if provider == "google_anthropic_vertex":
-        return ChatAnthropicVertex(model=model, temperature=0.2, location="asia-east1", tags=tags)
-    elif provider == "anthropic":
+    if provider == "anthropic":
         return ChatAnthropic(
-            model=model, temperature=1, thinking={"type": "enabled", "budget_tokens": 2048}, max_tokens=4096, tags=tags
+            model=model,
+            temperature=temperature,
+            thinking={"type": "enabled", "budget_tokens": 2048},
+            max_tokens=4096,
+            tags=tags,
+            stream_usage=True,
         )  # pyright: ignore[reportCallIssue]
     elif provider == "azure":
-        return AzureChatOpenAI(model=model, api_version="2024-12-01-preview", azure_deployment=model, tags=tags)
-    elif provider == "vertex":
-        return ChatVertexAI(model=model, temperature=0.1, tags=tags)
+        return AzureChatOpenAI(
+            model=model,
+            api_version="2024-12-01-preview",
+            azure_deployment=model,
+            temperature=temperature,
+            tags=tags,
+            stream_usage=True,
+        )
+    elif provider == "gemini":
+        return ChatGoogleGenerativeAI(model=model, temperature=temperature)
     else:
         raise ValueError(f"Unsupported provider: {provider}")
 
@@ -28,5 +39,7 @@ def load_chat_model(fully_specified_name: str, tags: list[str] | None = None) ->
 if __name__ == "__main__":
     from loguru import logger
 
-    llm = load_chat_model("azure/gpt-4.1")
-    logger.info(llm.invoke("Hello, world!"))
+    models_to_test = ["azure/gpt-5-chat", "anthropic/claude-4-sonnet-20250514", "gemini/gemini-2.5-pro"]
+    for model in models_to_test:
+        llm = load_chat_model(model)
+        logger.info(llm.invoke("Hi!"))
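With the Vertex code paths removed, `gemini/...` names now resolve to `ChatGoogleGenerativeAI` from langchain-google-genai, which reads `GOOGLE_API_KEY` from the environment by default. A minimal usage sketch of the new signature (assuming that package is installed and credentials are set):

    llm = load_chat_model("gemini/gemini-2.5-pro", temperature=0.2)
    print(llm.invoke("Hi!").content)  # "vertex/..." names now raise ValueError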