universal-mcp 0.1.24rc12__py3-none-any.whl → 0.1.24rc14__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
Files changed (37)
  1. universal_mcp/agentr/registry.py +6 -0
  2. universal_mcp/agents/__init__.py +5 -1
  3. universal_mcp/agents/autoagent/__init__.py +1 -2
  4. universal_mcp/agents/autoagent/__main__.py +8 -5
  5. universal_mcp/agents/autoagent/graph.py +4 -7
  6. universal_mcp/agents/autoagent/prompts.py +1 -1
  7. universal_mcp/agents/base.py +2 -4
  8. universal_mcp/agents/bigtool/__init__.py +54 -0
  9. universal_mcp/agents/bigtool/__main__.py +24 -0
  10. universal_mcp/agents/bigtool/context.py +24 -0
  11. universal_mcp/agents/bigtool/graph.py +166 -0
  12. universal_mcp/agents/bigtool/prompts.py +31 -0
  13. universal_mcp/agents/bigtool/state.py +27 -0
  14. universal_mcp/agents/bigtool2/__init__.py +53 -0
  15. universal_mcp/agents/bigtool2/__main__.py +24 -0
  16. universal_mcp/agents/bigtool2/agent.py +11 -0
  17. universal_mcp/agents/bigtool2/context.py +33 -0
  18. universal_mcp/agents/bigtool2/graph.py +169 -0
  19. universal_mcp/agents/bigtool2/prompts.py +12 -0
  20. universal_mcp/agents/bigtool2/state.py +27 -0
  21. universal_mcp/agents/builder.py +80 -0
  22. universal_mcp/agents/llm.py +23 -16
  23. universal_mcp/agents/planner/__init__.py +37 -0
  24. universal_mcp/agents/planner/__main__.py +24 -0
  25. universal_mcp/agents/planner/graph.py +82 -0
  26. universal_mcp/agents/planner/prompts.py +1 -0
  27. universal_mcp/agents/planner/state.py +12 -0
  28. universal_mcp/agents/shared/agent_node.py +34 -0
  29. universal_mcp/agents/shared/tool_node.py +235 -0
  30. universal_mcp/tools/registry.py +5 -0
  31. universal_mcp/types.py +5 -2
  32. {universal_mcp-0.1.24rc12.dist-info → universal_mcp-0.1.24rc14.dist-info}/METADATA +3 -1
  33. {universal_mcp-0.1.24rc12.dist-info → universal_mcp-0.1.24rc14.dist-info}/RECORD +36 -16
  34. universal_mcp/agents/auto.py +0 -575
  35. {universal_mcp-0.1.24rc12.dist-info → universal_mcp-0.1.24rc14.dist-info}/WHEEL +0 -0
  36. {universal_mcp-0.1.24rc12.dist-info → universal_mcp-0.1.24rc14.dist-info}/entry_points.txt +0 -0
  37. {universal_mcp-0.1.24rc12.dist-info → universal_mcp-0.1.24rc14.dist-info}/licenses/LICENSE +0 -0
universal_mcp/agents/bigtool2/graph.py
@@ -0,0 +1,169 @@
+ import json
+ from datetime import UTC, datetime
+ from typing import Literal, TypedDict, cast
+
+ from langchain_anthropic import ChatAnthropic
+ from langchain_core.language_models import BaseChatModel
+ from langchain_core.messages import AIMessage, ToolMessage
+ from langchain_core.tools import tool
+ from langgraph.graph import StateGraph
+ from langgraph.runtime import Runtime
+ from langgraph.types import Command
+
+ from universal_mcp.agents.bigtool2.context import Context
+ from universal_mcp.agents.bigtool2.state import State
+ from universal_mcp.logger import logger
+ from universal_mcp.tools.registry import ToolRegistry
+ from universal_mcp.types import ToolFormat
+
+
+
+ def build_graph(
+     tool_registry: ToolRegistry,
+     llm: BaseChatModel
+ ):
+     @tool
+     async def search_tools(queries: list[str]) -> str:
+         """Search tools for a given list of queries
+         Each single query should be atomic (doable with a single tool).
+         For tasks requiring multiple tools, add separate queries for each subtask"""
+         logger.info(f"Searching tools for queries: '{queries}'")
+         try:
+             all_tool_candidates = ""
+             app_ids = await tool_registry.list_all_apps()
+             connections = await tool_registry.list_connected_apps()
+             connection_ids = set([connection["app_id"] for connection in connections])
+             connected_apps = [app["id"] for app in app_ids if app["id"] in connection_ids]
+             unconnected_apps = [app["id"] for app in app_ids if app["id"] not in connection_ids]
+             app_tools = {}
+             for task_query in queries:
+                 tools_list = await tool_registry.search_tools(task_query, limit=40)
+                 tool_candidates = [f"{tool['id']}: {tool['description']}" for tool in tools_list]
+                 for tool in tool_candidates:
+                     app = tool.split("__")[0]
+                     if app not in app_tools:
+                         if len(app_tools.keys()) >= 10:
+                             break
+                         app_tools[app] = []
+                     if len(app_tools[app]) < 3:
+                         app_tools[app].append(tool)
+             for app in app_tools:
+                 app_status = "connected" if app in connected_apps else "NOT connected"
+                 all_tool_candidates += f"Tools from {app} (status: {app_status} by user):\n"
+                 for tool in app_tools[app]:
+                     all_tool_candidates += f"  - {tool}\n"
+                 all_tool_candidates += "\n"
+
+
+             return all_tool_candidates
+         except Exception as e:
+             logger.error(f"Error retrieving tools: {e}")
+             return "Error: " + str(e)
+
+     @tool
+     async def load_tools(tool_ids: list[str]) -> list[str]:
+         """Load the tools for the given tool ids. Returns the tool ids."""
+         return tool_ids
+
+
+     async def call_model(state: State, runtime: Runtime[Context]) -> Command[Literal["select_tools", "call_tools"]]:
+         logger.info("Calling model...")
+         try:
+             system_message = runtime.context.system_prompt.format(system_time=datetime.now(tz=UTC).isoformat())
+             messages = [{"role": "system", "content": system_message}, *state["messages"]]
+
+             logger.info(f"Selected tool IDs: {state['selected_tool_ids']}")
+             if len(state["selected_tool_ids"]) > 0:
+                 selected_tools = await tool_registry.export_tools(tools=state["selected_tool_ids"], format=ToolFormat.LANGCHAIN)
+                 logger.info(f"Exported {len(selected_tools)} tools for model.")
+             else:
+                 selected_tools = []
+
+             model = llm
+
+             model_with_tools = model.bind_tools([search_tools, load_tools, *selected_tools], tool_choice="auto")
+             response = cast(AIMessage, await model_with_tools.ainvoke(messages))
+
+             if response.tool_calls:
+                 logger.info(f"Model responded with {len(response.tool_calls)} tool calls.")
+                 if len(response.tool_calls) > 1:
+                     raise Exception("Not possible in Claude with llm.bind_tools(tools=tools, tool_choice='auto')")
+                 tool_call = response.tool_calls[0]
+                 if tool_call["name"] == search_tools.name:
+                     logger.info("Model requested to select tools.")
+                     return Command(goto="select_tools", update={"messages": [response]})
+                 elif tool_call["name"] == load_tools.name:
+                     logger.info("Model requested to load tools.")
+                     tool_msg = ToolMessage(f"Loaded tools.", tool_call_id=tool_call["id"])
+                     selected_tool_ids = tool_call["args"]["tool_ids"]
+                     logger.info(f"Loaded tools: {selected_tool_ids}")
+                     return Command(goto="call_model", update={ "messages": [response, tool_msg], "selected_tool_ids": selected_tool_ids})
+
+                 elif tool_call["name"] not in state["selected_tool_ids"]:
+                     try:
+                         await tool_registry.export_tools([tool_call["name"]], ToolFormat.LANGCHAIN)
+                         logger.info(
+                             f"Tool '{tool_call['name']}' not in selected tools, but available. Proceeding to call."
+                         )
+                         return Command(goto="call_tools", update={"messages": [response]})
+                     except Exception as e:
+                         logger.error(f"Unexpected tool call: {tool_call['name']}. Error: {e}")
+                         raise Exception(
+                             f"Unexpected tool call: {tool_call['name']}. Available tools: {state['selected_tool_ids']}"
+                         ) from e
+                 logger.info(f"Proceeding to call tool: {tool_call['name']}")
+                 return Command(goto="call_tools", update={"messages": [response]})
+             else:
+                 logger.info("Model responded with a message, ending execution.")
+                 return Command(update={"messages": [response]})
+         except Exception as e:
+             logger.error(f"Error in call_model: {e}")
+             raise
+
+     async def select_tools(state: State, runtime: Runtime[Context]) -> Command[Literal["call_model"]]:
+         logger.info("Selecting tools...")
+         try:
+             tool_call = state["messages"][-1].tool_calls[0]
+             searched_tools = await search_tools.ainvoke(input=tool_call["args"])
+             tool_msg = ToolMessage(f"Available tools: {searched_tools}", tool_call_id=tool_call["id"])
+             return Command(goto="call_model", update={"messages": [tool_msg]})
+         except Exception as e:
+             logger.error(f"Error in select_tools: {e}")
+             raise
+
+     async def call_tools(state: State) -> Command[Literal["call_model"]]:
+         logger.info("Calling tools...")
+         outputs = []
+         recent_tool_ids = []
+         for tool_call in state["messages"][-1].tool_calls:
+             logger.info(f"Executing tool: {tool_call['name']} with args: {tool_call['args']}")
+             try:
+                 await tool_registry.export_tools([tool_call["name"]], ToolFormat.LANGCHAIN)
+                 tool_result = await tool_registry.call_tool(tool_call["name"], tool_call["args"])
+                 logger.info(f"Tool '{tool_call['name']}' executed successfully.")
+                 outputs.append(
+                     ToolMessage(
+                         content=json.dumps(tool_result),
+                         name=tool_call["name"],
+                         tool_call_id=tool_call["id"],
+                     )
+                 )
+                 recent_tool_ids.append(tool_call["name"])
+             except Exception as e:
+                 logger.error(f"Error executing tool '{tool_call['name']}': {e}")
+                 outputs.append(
+                     ToolMessage(
+                         content=json.dumps("Error: " + str(e)),
+                         name=tool_call["name"],
+                         tool_call_id=tool_call["id"],
+                     )
+                 )
+         return Command(goto="call_model", update={"messages": outputs, "selected_tool_ids": recent_tool_ids})
+
+     builder = StateGraph(State, context_schema=Context)
+
+     builder.add_node(call_model)
+     builder.add_node(select_tools)
+     builder.add_node(call_tools)
+     builder.set_entry_point("call_model")
+     return builder
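
The builder returned above is left uncompiled. A minimal driver sketch, not part of the diff: it assumes `AgentrRegistry` satisfies `ToolRegistry`, that the `Context` class in bigtool2/context.py accepts the `system_prompt` field that `call_model` reads via `runtime.context`, and that runtime context is passed through langgraph's `context` keyword; the task string is invented.

# Hypothetical driver for the bigtool2 graph; not part of the diff.
import asyncio

from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.agents.bigtool2.context import Context
from universal_mcp.agents.bigtool2.graph import build_graph
from universal_mcp.agents.bigtool2.prompts import SYSTEM_PROMPT
from universal_mcp.agents.llm import load_chat_model


async def main():
    registry = AgentrRegistry()
    llm = load_chat_model("anthropic/claude-4-sonnet-20250514")
    graph = build_graph(registry, llm).compile()
    result = await graph.ainvoke(
        {"messages": [("user", "Send an email to manoj@agentr.dev")], "selected_tool_ids": []},
        context=Context(system_prompt=SYSTEM_PROMPT),  # assumed field; read by call_model via runtime.context
    )
    print(result["messages"][-1].content)


if __name__ == "__main__":
    asyncio.run(main())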
universal_mcp/agents/bigtool2/prompts.py
@@ -0,0 +1,12 @@
+ """Default prompts used by the agent."""
+
+ SYSTEM_PROMPT = """You are a helpful AI assistant.
+
+ **Core Directives:**
+ 1. **Always Use Tools for Tasks:** For any user request that requires an action (e.g., sending an email, searching for information, creating an event), you MUST use a tool. Do not answer from your own knowledge or refuse a task if a tool might exist for it.
+ 2. **First Step is ALWAYS `search_tools`:** Before you can use any other tool, you MUST first call the `search_tools` function to find the right tools for the user's request. This is your mandatory first action. You must not use the same/similar query multiple times in the list. The list should have multiple queries only if the task has clearly different sub-tasks.
+ 3. **Load Tools:** After looking at the output of `search_tools`, you MUST call the `load_tools` function to load only the tools you want to use. Use your judgement to eliminate irrelevant apps that came up just because of semantic similarity. However, sometimes, multiple apps might be relevant for the same task. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must ask the user to choose the app. In case the user asks you to use an app that is not connected, call the apps tools normally. The tool will return a link for connecting that you should pass on to the user.
+ 3. **Strictly Follow the Process:** Your only job in your first turn is to analyze the user's request and call `search_tools` with a concise query describing the core task. Do not engage in conversation.
+
+ System time: {system_time}
+ """
universal_mcp/agents/bigtool2/state.py
@@ -0,0 +1,27 @@
+ from typing import Annotated
+
+ from langgraph.prebuilt.chat_agent_executor import AgentState
+
+
+ def _enqueue(left: list, right: list) -> list:
+     """Treat left as a FIFO queue, append new items from right (preserve order),
+     keep items unique, and cap total size to 20 (drop oldest items)."""
+     max_size = 30
+     preferred_size = 20
+     if len(right) > preferred_size:
+         preferred_size = min(max_size, len(right))
+     queue = list(left or [])
+
+     for item in right[:preferred_size] or []:
+         if item in queue:
+             queue.remove(item)
+         queue.append(item)
+
+     if len(queue) > preferred_size:
+         queue = queue[-preferred_size:]
+
+     return queue
+
+
+ class State(AgentState):
+     selected_tool_ids: Annotated[list[str], _enqueue]
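
The `_enqueue` reducer merges newly loaded tool ids into the running queue, deduplicating by moving repeats to the back; note that despite the docstring's cap of 20, `preferred_size` can stretch up to 30 when more than 20 new ids arrive in one update. A quick illustration (the tool ids below are invented):

# Illustration of the _enqueue reducer's dedupe-and-cap behaviour;
# the tool ids are hypothetical examples.
from universal_mcp.agents.bigtool2.state import _enqueue

left = ["github__create_issue", "slack__send_message"]
right = ["slack__send_message", "gmail__send_email"]

# The repeated id moves to the back instead of duplicating, so the
# merged queue stays unique and ordered oldest-to-newest:
print(_enqueue(left, right))
# -> ['github__create_issue', 'slack__send_message', 'gmail__send_email']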
universal_mcp/agents/builder.py
@@ -0,0 +1,80 @@
+ import asyncio
+ from collections.abc import Sequence
+ from typing import Annotated, TypedDict
+
+ from langchain_core.language_models import BaseChatModel
+ from langchain_core.messages import BaseMessage
+ from langgraph.checkpoint.base import BaseCheckpointSaver
+ from langgraph.graph import END, START, StateGraph
+ from langgraph.graph.message import add_messages
+
+ from universal_mcp.agents.base import BaseAgent
+ from universal_mcp.agents.llm import load_chat_model
+ from universal_mcp.agents.shared.agent_node import Agent, generate_agent
+ from universal_mcp.agents.shared.tool_node import build_tool_node_graph
+ from universal_mcp.tools.registry import ToolRegistry
+ from universal_mcp.types import ToolConfig
+
+
+ class BuilderState(TypedDict):
+     messages: Annotated[Sequence[BaseMessage], add_messages]
+     generated_agent: Agent | None
+     tool_config: ToolConfig | None
+
+
+ class BuilderAgent(BaseAgent):
+     def __init__(
+         self,
+         name: str,
+         instructions: str,
+         model: str,
+         registry: ToolRegistry,
+         memory: BaseCheckpointSaver | None = None,
+         **kwargs,
+     ):
+         super().__init__(name, instructions, model, memory, **kwargs)
+         self.registry = registry
+         self.llm: BaseChatModel = load_chat_model(model)
+
+     async def _create_agent(self, state: BuilderState):
+         last_message = state["messages"][-1]
+         generated_agent = await generate_agent(self.llm, last_message.content)
+         return {"generated_agent": generated_agent}
+
+     async def _create_tool_config(self, state: BuilderState):
+         last_message = state["messages"][-1]
+         tool_finder_graph = build_tool_node_graph(self.llm, self.registry)
+         tool_config = await tool_finder_graph.ainvoke({"task": last_message.content, "messages": [last_message]})
+         tool_config = tool_config.get("apps_with_tools", {})
+         return {"tool_config": tool_config}
+
+     async def _build_graph(self):
+         builder = StateGraph(BuilderState)
+         builder.add_node("create_agent", self._create_agent)
+         builder.add_node("create_tool_config", self._create_tool_config)
+         builder.add_edge(START, "create_agent")
+         builder.add_edge("create_agent", "create_tool_config")
+         builder.add_edge("create_tool_config", END)
+         return builder.compile()
+
+
+ async def main():
+     from universal_mcp.agentr.registry import AgentrRegistry
+
+     registry = AgentrRegistry()
+     agent = BuilderAgent(
+         name="Builder Agent",
+         instructions="You are a builder agent that creates other agents.",
+         model="gemini/gemini-1.5-pro",
+         registry=registry,
+     )
+     result = await agent.invoke(
+         "Send a daily email to manoj@agentr.dev with daily agenda of the day",
+     )
+     print(result.model_dump_json(indent=2))
+     # print(f"Agent: {result['generated_agent'].model_dump_json(indent=2)}")
+     # print(f"Tool Config: {result['tool_config'].model_dump_json(indent=2)}")
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
universal_mcp/agents/llm.py
@@ -1,32 +1,37 @@
- import os
  from langchain_anthropic import ChatAnthropic
  from langchain_core.language_models import BaseChatModel
  from langchain_google_genai import ChatGoogleGenerativeAI
- from langchain_google_vertexai import ChatVertexAI
- from langchain_google_vertexai.model_garden import ChatAnthropicVertex
  from langchain_openai import AzureChatOpenAI


- def load_chat_model(fully_specified_name: str, tags: list[str] | None = None) -> BaseChatModel:
+ def load_chat_model(
+     fully_specified_name: str, temperature: float = 1.0, tags: list[str] | None = None
+ ) -> BaseChatModel:
      """Load a chat model from a fully specified name.
      Args:
          fully_specified_name (str): String in the format 'provider/model'.
      """
      provider, model = fully_specified_name.split("/", maxsplit=1)
-     if provider == "google_anthropic_vertex":
-         return ChatAnthropicVertex(model=model, temperature=0.2, location="asia-east1", tags=tags, stream_usage=True,)
-     elif provider == "anthropic":
+     if provider == "anthropic":
          return ChatAnthropic(
-             model=model, temperature=1, thinking={"type": "enabled", "budget_tokens": 2048}, max_tokens=4096, tags=tags, stream_usage=True,
+             model=model,
+             temperature=temperature,
+             thinking={"type": "enabled", "budget_tokens": 2048},
+             max_tokens=4096,
+             tags=tags,
+             stream_usage=True,
          ) # pyright: ignore[reportCallIssue]
      elif provider == "azure":
-         return AzureChatOpenAI(model=model, api_version="2024-12-01-preview", azure_deployment=model, tags=tags, stream_usage=True,)
-     elif provider == "gemini":
-         return ChatGoogleGenerativeAI(
-             model= model,
-             temperature=1.0,
-             max_retries=2
+         return AzureChatOpenAI(
+             model=model,
+             api_version="2024-12-01-preview",
+             azure_deployment=model,
+             temperature=temperature,
+             tags=tags,
+             stream_usage=True,
          )
+     elif provider == "gemini":
+         return ChatGoogleGenerativeAI(model=model, temperature=temperature)
      else:
          raise ValueError(f"Unsupported provider: {provider}")

@@ -34,5 +39,7 @@ def load_chat_model(fully_specified_name: str, tags: list[str] | None = None) ->
  if __name__ == "__main__":
      from loguru import logger

-     llm = load_chat_model("azure/gpt-4.1")
-     logger.info(llm.invoke("Hello, world!"))
+     models_to_test = ["azure/gpt-5-chat", "anthropic/claude-4-sonnet-20250514", "gemini/gemini-2.5-pro"]
+     for model in models_to_test:
+         llm = load_chat_model(model)
+         logger.info(llm.invoke("Hi!"))
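
This refactor threads `temperature` through every provider branch instead of hard-coding it per provider. One caveat: the Anthropic API requires temperature 1 while extended thinking is enabled (as it is above), so non-default values are effectively for the `azure` and `gemini` branches. A short usage sketch (model name taken from the test list above; the temperature value is an arbitrary example):

# Usage sketch for the new signature; not part of the diff.
from universal_mcp.agents.llm import load_chat_model

llm = load_chat_model("gemini/gemini-2.5-pro", temperature=0.2)
print(llm.invoke("Hi!").content)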
universal_mcp/agents/planner/__init__.py
@@ -0,0 +1,37 @@
+ from langgraph.checkpoint.base import BaseCheckpointSaver
+
+ from universal_mcp.agents.base import BaseAgent
+ from universal_mcp.agents.llm import load_chat_model
+ from universal_mcp.agents.react import ReactAgent
+ from universal_mcp.tools.registry import ToolRegistry
+
+ from .graph import build_graph
+
+
+ class PlannerAgent(BaseAgent):
+     def __init__(
+         self,
+         name: str,
+         instructions: str,
+         model: str,
+         registry: ToolRegistry,
+         memory: BaseCheckpointSaver | None = None,
+         executor_agent_cls: type[BaseAgent] = ReactAgent,
+         **kwargs,
+     ):
+         super().__init__(name, instructions, model, memory, **kwargs)
+         self.app_registry = registry
+         self.llm = load_chat_model(model)
+         self.executor_agent_cls = executor_agent_cls
+
+     async def _build_graph(self):
+         return build_graph(self.llm, self.app_registry, self.instructions, self.model, self.executor_agent_cls).compile(
+             checkpointer=self.memory
+         )
+
+     @property
+     def graph(self):
+         return self._graph
+
+
+ __all__ = ["PlannerAgent"]
universal_mcp/agents/planner/__main__.py
@@ -0,0 +1,24 @@
+ import asyncio
+
+ from universal_mcp.agentr.registry import AgentrRegistry
+ from universal_mcp.agents.planner import PlannerAgent
+
+
+ async def main():
+     registry = AgentrRegistry()
+     agent = PlannerAgent(
+         name="planner-agent",
+         instructions="You are a helpful assistant.",
+         model="gemini/gemini-2.5-flash",
+         registry=registry,
+     )
+     from rich.console import Console
+
+     console = Console()
+     console.print("Starting agent...", style="yellow")
+     async for event in agent.stream(user_input="Send an email to manoj@agentr.dev'", thread_id="xyz"):
+         console.print(event.content, style="red")
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
universal_mcp/agents/planner/graph.py
@@ -0,0 +1,82 @@
+ from typing import Any
+
+ from langchain_core.messages import AIMessage
+ from langgraph.graph import END, START, StateGraph
+ from loguru import logger
+
+ from universal_mcp.agents.shared.tool_node import build_tool_node_graph
+ from universal_mcp.types import AgentrToolConfig, ToolConfig
+
+ from .state import State
+
+
+ def build_graph(llm, registry, instructions, model, executor_agent_cls):
+     """Build the graph for the planner agent."""
+     graph_builder = StateGraph(State)
+
+     async def _tool_finder_node(state: State) -> dict[str, Any]:
+         """Runs the tool finder subgraph to identify necessary tools."""
+         task = state["messages"][-1].content
+         logger.info(f"Running tool finder for task: {task}")
+         tool_finder_graph = build_tool_node_graph(llm, registry)
+         tool_finder_state = await tool_finder_graph.ainvoke({"task": task, "messages": state["messages"]})
+
+         if not tool_finder_state.get("apps_required"):
+             logger.info("Tool finder determined no apps are required.")
+             return {"apps_with_tools": AgentrToolConfig(agentrServers={})}
+
+         apps_with_tools = tool_finder_state.get("apps_with_tools", AgentrToolConfig(agentrServers={}))
+         logger.info(f"Tool finder identified apps and tools: {apps_with_tools}")
+         return {"apps_with_tools": apps_with_tools, "task": task}
+
+     def _should_continue(state: State) -> str:
+         """Determines whether to continue to the executor or end."""
+         if state.get("apps_with_tools") and state["apps_with_tools"].agentrServers:
+             return "continue"
+         return "end"
+
+     async def _executor_node(state: State) -> dict[str, Any]:
+         """Executes the task with the identified tools."""
+         tool_config = state["apps_with_tools"]
+
+         logger.info(f"Preparing executor with tools: {tool_config}")
+         agent = executor_agent_cls(
+             name="executor-agent",
+             instructions=instructions,
+             model=model,
+             registry=registry,
+             tools=ToolConfig(agentrServers=tool_config.agentrServers),
+         )
+
+         await agent.ainit()
+         react_graph = agent._graph
+         logger.info("Invoking executor agent with tools.")
+         # We invoke the agent to make it run the tool
+         response = await react_graph.ainvoke({"messages": state["messages"]})
+
+         final_message = AIMessage(content=response["messages"][-1].content)
+         return {"messages": [final_message]}
+
+     async def _no_tools_node(state: State) -> dict[str, Any]:
+         """Handles tasks that don't require tools by invoking the LLM directly."""
+         logger.info("No tools required. Invoking LLM directly.")
+         response = await llm.ainvoke(state["messages"])
+         return {"messages": [response]}
+
+     graph_builder.add_node("tool_finder", _tool_finder_node)
+     graph_builder.add_node("executor", _executor_node)
+     graph_builder.add_node("no_tools_executor", _no_tools_node)
+
+     graph_builder.add_edge(START, "tool_finder")
+     graph_builder.add_conditional_edges(
+         "tool_finder",
+         _should_continue,
+         {
+             "continue": "executor",
+             "end": "no_tools_executor",
+         },
+     )
+     graph_builder.add_edge("executor", END)
+     graph_builder.add_edge("no_tools_executor", END)
+
+     return graph_builder
universal_mcp/agents/planner/prompts.py
@@ -0,0 +1 @@
+ # Prompts for the planner agent
universal_mcp/agents/planner/state.py
@@ -0,0 +1,12 @@
+ from typing import Annotated
+
+ from langgraph.graph.message import add_messages
+ from typing_extensions import TypedDict
+
+ from universal_mcp.types import AgentrToolConfig
+
+
+ class State(TypedDict):
+     messages: Annotated[list, add_messages]
+     task: str
+     apps_with_tools: AgentrToolConfig
universal_mcp/agents/shared/agent_node.py
@@ -0,0 +1,34 @@
+ from pydantic import BaseModel, Field
+
+
+ class Agent(BaseModel):
+     name: str = Field(description="The name of the agent")
+     description: str = Field(description="A small paragraph description of the agent")
+     expertise: str = Field(description="Agents expertise. Growth expert, SEO expert, etc")
+     instructions: str = Field(description="The instructions for the agent")
+     schedule: str = Field(
+         description="The schedule for the agent in crontab syntax (e.g., '0 9 * * *' for daily at 9 AM)"
+     )
+
+
+ AGENT_PROMPT = """You are an AI assistant that creates autonomous agents based on user requests.
+
+ Your task is to analyze the user's request and create a structured agent definition that includes:
+ - A clear name for the agent
+ - A concise description of what the agent does
+ - The agent's area of expertise
+ - Detailed instructions for executing the task
+ - A cron schedule for when the agent should run
+ - A list of apps/services the agent will need to use
+
+ Be specific and actionable in your agent definitions. Consider the user's intent and create agents that can effectively accomplish their goals.
+
+ <query>
+ {query}
+ </query>
+ """
+
+
+ async def generate_agent(llm, query):
+     response = await llm.with_structured_output(Agent).ainvoke(input=AGENT_PROMPT.format(query=query))
+     return response
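
`generate_agent` relies on LangChain's `with_structured_output` to coerce the model's reply into the `Agent` schema. Note that the prompt asks for a list of apps/services, but the `Agent` model defines no corresponding field, so that item is dropped from the structured output. A minimal driver sketch, assuming `load_chat_model` from this diff; the query text is invented:

# Hypothetical driver for generate_agent; not part of the diff.
import asyncio

from universal_mcp.agents.llm import load_chat_model
from universal_mcp.agents.shared.agent_node import Agent, generate_agent


async def main():
    llm = load_chat_model("gemini/gemini-2.5-pro")
    agent: Agent = await generate_agent(llm, "Email me a weekly SEO report every Monday at 9 AM")
    print(agent.model_dump_json(indent=2))  # Agent is a pydantic model


if __name__ == "__main__":
    asyncio.run(main())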