universal-mcp 0.1.24rc14__py3-none-any.whl → 0.1.24rc17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/agentr/registry.py +4 -4
- universal_mcp/applications/application.py +0 -2
- universal_mcp/applications/utils.py +52 -0
- universal_mcp/servers/server.py +4 -3
- universal_mcp/tools/manager.py +0 -3
- universal_mcp/types.py +1 -21
- universal_mcp/utils/prompts.py +0 -2
- universal_mcp/utils/testing.py +1 -1
- {universal_mcp-0.1.24rc14.dist-info → universal_mcp-0.1.24rc17.dist-info}/METADATA +2 -1
- universal_mcp-0.1.24rc17.dist-info/RECORD +54 -0
- universal_mcp/__init__.py +0 -0
- universal_mcp/agents/__init__.py +0 -10
- universal_mcp/agents/autoagent/__init__.py +0 -30
- universal_mcp/agents/autoagent/__main__.py +0 -25
- universal_mcp/agents/autoagent/context.py +0 -26
- universal_mcp/agents/autoagent/graph.py +0 -151
- universal_mcp/agents/autoagent/prompts.py +0 -9
- universal_mcp/agents/autoagent/state.py +0 -27
- universal_mcp/agents/autoagent/studio.py +0 -25
- universal_mcp/agents/autoagent/utils.py +0 -13
- universal_mcp/agents/base.py +0 -129
- universal_mcp/agents/bigtool/__init__.py +0 -54
- universal_mcp/agents/bigtool/__main__.py +0 -24
- universal_mcp/agents/bigtool/context.py +0 -24
- universal_mcp/agents/bigtool/graph.py +0 -166
- universal_mcp/agents/bigtool/prompts.py +0 -31
- universal_mcp/agents/bigtool/state.py +0 -27
- universal_mcp/agents/bigtool2/__init__.py +0 -53
- universal_mcp/agents/bigtool2/__main__.py +0 -24
- universal_mcp/agents/bigtool2/agent.py +0 -11
- universal_mcp/agents/bigtool2/context.py +0 -33
- universal_mcp/agents/bigtool2/graph.py +0 -169
- universal_mcp/agents/bigtool2/prompts.py +0 -12
- universal_mcp/agents/bigtool2/state.py +0 -27
- universal_mcp/agents/builder.py +0 -80
- universal_mcp/agents/cli.py +0 -27
- universal_mcp/agents/codeact/__init__.py +0 -243
- universal_mcp/agents/codeact/sandbox.py +0 -27
- universal_mcp/agents/codeact/test.py +0 -15
- universal_mcp/agents/codeact/utils.py +0 -61
- universal_mcp/agents/hil.py +0 -104
- universal_mcp/agents/llm.py +0 -45
- universal_mcp/agents/planner/__init__.py +0 -37
- universal_mcp/agents/planner/__main__.py +0 -24
- universal_mcp/agents/planner/graph.py +0 -82
- universal_mcp/agents/planner/prompts.py +0 -1
- universal_mcp/agents/planner/state.py +0 -12
- universal_mcp/agents/react.py +0 -84
- universal_mcp/agents/shared/agent_node.py +0 -34
- universal_mcp/agents/shared/tool_node.py +0 -235
- universal_mcp/agents/simple.py +0 -40
- universal_mcp/agents/tools.py +0 -35
- universal_mcp/agents/utils.py +0 -111
- universal_mcp/analytics.py +0 -111
- universal_mcp/applications/__init__.py +0 -70
- universal_mcp/utils/common.py +0 -278
- universal_mcp-0.1.24rc14.dist-info/RECORD +0 -99
- {universal_mcp-0.1.24rc14.dist-info → universal_mcp-0.1.24rc17.dist-info}/WHEEL +0 -0
- {universal_mcp-0.1.24rc14.dist-info → universal_mcp-0.1.24rc17.dist-info}/entry_points.txt +0 -0
- {universal_mcp-0.1.24rc14.dist-info → universal_mcp-0.1.24rc17.dist-info}/licenses/LICENSE +0 -0
universal_mcp/agents/hil.py
DELETED
@@ -1,104 +0,0 @@
-from typing import Annotated, TypedDict
-
-from langchain_core.messages import HumanMessage
-from langgraph.constants import END, START
-from langgraph.graph import StateGraph
-from langgraph.graph.message import add_messages
-from langgraph.types import Interrupt, interrupt
-
-from .base import BaseAgent
-from .llm import load_chat_model
-
-
-class State(TypedDict):
-    messages: Annotated[list, add_messages]
-    name: str | None = None
-    favourite_color: str | None = None
-    human: bool | None = None
-
-
-def ask_name_node(state: State) -> State:
-    if state.get("name") is not None:
-        return state
-    name = interrupt({"question": "What is your name?", "type": "text"})
-    state.update(name=name, messages=[HumanMessage(content=f"My name is {name}")])
-    return state
-
-
-def ask_bool_node(state: State) -> State:
-    if state.get("human") is not None:
-        return state
-    bool = interrupt({"question": "Are you a human?", "type": "bool"})
-
-    if bool:
-        state.update(human=True, messages=[HumanMessage(content="I am human")])
-    else:
-        state.update(human=False, messages=[HumanMessage(content="I am AI agent")])
-    return state
-
-
-def ask_favourite_color_node(state: State) -> State:
-    if state.get("favourite_color") is not None:
-        return state
-    favourite_color = interrupt(
-        {"question": "What is your favourite color?", "type": "choice", "choices": ["red", "green", "blue"]}
-    )
-    state.update(
-        favourite_color=favourite_color, messages=[HumanMessage(content=f"My favourite color is {favourite_color}")]
-    )
-    return state
-
-
-def handle_interrupt(interrupt: Interrupt) -> str | bool:
-    interrupt_type = interrupt.value["type"]
-    if interrupt_type == "text":
-        value = input(interrupt.value["question"])
-        return value
-    elif interrupt_type == "bool":
-        value = input("Do you accept this? (y/n): " + interrupt.value["question"])
-        return value.lower() in ["y", "yes"]
-    elif interrupt_type == "choice":
-        value = input("Enter your choice: " + interrupt.value["question"] + " " + ", ".join(interrupt.value["choices"]))
-        if value in interrupt.value["choices"]:
-            return value
-        else:
-            return interrupt.value["choices"][0]
-    else:
-        raise ValueError(f"Invalid interrupt type: {interrupt.value['type']}")
-
-
-class HilAgent(BaseAgent):
-    def __init__(self, name: str, instructions: str, model: str):
-        super().__init__(name, instructions, model)
-        self.llm = load_chat_model(model)
-        self._graph = self._build_graph()
-
-    def chatbot(self, state: State):
-        return {"messages": [self.llm.invoke(state["messages"])]}
-
-    def _build_graph(self):
-        graph_builder = StateGraph(State)
-        graph_builder.add_node("ask_name_node", ask_name_node)
-        graph_builder.add_node("ask_bool_node", ask_bool_node)
-        graph_builder.add_node("ask_favourite_color_node", ask_favourite_color_node)
-        graph_builder.add_node("chatbot", self.chatbot)
-        graph_builder.add_edge(START, "ask_name_node")
-        graph_builder.add_edge("ask_name_node", "ask_bool_node")
-        graph_builder.add_edge("ask_bool_node", "ask_favourite_color_node")
-        graph_builder.add_edge("ask_favourite_color_node", "chatbot")
-        graph_builder.add_edge("chatbot", END)
-        return graph_builder.compile(checkpointer=self.memory)
-
-    @property
-    def graph(self):
-        return self._graph
-
-
-if __name__ == "__main__":
-    import asyncio
-
-    agent = HilAgent(
-        "Hil Agent", "You are a friendly agent that asks for the user's name and greets them.", "openrouter/auto"
-    )
-
-    asyncio.run(agent.run_interactive())
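For context, the interrupt()/resume cycle this module depended on works as follows in plain langgraph. This is a minimal self-contained sketch, not code from the package: the tiny S state and the "demo" thread id are illustrative, and the "__interrupt__" key in invoke results assumes a recent langgraph release. A paused run surfaces the interrupt payload, and Command(resume=...) feeds the answer back in.

    from typing import TypedDict

    from langgraph.checkpoint.memory import MemorySaver
    from langgraph.graph import END, START, StateGraph
    from langgraph.types import Command, interrupt


    class S(TypedDict):
        name: str


    def ask_name(state: S) -> S:
        # Pauses the run; the payload mirrors the dicts hil.py passed to interrupt().
        answer = interrupt({"question": "What is your name?", "type": "text"})
        return {"name": answer}


    builder = StateGraph(S)
    builder.add_node("ask_name", ask_name)
    builder.add_edge(START, "ask_name")
    builder.add_edge("ask_name", END)
    graph = builder.compile(checkpointer=MemorySaver())  # resuming requires a checkpointer

    config = {"configurable": {"thread_id": "demo"}}
    result = graph.invoke({"name": ""}, config=config)
    while "__interrupt__" in result:  # still paused inside interrupt()
        payload = result["__interrupt__"][0].value
        result = graph.invoke(Command(resume=input(payload["question"])), config=config)
    print(result)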
universal_mcp/agents/llm.py
DELETED
@@ -1,45 +0,0 @@
-from langchain_anthropic import ChatAnthropic
-from langchain_core.language_models import BaseChatModel
-from langchain_google_genai import ChatGoogleGenerativeAI
-from langchain_openai import AzureChatOpenAI
-
-
-def load_chat_model(
-    fully_specified_name: str, temperature: float = 1.0, tags: list[str] | None = None
-) -> BaseChatModel:
-    """Load a chat model from a fully specified name.
-    Args:
-        fully_specified_name (str): String in the format 'provider/model'.
-    """
-    provider, model = fully_specified_name.split("/", maxsplit=1)
-    if provider == "anthropic":
-        return ChatAnthropic(
-            model=model,
-            temperature=temperature,
-            thinking={"type": "enabled", "budget_tokens": 2048},
-            max_tokens=4096,
-            tags=tags,
-            stream_usage=True,
-        )  # pyright: ignore[reportCallIssue]
-    elif provider == "azure":
-        return AzureChatOpenAI(
-            model=model,
-            api_version="2024-12-01-preview",
-            azure_deployment=model,
-            temperature=temperature,
-            tags=tags,
-            stream_usage=True,
-        )
-    elif provider == "gemini":
-        return ChatGoogleGenerativeAI(model=model, temperature=temperature)
-    else:
-        raise ValueError(f"Unsupported provider: {provider}")
-
-
-if __name__ == "__main__":
-    from loguru import logger
-
-    models_to_test = ["azure/gpt-5-chat", "anthropic/claude-4-sonnet-20250514", "gemini/gemini-2.5-pro"]
-    for model in models_to_test:
-        llm = load_chat_model(model)
-        logger.info(llm.invoke("Hi!"))
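A usage sketch for the removed helper, assuming the pre-removal package (universal-mcp 0.1.24rc14) is installed and ANTHROPIC_API_KEY is set; the model id is taken from the module's own test list. The name is split once on "/", so model ids containing further slashes pass through intact.

    from universal_mcp.agents.llm import load_chat_model

    # "anthropic" selects ChatAnthropic; everything after the first "/" is the model id.
    llm = load_chat_model("anthropic/claude-4-sonnet-20250514", temperature=0.2)
    print(llm.invoke("Hi!").content)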
universal_mcp/agents/planner/__init__.py
DELETED
@@ -1,37 +0,0 @@
-from langgraph.checkpoint.base import BaseCheckpointSaver
-
-from universal_mcp.agents.base import BaseAgent
-from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.agents.react import ReactAgent
-from universal_mcp.tools.registry import ToolRegistry
-
-from .graph import build_graph
-
-
-class PlannerAgent(BaseAgent):
-    def __init__(
-        self,
-        name: str,
-        instructions: str,
-        model: str,
-        registry: ToolRegistry,
-        memory: BaseCheckpointSaver | None = None,
-        executor_agent_cls: type[BaseAgent] = ReactAgent,
-        **kwargs,
-    ):
-        super().__init__(name, instructions, model, memory, **kwargs)
-        self.app_registry = registry
-        self.llm = load_chat_model(model)
-        self.executor_agent_cls = executor_agent_cls
-
-    async def _build_graph(self):
-        return build_graph(self.llm, self.app_registry, self.instructions, self.model, self.executor_agent_cls).compile(
-            checkpointer=self.memory
-        )
-
-    @property
-    def graph(self):
-        return self._graph
-
-
-__all__ = ["PlannerAgent"]
universal_mcp/agents/planner/__main__.py
DELETED
@@ -1,24 +0,0 @@
-import asyncio
-
-from universal_mcp.agentr.registry import AgentrRegistry
-from universal_mcp.agents.planner import PlannerAgent
-
-
-async def main():
-    registry = AgentrRegistry()
-    agent = PlannerAgent(
-        name="planner-agent",
-        instructions="You are a helpful assistant.",
-        model="gemini/gemini-2.5-flash",
-        registry=registry,
-    )
-    from rich.console import Console
-
-    console = Console()
-    console.print("Starting agent...", style="yellow")
-    async for event in agent.stream(user_input="Send an email to manoj@agentr.dev'", thread_id="xyz"):
-        console.print(event.content, style="red")
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
universal_mcp/agents/planner/graph.py
DELETED
@@ -1,82 +0,0 @@
-from typing import Any
-
-from langchain_core.messages import AIMessage
-from langgraph.graph import END, START, StateGraph
-from loguru import logger
-
-from universal_mcp.agents.shared.tool_node import build_tool_node_graph
-from universal_mcp.types import AgentrToolConfig, ToolConfig
-
-from .state import State
-
-
-def build_graph(llm, registry, instructions, model, executor_agent_cls):
-    """Build the graph for the planner agent."""
-    graph_builder = StateGraph(State)
-
-    async def _tool_finder_node(state: State) -> dict[str, Any]:
-        """Runs the tool finder subgraph to identify necessary tools."""
-        task = state["messages"][-1].content
-        logger.info(f"Running tool finder for task: {task}")
-        tool_finder_graph = build_tool_node_graph(llm, registry)
-        tool_finder_state = await tool_finder_graph.ainvoke({"task": task, "messages": state["messages"]})
-
-        if not tool_finder_state.get("apps_required"):
-            logger.info("Tool finder determined no apps are required.")
-            return {"apps_with_tools": AgentrToolConfig(agentrServers={})}
-
-        apps_with_tools = tool_finder_state.get("apps_with_tools", AgentrToolConfig(agentrServers={}))
-        logger.info(f"Tool finder identified apps and tools: {apps_with_tools}")
-        return {"apps_with_tools": apps_with_tools, "task": task}
-
-    def _should_continue(state: State) -> str:
-        """Determines whether to continue to the executor or end."""
-        if state.get("apps_with_tools") and state["apps_with_tools"].agentrServers:
-            return "continue"
-        return "end"
-
-    async def _executor_node(state: State) -> dict[str, Any]:
-        """Executes the task with the identified tools."""
-        tool_config = state["apps_with_tools"]
-
-        logger.info(f"Preparing executor with tools: {tool_config}")
-        agent = executor_agent_cls(
-            name="executor-agent",
-            instructions=instructions,
-            model=model,
-            registry=registry,
-            tools=ToolConfig(agentrServers=tool_config.agentrServers),
-        )
-
-        await agent.ainit()
-        react_graph = agent._graph
-        logger.info("Invoking executor agent with tools.")
-        # We invoke the agent to make it run the tool
-        response = await react_graph.ainvoke({"messages": state["messages"]})
-
-        final_message = AIMessage(content=response["messages"][-1].content)
-        return {"messages": [final_message]}
-
-    async def _no_tools_node(state: State) -> dict[str, Any]:
-        """Handles tasks that don't require tools by invoking the LLM directly."""
-        logger.info("No tools required. Invoking LLM directly.")
-        response = await llm.ainvoke(state["messages"])
-        return {"messages": [response]}
-
-    graph_builder.add_node("tool_finder", _tool_finder_node)
-    graph_builder.add_node("executor", _executor_node)
-    graph_builder.add_node("no_tools_executor", _no_tools_node)
-
-    graph_builder.add_edge(START, "tool_finder")
-    graph_builder.add_conditional_edges(
-        "tool_finder",
-        _should_continue,
-        {
-            "continue": "executor",
-            "end": "no_tools_executor",
-        },
-    )
-    graph_builder.add_edge("executor", END)
-    graph_builder.add_edge("no_tools_executor", END)
-
-    return graph_builder
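The routing above is the standard LangGraph pattern: a router function returns a label, and add_conditional_edges maps each label to a node. A self-contained sketch of the same pattern (the toy S state and node names are illustrative, not from this package):

    from typing import TypedDict

    from langgraph.graph import END, START, StateGraph


    class S(TypedDict):
        n: int


    def router(state: S) -> str:
        # Returns a label, just like _should_continue above.
        return "continue" if state["n"] > 0 else "end"


    builder = StateGraph(S)
    builder.add_node("work", lambda s: {"n": s["n"] - 1})
    builder.add_node("fallback", lambda s: s)
    builder.add_edge(START, "work")
    builder.add_conditional_edges("work", router, {"continue": "work", "end": "fallback"})
    builder.add_edge("fallback", END)
    print(builder.compile().invoke({"n": 3}))  # loops "work" until n == 0, then "fallback"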
universal_mcp/agents/planner/prompts.py
DELETED
@@ -1 +0,0 @@
-# Prompts for the planner agent
universal_mcp/agents/planner/state.py
DELETED
@@ -1,12 +0,0 @@
-from typing import Annotated
-
-from langgraph.graph.message import add_messages
-from typing_extensions import TypedDict
-
-from universal_mcp.types import AgentrToolConfig
-
-
-class State(TypedDict):
-    messages: Annotated[list, add_messages]
-    task: str
-    apps_with_tools: AgentrToolConfig
universal_mcp/agents/react.py
DELETED
@@ -1,84 +0,0 @@
-from langgraph.checkpoint.base import BaseCheckpointSaver
-from langgraph.prebuilt import create_react_agent
-from loguru import logger
-
-from universal_mcp.agentr.registry import AgentrRegistry
-from universal_mcp.agents.base import BaseAgent
-from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.agents.tools import load_mcp_tools
-from universal_mcp.tools.registry import ToolRegistry
-from universal_mcp.types import ToolConfig, ToolFormat
-
-
-class ReactAgent(BaseAgent):
-    def __init__(
-        self,
-        name: str,
-        instructions: str,
-        model: str,
-        memory: BaseCheckpointSaver | None = None,
-        tools: ToolConfig | None = None,
-        registry: ToolRegistry | None = None,
-        max_iterations: int = 10,
-        **kwargs,
-    ):
-        super().__init__(name, instructions, model, memory, **kwargs)
-        self.llm = load_chat_model(model)
-        self.tools = tools
-        self.max_iterations = max_iterations
-        self.registry = registry
-
-    async def _build_graph(self):
-        if self.tools:
-            config = self.tools.model_dump(exclude_none=True)
-            if config.get("agentrServers") and not self.registry:
-                raise ValueError("Agentr servers are configured but no registry is provided")
-            agentr_tools = (
-                await self.registry.export_tools(self.tools, ToolFormat.LANGCHAIN)
-                if config.get("agentrServers")
-                else []
-            )
-            logger.debug(agentr_tools)
-            mcp_tools = await load_mcp_tools(config["mcpServers"]) if config.get("mcpServers") else []
-            logger.debug(mcp_tools)
-            tools = agentr_tools + mcp_tools
-        else:
-            tools = []
-        logger.debug(f"Initialized ReactAgent: name={self.name}, model={self.model}")
-        return create_react_agent(
-            self.llm,
-            tools,
-            prompt=self._build_system_message(),
-            checkpointer=self.memory,
-        )
-
-    def _build_system_message(self) -> str:
-        system_message = f"""You are {self.name}.
-
-You have access to various tools that can help you answer questions and complete tasks. When you need to use a tool:
-
-1. Think about what information you need
-2. Call the appropriate tool with the right parameters
-3. Use the tool results to provide a comprehensive answer
-
-Always explain your reasoning and be thorough in your responses. If you need to use multiple tools to answer a question completely, do so.
-
-{self.instructions}
-"""
-        return system_message
-
-
-if __name__ == "__main__":
-    import asyncio
-
-    agent = ReactAgent(
-        "Universal React Agent",
-        instructions="",
-        model="azure/gpt-4o",
-        tools=ToolConfig(agentrServers={"google-mail": {"tools": ["send_email"]}}),
-        registry=AgentrRegistry(),
-    )
-    result = asyncio.run(
-        agent.invoke(user_input="Send an email with the subject 'testing react agent' to manoj@agentr.dev")
-    )
-    logger.info(result["messages"][-1].content)
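ReactAgent was a thin wrapper over langgraph's prebuilt ReAct loop. A self-contained sketch of that underlying call, using a plain function as the tool; the Anthropic model id is an assumption, and any LangChain chat model would do:

    from langchain_anthropic import ChatAnthropic
    from langgraph.prebuilt import create_react_agent


    def add(a: int, b: int) -> int:
        """Add two integers."""
        return a + b


    # create_react_agent wires the model, tool list, and system prompt into one graph.
    agent = create_react_agent(ChatAnthropic(model="claude-3-5-haiku-latest"), [add], prompt="Be concise.")
    result = agent.invoke({"messages": [("user", "What is 2 + 3?")]})
    print(result["messages"][-1].content)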
universal_mcp/agents/shared/agent_node.py
DELETED
@@ -1,34 +0,0 @@
-from pydantic import BaseModel, Field
-
-
-class Agent(BaseModel):
-    name: str = Field(description="The name of the agent")
-    description: str = Field(description="A small paragraph description of the agent")
-    expertise: str = Field(description="Agents expertise. Growth expert, SEO expert, etc")
-    instructions: str = Field(description="The instructions for the agent")
-    schedule: str = Field(
-        description="The schedule for the agent in crontab syntax (e.g., '0 9 * * *' for daily at 9 AM)"
-    )
-
-
-AGENT_PROMPT = """You are an AI assistant that creates autonomous agents based on user requests.
-
-Your task is to analyze the user's request and create a structured agent definition that includes:
-- A clear name for the agent
-- A concise description of what the agent does
-- The agent's area of expertise
-- Detailed instructions for executing the task
-- A cron schedule for when the agent should run
-- A list of apps/services the agent will need to use
-
-Be specific and actionable in your agent definitions. Consider the user's intent and create agents that can effectively accomplish their goals.
-
-<query>
-{query}
-</query>
-"""
-
-
-async def generate_agent(llm, query):
-    response = await llm.with_structured_output(Agent).ainvoke(input=AGENT_PROMPT.format(query=query))
-    return response
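generate_agent() relies on LangChain's with_structured_output, which binds a Pydantic schema to the model and parses the reply into an instance of it. A self-contained sketch of that call shape (toy schema; the model id is an assumption):

    from langchain_anthropic import ChatAnthropic
    from pydantic import BaseModel, Field


    class Reminder(BaseModel):
        title: str = Field(description="Short reminder title")
        cron: str = Field(description="Crontab schedule, e.g. '0 9 * * *'")


    llm = ChatAnthropic(model="claude-3-5-haiku-latest")
    # Returns a Reminder instance instead of a raw chat message.
    reminder = llm.with_structured_output(Reminder).invoke("Remind me to stretch every day at 9am")
    print(reminder.title, reminder.cron)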
universal_mcp/agents/shared/tool_node.py
DELETED
@@ -1,235 +0,0 @@
-# tool_node.py
-
-import asyncio
-from typing import Annotated, TypedDict
-
-from langchain_core.language_models import BaseChatModel
-from langchain_core.messages import AIMessage, AnyMessage, HumanMessage
-from langgraph.graph import END, StateGraph
-from langgraph.graph.message import add_messages
-from loguru import logger
-from pydantic import BaseModel, Field
-
-from universal_mcp.tools.registry import ToolRegistry
-from universal_mcp.types import AgentrConnection, AgentrToolConfig
-
-# --- LangGraph Agent ---
-
-
-class AgentState(TypedDict):
-    task: str
-    apps_required: bool
-    relevant_apps: list[str]
-    apps_with_tools: AgentrToolConfig
-    messages: Annotated[list[AnyMessage], add_messages]
-    reasoning: str
-
-
-class ToolSelectionOutput(BaseModel):
-    tool_ids: list[str] = Field(description="The ids of the tools to use")
-
-
-def build_tool_node_graph(llm: BaseChatModel, registry: ToolRegistry) -> StateGraph:
-    """Builds the LangGraph workflow."""
-
-    async def _check_if_app_needed(state: AgentState) -> AgentState:
-        """Checks if an external application is needed for the given task."""
-        task = state["task"]
-        prompt = f"""
-        Given the user's task: "{task}"
-        Does this task require an external application to be completed?
-        Your answer should be a simple "Yes" or "No", followed by a brief explanation.
-        For example:
-        Yes, an external application is needed to send emails.
-        No, this is a general question that can be answered directly.
-        """
-        response = await llm.ainvoke(prompt)
-        content = response.content.strip()
-        reasoning = f"Initial check for app requirement. LLM response: {content}"
-
-        if content.lower().startswith("yes"):
-            return {
-                **state,
-                "messages": [AIMessage(content=content)],
-                "apps_required": True,
-                "reasoning": reasoning,
-            }
-        else:
-            return {
-                **state,
-                "messages": [AIMessage(content=content)],
-                "apps_required": False,
-                "reasoning": reasoning,
-            }
-
-    async def _find_relevant_apps(state: AgentState) -> AgentState:
-        """Identifies relevant apps for the given task, preferring connected apps."""
-        task = state["task"]
-        all_apps = await registry.list_all_apps()
-        connected_apps = await registry.list_connected_apps()
-        prompt = """
-        You are an expert at identifying which applications are needed to complete specific tasks.
-
-        TASK: "{task}"
-
-        AVAILABLE APPS:
-        {all_apps}
-
-        CONNECTED APPS (user has already authenticated these):
-        {connected_apps}
-
-        INSTRUCTIONS:
-        1. Analyze the task carefully to understand what functionality is required.
-        2. Review the available apps and their descriptions to identify which ones could help.
-        3. If multiple apps can perform the task, prefer connected apps, but you MUST include all relevant apps.
-        4. Consider apps that provide complementary functionality for complex tasks.
-        5. Only suggest apps that are directly relevant to the core task requirements.
-        6. Your output should be a list of app IDs.
-
-        """
-
-        class AppList(BaseModel):
-            app_list: list[str]
-            reasoning: str
-
-        response = await llm.with_structured_output(AppList).ainvoke(
-            input=prompt.format(task=task, all_apps=all_apps, connected_apps=connected_apps)
-        )
-        app_list = response.app_list
-        reasoning = f"Found relevant apps: {app_list}. Reasoning: {response.reasoning}"
-        logger.info(f"Found relevant apps: {app_list}.")
-
-        return {
-            **state,
-            "messages": [AIMessage(content=f"Identified relevant apps: {', '.join(app_list)}")],
-            "relevant_apps": app_list,
-            "reasoning": state.get("reasoning", "") + "\n" + reasoning,
-        }
-
-    async def _select_tools(task: str, tools: list[dict]) -> list[str]:
-        """Selects the most appropriate tools from a list for a given task."""
-        tool_candidates = [f"{tool['name']}: {tool['description']}" for tool in tools]
-
-        SELECT_TOOL_PROMPT = f"""You are an AI assistant that helps the user perform tasks using various apps (each app has multiple tools).
-        You will be provided with a task and a list of tools which might be relevant for this task.
-
-        Your goal is to select the most appropriate tool for the given task.
-        <task>
-        {task}
-        </task>
-
-        <tool_candidates>
-        - {tool_candidates}
-        </tool_candidates>
-
-        Only return tool ids.
-        """
-
-        response = await llm.with_structured_output(schema=ToolSelectionOutput).ainvoke(input=SELECT_TOOL_PROMPT)
-
-        selected_tool_ids = response.tool_ids
-        return selected_tool_ids
-
-    async def _generate_search_query(task: str) -> str:
-        """Generates a concise search query from the user's task."""
-        prompt = f"""
-        You are an expert at summarizing a user's task into a concise search query for finding relevant tools.
-        The query should capture all the main actions or intents of the task.
-
-        For example:
-        Task: "Send an email to abc@the-read-example.com with the subject 'Hello'"
-        Query: "send email"
-
-        Task: "Create a new contact in my CRM for John Doe"
-        Query: "create contact"
-
-        Task: "Find the latest news about artificial intelligence"
-        Query: "search news"
-
-        Task: "Post a message to the #general channel in Slack and create a new issue in Jira"
-        Query: "send message, create issue"
-
-        Task: "{task}"
-        """
-
-        class SearchQuery(BaseModel):
-            query: str
-
-        response = await llm.with_structured_output(SearchQuery).ainvoke(input=prompt.format(task=task))
-        query = response.query
-        logger.info(f"Generated search query '{query}' for task '{task}'")
-        return query
-
-    async def _search_tools(state: AgentState) -> AgentState:
-        """Searches for and filters tools in the relevant apps."""
-        task = state["task"]
-        logger.info(f"Searching for tools in relevant apps for task: {task}")
-        search_query = await _generate_search_query(task)
-        apps_with_tools_dict = {}
-        reasoning_steps = []
-        for app_name in state["relevant_apps"]:
-            logger.info(f"Searching for tools in {app_name} for task: {task} with query '{search_query}'")
-            found_tools = await registry.search_tools(query=search_query, app_id=app_name)
-            selected_tools = await _select_tools(task, found_tools)
-            apps_with_tools_dict[app_name] = selected_tools
-            reasoning_steps.append(f"For '{app_name}', selected tool(s): {', '.join(selected_tools)}.")
-
-        agentr_servers = {app_name: AgentrConnection(tools=tools) for app_name, tools in apps_with_tools_dict.items()}
-        tool_config = AgentrToolConfig(agentrServers=agentr_servers)
-
-        return {
-            **state,
-            "apps_with_tools": tool_config,
-            "reasoning": state.get("reasoning", "") + "\n" + "\n".join(reasoning_steps),
-        }
-
-    def _handle_no_apps_found(state: AgentState) -> AgentState:
-        """Handles the case where no relevant apps are found."""
-        reasoning = "No suitable application was found among the available apps."
-        return {
-            **state,
-            "apps_with_tools": AgentrToolConfig(agentrServers={}),
-            "reasoning": state.get("reasoning", "") + "\n" + reasoning,
-        }
-
-    workflow = StateGraph(AgentState)
-
-    workflow.add_node("check_if_app_needed", _check_if_app_needed)
-    workflow.add_node("find_relevant_apps", _find_relevant_apps)
-    workflow.add_node("search_tools", _search_tools)
-    workflow.add_node("handle_no_apps_found", _handle_no_apps_found)
-
-    workflow.set_entry_point("check_if_app_needed")
-
-    workflow.add_conditional_edges(
-        "check_if_app_needed",
-        lambda state: "find_relevant_apps" if state["apps_required"] else END,
-    )
-    workflow.add_conditional_edges(
-        "find_relevant_apps",
-        lambda state: "search_tools" if state["relevant_apps"] else "handle_no_apps_found",
-    )
-
-    workflow.add_edge("search_tools", END)
-    workflow.add_edge("handle_no_apps_found", END)
-
-    return workflow.compile()
-
-
-async def main():
-    from universal_mcp.agentr.registry import AgentrRegistry
-    from universal_mcp.agents.llm import load_chat_model
-
-    registry = AgentrRegistry()
-    llm = load_chat_model("gemini/gemini-2.5-flash")
-    graph = build_tool_node_graph(llm, registry)
-    initial_state = {
-        "task": "Send an email to manoj@agentr.dev",
-        "messages": [HumanMessage(content="Send an email to manoj@agentr.dev")],
-    }
-    result = await graph.ainvoke(initial_state)
-    print(result)
-
-
-if __name__ == "__main__":
-    asyncio.run(main())