universal-mcp-agents 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of universal-mcp-agents might be problematic.
- universal_mcp/agents/__init__.py +19 -0
- universal_mcp/agents/autoagent/__init__.py +30 -0
- universal_mcp/agents/autoagent/__main__.py +25 -0
- universal_mcp/agents/autoagent/context.py +26 -0
- universal_mcp/agents/autoagent/graph.py +151 -0
- universal_mcp/agents/autoagent/prompts.py +9 -0
- universal_mcp/agents/autoagent/state.py +27 -0
- universal_mcp/agents/autoagent/studio.py +25 -0
- universal_mcp/agents/autoagent/utils.py +13 -0
- universal_mcp/agents/base.py +129 -0
- universal_mcp/agents/bigtool/__init__.py +54 -0
- universal_mcp/agents/bigtool/__main__.py +24 -0
- universal_mcp/agents/bigtool/context.py +24 -0
- universal_mcp/agents/bigtool/graph.py +166 -0
- universal_mcp/agents/bigtool/prompts.py +31 -0
- universal_mcp/agents/bigtool/state.py +27 -0
- universal_mcp/agents/bigtool2/__init__.py +53 -0
- universal_mcp/agents/bigtool2/__main__.py +24 -0
- universal_mcp/agents/bigtool2/agent.py +11 -0
- universal_mcp/agents/bigtool2/context.py +33 -0
- universal_mcp/agents/bigtool2/graph.py +169 -0
- universal_mcp/agents/bigtool2/prompts.py +12 -0
- universal_mcp/agents/bigtool2/state.py +27 -0
- universal_mcp/agents/bigtoolcache/__init__.py +53 -0
- universal_mcp/agents/bigtoolcache/__main__.py +24 -0
- universal_mcp/agents/bigtoolcache/agent.py +11 -0
- universal_mcp/agents/bigtoolcache/context.py +33 -0
- universal_mcp/agents/bigtoolcache/graph.py +176 -0
- universal_mcp/agents/bigtoolcache/prompts.py +13 -0
- universal_mcp/agents/bigtoolcache/state.py +27 -0
- universal_mcp/agents/builder.py +146 -0
- universal_mcp/agents/cli.py +27 -0
- universal_mcp/agents/codeact/__init__.py +243 -0
- universal_mcp/agents/codeact/sandbox.py +27 -0
- universal_mcp/agents/codeact/test.py +15 -0
- universal_mcp/agents/codeact/utils.py +61 -0
- universal_mcp/agents/hil.py +104 -0
- universal_mcp/agents/llm.py +45 -0
- universal_mcp/agents/planner/__init__.py +37 -0
- universal_mcp/agents/planner/__main__.py +24 -0
- universal_mcp/agents/planner/graph.py +81 -0
- universal_mcp/agents/planner/prompts.py +1 -0
- universal_mcp/agents/planner/state.py +12 -0
- universal_mcp/agents/react.py +76 -0
- universal_mcp/agents/shared/tool_node.py +236 -0
- universal_mcp/agents/simple.py +40 -0
- universal_mcp/agents/tools.py +35 -0
- universal_mcp/agents/utils.py +111 -0
- universal_mcp_agents-0.1.2.dist-info/METADATA +21 -0
- universal_mcp_agents-0.1.2.dist-info/RECORD +51 -0
- universal_mcp_agents-0.1.2.dist-info/WHEEL +4 -0
@@ -0,0 +1,27 @@ universal_mcp/agents/codeact/sandbox.py
+import builtins
+import contextlib
+import io
+from typing import Any
+
+
+def eval_unsafe(code: str, _locals: dict[str, Any]) -> tuple[str, dict[str, Any]]:
+    # Store original keys before execution
+    original_keys = set(_locals.keys())
+    result = f"Executing code...\n{code}\n\nOutput:\n"
+    result += "=" * 50 + "\n"
+    try:
+        with contextlib.redirect_stdout(io.StringIO()) as f:
+            # Execute the code in the provided locals context
+            # Using exec to allow dynamic code execution
+            # This is a simplified version; in production, consider security implications
+            exec(code, builtins.__dict__, _locals)
+        result += f.getvalue()
+        if not f.getvalue():
+            result += "<code ran, no output printed to stdout>"
+    except Exception as e:
+        result += f"Error during execution: {repr(e)}"
+
+    # Determine new variables created during execution
+    new_keys = set(_locals.keys()) - original_keys
+    new_vars = {key: _locals[key] for key in new_keys}
+    return result, new_vars
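A minimal sketch of how this sandbox function might be driven. The snippet and its variable names are illustrative, not part of the package:

# Illustrative driver for eval_unsafe (not part of the package).
locals_ctx: dict = {}
log, new_vars = eval_unsafe("x = 2 + 2\nprint(x)", locals_ctx)
print(log)       # echoes the code, a "=" separator line, then "4"
print(new_vars)  # {'x': 4}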
@@ -0,0 +1,15 @@ universal_mcp/agents/codeact/test.py
+from universal_mcp.agentr import Agentr
+from universal_mcp.agents.codeact import create_codeact
+from universal_mcp.agents.codeact.sandbox import eval_unsafe
+from universal_mcp.agents.llm import load_chat_model
+from universal_mcp.tools.adapters import ToolFormat
+
+model = load_chat_model("gpt-4.1")
+
+agentr = Agentr()
+agentr.load_tools(["google-mail_send_email"])
+
+tools = agentr.list_tools(format=ToolFormat.NATIVE)
+
+code_act = create_codeact(model, tools, eval_unsafe)
+agent = code_act.compile()
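The compiled object can then be run like any LangGraph graph. A sketch, assuming the compiled codeact graph follows the usual LangGraph messages-state convention (the input shape here is an assumption, not confirmed by this diff):

# Sketch: invoking the compiled codeact graph (messages-style state assumed).
result = agent.invoke({"messages": [("user", "Email manoj@agentr.dev a hello note")]})
print(result["messages"][-1].content)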
@@ -0,0 +1,61 @@ universal_mcp/agents/codeact/utils.py
+import re
+
+BACKTICK_PATTERN = r"(?:^|\n)```(.*?)(?:```(?:\n|$))"
+
+
+def extract_and_combine_codeblocks(text: str) -> str:
+    """
+    Extracts all codeblocks from a text string and combines them into a single code string.
+
+    Args:
+        text: A string containing zero or more codeblocks, where each codeblock is
+            surrounded by triple backticks (```).
+
+    Returns:
+        A string containing the combined code from all codeblocks, with each codeblock
+        separated by a newline.
+
+    Example:
+        text = '''Here's some code:
+
+        ```python
+        print('hello')
+        ```
+        And more:
+
+        ```
+        print('world')
+        ```'''
+
+        result = extract_and_combine_codeblocks(text)
+
+        Result:
+
+        print('hello')
+
+        print('world')
+    """
+    # Find all code blocks in the text using regex
+    # Pattern matches anything between triple backticks, with or without a language identifier
+    code_blocks = re.findall(BACKTICK_PATTERN, text, re.DOTALL)
+
+    if not code_blocks:
+        return ""
+
+    # Process each codeblock
+    processed_blocks = []
+    for block in code_blocks:
+        # Strip leading and trailing whitespace
+        block = block.strip()
+
+        # If the first line looks like a language identifier, remove it
+        lines = block.split("\n")
+        if lines and (not lines[0].strip() or " " not in lines[0].strip()):
+            # First line is empty or likely a language identifier (no spaces)
+            block = "\n".join(lines[1:])
+
+        processed_blocks.append(block)
+
+    # Combine all codeblocks with newlines between them
+    combined_code = "\n\n".join(processed_blocks)
+    return combined_code
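A worked example of the function above, verified against its own regex and stripping logic:

# Example: both fenced blocks are extracted, language identifiers dropped, results joined.
sample = "Intro\n```python\nx = 1\n```\ntext\n```python\ny = 2\n```"
print(extract_and_combine_codeblocks(sample))
# x = 1
#
# y = 2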
@@ -0,0 +1,104 @@ universal_mcp/agents/hil.py
+from typing import Annotated, TypedDict
+
+from langchain_core.messages import HumanMessage
+from langgraph.constants import END, START
+from langgraph.graph import StateGraph
+from langgraph.graph.message import add_messages
+from langgraph.types import Interrupt, interrupt
+
+from .base import BaseAgent
+from .llm import load_chat_model
+
+
+class State(TypedDict):
+    messages: Annotated[list, add_messages]
+    name: str | None
+    favourite_color: str | None
+    human: bool | None
+
+
+def ask_name_node(state: State) -> State:
+    if state.get("name") is not None:
+        return state
+    name = interrupt({"question": "What is your name?", "type": "text"})
+    state.update(name=name, messages=[HumanMessage(content=f"My name is {name}")])
+    return state
+
+
+def ask_bool_node(state: State) -> State:
+    if state.get("human") is not None:
+        return state
+    is_human = interrupt({"question": "Are you a human?", "type": "bool"})
+
+    if is_human:
+        state.update(human=True, messages=[HumanMessage(content="I am human")])
+    else:
+        state.update(human=False, messages=[HumanMessage(content="I am AI agent")])
+    return state
+
+
+def ask_favourite_color_node(state: State) -> State:
+    if state.get("favourite_color") is not None:
+        return state
+    favourite_color = interrupt(
+        {"question": "What is your favourite color?", "type": "choice", "choices": ["red", "green", "blue"]}
+    )
+    state.update(
+        favourite_color=favourite_color, messages=[HumanMessage(content=f"My favourite color is {favourite_color}")]
+    )
+    return state
+
+
+def handle_interrupt(interrupt: Interrupt) -> str | bool:
+    interrupt_type = interrupt.value["type"]
+    if interrupt_type == "text":
+        value = input(interrupt.value["question"])
+        return value
+    elif interrupt_type == "bool":
+        value = input("Do you accept this? (y/n): " + interrupt.value["question"])
+        return value.lower() in ["y", "yes"]
+    elif interrupt_type == "choice":
+        value = input("Enter your choice: " + interrupt.value["question"] + " " + ", ".join(interrupt.value["choices"]))
+        if value in interrupt.value["choices"]:
+            return value
+        else:
+            return interrupt.value["choices"][0]
+    else:
+        raise ValueError(f"Invalid interrupt type: {interrupt.value['type']}")
+
+
+class HilAgent(BaseAgent):
+    def __init__(self, name: str, instructions: str, model: str):
+        super().__init__(name, instructions, model)
+        self.llm = load_chat_model(model)
+        self._graph = self._build_graph()
+
+    def chatbot(self, state: State):
+        return {"messages": [self.llm.invoke(state["messages"])]}
+
+    def _build_graph(self):
+        graph_builder = StateGraph(State)
+        graph_builder.add_node("ask_name_node", ask_name_node)
+        graph_builder.add_node("ask_bool_node", ask_bool_node)
+        graph_builder.add_node("ask_favourite_color_node", ask_favourite_color_node)
+        graph_builder.add_node("chatbot", self.chatbot)
+        graph_builder.add_edge(START, "ask_name_node")
+        graph_builder.add_edge("ask_name_node", "ask_bool_node")
+        graph_builder.add_edge("ask_bool_node", "ask_favourite_color_node")
+        graph_builder.add_edge("ask_favourite_color_node", "chatbot")
+        graph_builder.add_edge("chatbot", END)
+        return graph_builder.compile(checkpointer=self.memory)
+
+    @property
+    def graph(self):
+        return self._graph
+
+
+if __name__ == "__main__":
+    import asyncio
+
+    agent = HilAgent(
+        "Hil Agent", "You are a friendly agent that asks for the user's name and greets them.", "openrouter/auto"
+    )
+
+    asyncio.run(agent.run_interactive())
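Run outside run_interactive, each interrupt() call surfaces in the invoke result and is answered with a resume command. A sketch of that loop using the standard LangGraph Command API (the thread id is illustrative, and a checkpointer is assumed to be configured on the agent's memory):

# Sketch: resuming the HIL graph's interrupts (standard LangGraph pattern).
from langgraph.types import Command

config = {"configurable": {"thread_id": "demo"}}
state = agent.graph.invoke({"messages": []}, config)
while "__interrupt__" in state:
    answer = handle_interrupt(state["__interrupt__"][0])  # prompts on the console
    state = agent.graph.invoke(Command(resume=answer), config)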
@@ -0,0 +1,45 @@ universal_mcp/agents/llm.py
+from langchain_anthropic import ChatAnthropic
+from langchain_core.language_models import BaseChatModel
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_openai import AzureChatOpenAI
+
+
+def load_chat_model(
+    fully_specified_name: str, temperature: float = 1.0, tags: list[str] | None = None
+) -> BaseChatModel:
+    """Load a chat model from a fully specified name.
+    Args:
+        fully_specified_name (str): String in the format 'provider/model'.
+    """
+    provider, model = fully_specified_name.split("/", maxsplit=1)
+    if provider == "anthropic":
+        return ChatAnthropic(
+            model=model,
+            temperature=temperature,
+            # thinking={"type": "enabled", "budget_tokens": 2048},
+            max_tokens=4096,
+            tags=tags,
+            stream_usage=True,
+        )  # pyright: ignore[reportCallIssue]
+    elif provider == "azure":
+        return AzureChatOpenAI(
+            model=model,
+            api_version="2024-12-01-preview",
+            azure_deployment=model,
+            temperature=temperature,
+            tags=tags,
+            stream_usage=True,
+        )
+    elif provider == "gemini":
+        return ChatGoogleGenerativeAI(model=model, temperature=temperature)
+    else:
+        raise ValueError(f"Unsupported provider: {provider}")
+
+
+if __name__ == "__main__":
+    from loguru import logger
+
+    models_to_test = ["azure/gpt-5-chat", "anthropic/claude-4-sonnet-20250514", "gemini/gemini-2.5-pro"]
+    for model in models_to_test:
+        llm = load_chat_model(model)
+        logger.info(llm.invoke("Hi!"))
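Callers pass a 'provider/model' string, and the prefix before the first "/" selects the branch above. For example:

# Example: provider taken from the prefix; model name from the source's own test list.
llm = load_chat_model("anthropic/claude-4-sonnet-20250514", temperature=0.2)
reply = llm.invoke("Summarize MCP in one sentence.")
print(reply.content)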
@@ -0,0 +1,37 @@ universal_mcp/agents/planner/__init__.py
+from langgraph.checkpoint.base import BaseCheckpointSaver
+
+from universal_mcp.agents.base import BaseAgent
+from universal_mcp.agents.llm import load_chat_model
+from universal_mcp.agents.react import ReactAgent
+from universal_mcp.tools.registry import ToolRegistry
+
+from .graph import build_graph
+
+
+class PlannerAgent(BaseAgent):
+    def __init__(
+        self,
+        name: str,
+        instructions: str,
+        model: str,
+        registry: ToolRegistry,
+        memory: BaseCheckpointSaver | None = None,
+        executor_agent_cls: type[BaseAgent] = ReactAgent,
+        **kwargs,
+    ):
+        super().__init__(name, instructions, model, memory, **kwargs)
+        self.app_registry = registry
+        self.llm = load_chat_model(model)
+        self.executor_agent_cls = executor_agent_cls
+
+    async def _build_graph(self):
+        return build_graph(self.llm, self.app_registry, self.instructions, self.model, self.executor_agent_cls).compile(
+            checkpointer=self.memory
+        )
+
+    @property
+    def graph(self):
+        return self._graph
+
+
+__all__ = ["PlannerAgent"]
@@ -0,0 +1,24 @@ universal_mcp/agents/planner/__main__.py
+import asyncio
+
+from universal_mcp.agentr.registry import AgentrRegistry
+from universal_mcp.agents.planner import PlannerAgent
+
+
+async def main():
+    registry = AgentrRegistry()
+    agent = PlannerAgent(
+        name="planner-agent",
+        instructions="You are a helpful assistant.",
+        model="gemini/gemini-2.5-flash",
+        registry=registry,
+    )
+    from rich.console import Console
+
+    console = Console()
+    console.print("Starting agent...", style="yellow")
+    async for event in agent.stream(user_input="Send an email to manoj@agentr.dev", thread_id="xyz"):
+        console.print(event.content, style="red")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
@@ -0,0 +1,81 @@ universal_mcp/agents/planner/graph.py
+from typing import Any
+
+from langchain_core.messages import AIMessage
+from langgraph.graph import END, START, StateGraph
+from loguru import logger
+
+from universal_mcp.agents.shared.tool_node import build_tool_node_graph
+
+from .state import State
+
+
+def build_graph(llm, registry, instructions, model, executor_agent_cls):
+    """Build the graph for the planner agent."""
+    graph_builder = StateGraph(State)
+
+    async def _tool_finder_node(state: State) -> dict[str, Any]:
+        """Runs the tool finder subgraph to identify necessary tools."""
+        task = state["messages"][-1].content
+        logger.info(f"Running tool finder for task: {task}")
+        tool_finder_graph = build_tool_node_graph(llm, registry)
+        tool_finder_state = await tool_finder_graph.ainvoke({"task": task, "messages": state["messages"]})
+
+        if not tool_finder_state.get("apps_required"):
+            logger.info("Tool finder determined no apps are required.")
+            return {"apps_with_tools": {}}
+
+        apps_with_tools = tool_finder_state.get("apps_with_tools", {})
+        logger.info(f"Tool finder identified apps and tools: {apps_with_tools}")
+        return {"apps_with_tools": apps_with_tools, "task": task}
+
+    def _should_continue(state: State) -> str:
+        """Determines whether to continue to the executor or end."""
+        if state.get("apps_with_tools"):
+            return "continue"
+        return "end"
+
+    async def _executor_node(state: State) -> dict[str, Any]:
+        """Executes the task with the identified tools."""
+        tool_config = state["apps_with_tools"]
+
+        logger.info(f"Preparing executor with tools: {tool_config}")
+        agent = executor_agent_cls(
+            name="executor-agent",
+            instructions=instructions,
+            model=model,
+            registry=registry,
+            tools=tool_config,
+        )
+
+        await agent.ainit()
+        react_graph = agent._graph
+        logger.info("Invoking executor agent with tools.")
+        # We invoke the agent to make it run the tool
+        response = await react_graph.ainvoke({"messages": state["messages"]})
+
+        final_message = AIMessage(content=response["messages"][-1].content)
+        return {"messages": [final_message]}
+
+    async def _no_tools_node(state: State) -> dict[str, Any]:
+        """Handles tasks that don't require tools by invoking the LLM directly."""
+        logger.info("No tools required. Invoking LLM directly.")
+        response = await llm.ainvoke(state["messages"])
+        return {"messages": [response]}
+
+    graph_builder.add_node("tool_finder", _tool_finder_node)
+    graph_builder.add_node("executor", _executor_node)
+    graph_builder.add_node("no_tools_executor", _no_tools_node)
+
+    graph_builder.add_edge(START, "tool_finder")
+    graph_builder.add_conditional_edges(
+        "tool_finder",
+        _should_continue,
+        {
+            "continue": "executor",
+            "end": "no_tools_executor",
+        },
+    )
+    graph_builder.add_edge("executor", END)
+    graph_builder.add_edge("no_tools_executor", END)
+
+    return graph_builder
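PlannerAgent compiles this builder (see planner/__init__.py above). Standalone, the same wiring looks like this sketch, where llm and registry are assumed to be configured as in planner/__main__.py:

# Sketch: compiling and running the planner graph directly
# (llm and registry assumed configured, as in planner/__main__.py).
import asyncio

async def run_once():
    graph = build_graph(llm, registry, "You are a helpful assistant.", "gemini/gemini-2.5-flash", ReactAgent).compile()
    result = await graph.ainvoke({"messages": [("user", "Send an email to manoj@agentr.dev")]})
    print(result["messages"][-1].content)

asyncio.run(run_once())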
@@ -0,0 +1 @@ universal_mcp/agents/planner/prompts.py
+# Prompts for the planner agent
@@ -0,0 +1,12 @@ universal_mcp/agents/planner/state.py
+from typing import Annotated
+
+from langgraph.graph.message import add_messages
+from typing_extensions import TypedDict
+
+from universal_mcp.types import ToolConfig
+
+
+class State(TypedDict):
+    messages: Annotated[list, add_messages]
+    task: str
+    apps_with_tools: ToolConfig
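An illustrative State value mid-run, assuming ToolConfig maps app ids to tool names as in the react.py __main__ below (the literal values are taken from that example):

# Illustrative planner State after the tool finder has run.
state: State = {
    "messages": [],
    "task": "Send an email to manoj@agentr.dev",
    "apps_with_tools": {"google-mail": ["send_email"]},
}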
@@ -0,0 +1,76 @@ universal_mcp/agents/react.py
+from langgraph.checkpoint.base import BaseCheckpointSaver
+from langgraph.prebuilt import create_react_agent
+from loguru import logger
+
+from universal_mcp.agentr.registry import AgentrRegistry
+from universal_mcp.agents.base import BaseAgent
+from universal_mcp.agents.llm import load_chat_model
+from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolConfig, ToolFormat
+
+
+class ReactAgent(BaseAgent):
+    def __init__(
+        self,
+        name: str,
+        instructions: str,
+        model: str,
+        memory: BaseCheckpointSaver | None = None,
+        tools: ToolConfig | None = None,
+        registry: ToolRegistry | None = None,
+        max_iterations: int = 10,
+        **kwargs,
+    ):
+        super().__init__(name, instructions, model, memory, **kwargs)
+        self.llm = load_chat_model(model)
+        self.tools = tools
+        self.max_iterations = max_iterations
+        self.registry = registry
+
+    async def _build_graph(self):
+        tools = []
+        if self.tools:
+            if not self.registry:
+                raise ValueError("Tools are configured but no registry is provided")
+
+            tools = await self.registry.export_tools(self.tools, ToolFormat.LANGCHAIN)
+            logger.debug(tools)
+        else:
+            tools = []
+
+        logger.debug(f"Initialized ReactAgent: name={self.name}, model={self.model}")
+        return create_react_agent(
+            self.llm,
+            tools,
+            prompt=self._build_system_message(),
+            checkpointer=self.memory,
+        )
+
+    def _build_system_message(self):
+        system_message = f"""You are {self.name}.
+
+You have access to various tools that can help you answer questions and complete tasks. When you need to use a tool:
+
+1. Think about what information you need
+2. Call the appropriate tool with the right parameters
+3. Use the tool results to provide a comprehensive answer
+
+Always explain your reasoning and be thorough in your responses. If you need to use multiple tools to answer a question completely, do so.
+
+{self.instructions}
+"""
+        return system_message
+
+
+if __name__ == "__main__":
+    import asyncio
+
+    agent = ReactAgent(
+        "Universal React Agent",
+        instructions="",
+        model="azure/gpt-4o",
+        tools={"google-mail": ["send_email"]},
+        registry=AgentrRegistry(),
+    )
+    result = asyncio.run(agent.invoke("Send an email with the subject 'testing react agent' to manoj@agentr.dev"))
+    logger.info(result["messages"][-1].content)