universal-mcp 0.1.24rc6__py3-none-any.whl → 0.1.24rc8__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- universal_mcp/agentr/README.md +43 -34
- universal_mcp/agentr/__init__.py +1 -2
- universal_mcp/agentr/client.py +134 -48
- universal_mcp/agentr/registry.py +132 -41
- universal_mcp/agents/__init__.py +4 -4
- universal_mcp/agents/auto.py +8 -9
- universal_mcp/agents/autoagent/__init__.py +31 -0
- universal_mcp/agents/autoagent/__main__.py +21 -0
- universal_mcp/agents/autoagent/context.py +25 -0
- universal_mcp/agents/autoagent/graph.py +148 -0
- universal_mcp/agents/autoagent/prompts.py +8 -0
- universal_mcp/agents/autoagent/state.py +28 -0
- universal_mcp/agents/autoagent/studio.py +25 -0
- universal_mcp/agents/autoagent/utils.py +13 -0
- universal_mcp/agents/base.py +13 -10
- universal_mcp/agents/codeact/test.py +2 -2
- universal_mcp/agents/hil.py +2 -2
- universal_mcp/agents/llm.py +21 -3
- universal_mcp/agents/react.py +27 -7
- universal_mcp/agents/simple.py +14 -14
- universal_mcp/tools/registry.py +49 -7
- {universal_mcp-0.1.24rc6.dist-info → universal_mcp-0.1.24rc8.dist-info}/METADATA +4 -1
- {universal_mcp-0.1.24rc6.dist-info → universal_mcp-0.1.24rc8.dist-info}/RECORD +26 -19
- universal_mcp/agentr/agentr.py +0 -30
- {universal_mcp-0.1.24rc6.dist-info → universal_mcp-0.1.24rc8.dist-info}/WHEEL +0 -0
- {universal_mcp-0.1.24rc6.dist-info → universal_mcp-0.1.24rc8.dist-info}/entry_points.txt +0 -0
- {universal_mcp-0.1.24rc6.dist-info → universal_mcp-0.1.24rc8.dist-info}/licenses/LICENSE +0 -0
universal_mcp/agents/auto.py
CHANGED
@@ -12,13 +12,12 @@ from pydantic import BaseModel
 from typing_extensions import TypedDict
 
 from universal_mcp.agentr.registry import AgentrRegistry
+from universal_mcp.agents.base import BaseAgent
+from universal_mcp.agents.llm import load_chat_model
 from universal_mcp.tools import ToolManager
 from universal_mcp.tools.adapters import ToolFormat
 from universal_mcp.tools.registry import ToolRegistry
 
-from .base import BaseAgent
-from .llm import get_llm
-
 # Auto Agent
 # Working
 # 1. For every message, and given list of tools, figure out if external tools are needed
@@ -61,9 +60,9 @@ class AutoAgent(BaseAgent):
     def __init__(self, name: str, instructions: str, model: str, app_registry: ToolRegistry):
         super().__init__(name, instructions, model)
         self.app_registry = app_registry
-        self.llm_tools =
-        self.llm_choice =
-        self.llm_quiet =
+        self.llm_tools = load_chat_model(model, tags=["tools"])
+        self.llm_choice = load_chat_model(model, tags=["choice"])
+        self.llm_quiet = load_chat_model(model, tags=["quiet"])
         self.tool_manager = ToolManager()
 
         self.task_analysis_prompt = """You are a task analysis expert. Given a task description and available apps, determine:
@@ -522,7 +521,7 @@ Be friendly and concise, but list each set of apps clearly. Do not return any ot
            return result
 
        # Get all available apps from platform manager
-        available_apps =
+        available_apps = self.app_registry.list_apps()
 
        logger.info(f"Found {len(available_apps)} available apps")
 
@@ -563,10 +562,10 @@ if __name__ == "__main__":
 
     # Create platform manager
     app_registry = AgentrRegistry(api_key=agentr_api_key)
-    want_instructions = input("Do you want to add a system prompt/instructions? (Y/N)")
+    want_instructions = input("Do you want to add a system prompt/instructions? (Y/N): ")
     instructions = "" if want_instructions.upper() == "N" else input("Enter your instructions/system prompt: ")
 
-    agent = AutoAgent("Auto Agent", instructions, "gpt-4.1", app_registry=app_registry)
+    agent = AutoAgent("Auto Agent", instructions, "azure/gpt-4.1", app_registry=app_registry)
 
     print("AutoAgent created successfully!")
     print(f"Agent name: {agent.name}")
universal_mcp/agents/autoagent/__init__.py
ADDED
@@ -0,0 +1,31 @@
+from langgraph.checkpoint.base import BaseCheckpointSaver
+
+from universal_mcp.agentr.registry import AgentrRegistry
+from universal_mcp.agents.autoagent.graph import build_graph
+from universal_mcp.agents.base import BaseAgent
+from universal_mcp.tools.registry import ToolRegistry
+
+
+class AutoAgent(BaseAgent):
+    def __init__(
+        self,
+        name: str,
+        instructions: str,
+        model: str,
+        memory: BaseCheckpointSaver | None = None,
+        registry: ToolRegistry | None = None,
+        **kwargs,
+    ):
+        super().__init__(name, instructions, model, memory, **kwargs)
+        self.tool_registry = registry or AgentrRegistry()
+
+    async def _build_graph(self):
+        builder = await build_graph(self.tool_registry, self.instructions)
+        return builder.compile(checkpointer=self.memory)
+
+    @property
+    def graph(self):
+        return self._graph
+
+
+__all__ = ["AutoAgent"]
universal_mcp/agents/autoagent/__main__.py
ADDED
@@ -0,0 +1,21 @@
+import asyncio
+
+from universal_mcp.agentr.registry import AgentrRegistry
+from universal_mcp.agents.autoagent import AutoAgent
+
+
+async def main():
+    agent = AutoAgent(
+        name="autoagent",
+        instructions="You are a helpful assistant that can use tools to help the user.",
+        model="anthropic/claude-4-sonnet-20250514",
+        tool_registry=AgentrRegistry(),
+    )
+    result = await agent.invoke(
+        user_input="Send an email to Manoj from my google mail account, manoj@agentr.dev, with the subject 'Hello from auto agent' and the body 'testing'"
+    )
+    print(result)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
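A minimal sketch of driving the new `AutoAgent` directly. Note that the `__init__` defined in `autoagent/__init__.py` above declares the keyword `registry` rather than `tool_registry`, so a call consistent with that signature would look like this; the prompt text is illustrative only.

import asyncio

from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.agents.autoagent import AutoAgent


async def demo():
    # `registry` is the keyword declared in AutoAgent.__init__ above.
    agent = AutoAgent(
        name="autoagent",
        instructions="You are a helpful assistant.",
        model="anthropic/claude-4-sonnet-20250514",
        registry=AgentrRegistry(),
    )
    result = await agent.invoke(user_input="What email tools are available?")
    print(result["messages"][-1].content)


asyncio.run(demo())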
universal_mcp/agents/autoagent/context.py
ADDED
@@ -0,0 +1,25 @@
+from dataclasses import dataclass, field
+from typing import Annotated
+
+from universal_mcp.agents.autoagent.prompts import SYSTEM_PROMPT
+
+
+@dataclass(kw_only=True)
+class Context:
+    """The context for the agent."""
+
+    system_prompt: str = field(
+        default=SYSTEM_PROMPT,
+        metadata={
+            "description": "The system prompt to use for the agent's interactions. "
+            "This prompt sets the context and behavior for the agent."
+        },
+    )
+
+    model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
+        default="anthropic/claude-4-sonnet-20250514",
+        metadata={
+            "description": "The name of the language model to use for the agent's main interactions. "
+            "Should be in the form: provider/model-name."
+        },
+    )
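`Context` is what the graph reads at runtime through `Runtime[Context]` (see `graph.py` below), and both fields can be overridden per run. A small sketch:

from universal_mcp.agents.autoagent.context import Context

# Defaults come from the dataclass fields above.
ctx = Context()
print(ctx.model)  # anthropic/claude-4-sonnet-20250514

# Per-run overrides mirror the {"system_prompt": ..., "model": ...} dict
# that BaseAgent.invoke (see base.py below) passes as `context=`.
ctx = Context(system_prompt="You are terse.", model="azure/gpt-4.1")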
universal_mcp/agents/autoagent/graph.py
ADDED
@@ -0,0 +1,148 @@
+import json
+from datetime import UTC, datetime
+from typing import cast
+
+from langchain_core.messages import AIMessage, ToolMessage
+from langchain_core.tools import tool
+from langgraph.graph import END, START, StateGraph
+from langgraph.runtime import Runtime
+
+from universal_mcp.agents.autoagent.context import Context
+from universal_mcp.agents.autoagent.prompts import SYSTEM_PROMPT
+from universal_mcp.agents.autoagent.state import State
+from universal_mcp.agents.llm import load_chat_model
+from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolFormat
+
+
+async def build_graph(tool_registry: ToolRegistry, instructions: str = ""):
+    @tool()
+    async def search_tools(query: str, app_ids: list[str] | None = None) -> list[str]:
+        """Retrieve tools using a search query and app id. Use multiple times if you require tools for different tasks."""
+        tools_list = []
+        if app_ids is not None:
+            for app_id in app_ids:
+                tools_list.extend(await tool_registry.search_tools(query, limit=10, app_id=app_id))
+        else:
+            tools_list = await tool_registry.search_tools(query, limit=10)
+        tools_list = [f"{tool['id']}: {tool['description']}" for tool in tools_list]
+        return tools_list
+
+    @tool()
+    async def ask_user(question: str) -> str:
+        """Ask the user a question. Use this tool to ask the user for any missing information for performing a task, or when you have multiple apps to choose from for performing a task."""
+        full_question = question
+        return f"ASKING_USER: {full_question}"
+
+    @tool()
+    async def load_tools(tools: list[str]) -> list[str]:
+        """Choose the tools you want to use by passing their tool ids. Loads the tools for the chosen tools and returns the tool ids."""
+        return tools
+
+
+    async def call_model(
+        state: State,
+        runtime: Runtime[Context],
+    ):
+        system_prompt = runtime.context.system_prompt if runtime.context.system_prompt else SYSTEM_PROMPT
+        app_ids = await tool_registry.list_all_apps()
+        app_id_descriptions = "\n".join([f"{app['id']}: {app['description']}" for app in app_ids])
+        print(app_id_descriptions)
+        system_prompt = system_prompt.format(system_time=datetime.now(tz=UTC).isoformat(), app_ids=app_id_descriptions)
+
+        messages = [{"role": "system", "content": system_prompt + "\n" + instructions}, *state["messages"]]
+        model = load_chat_model(runtime.context.model)
+        # Load tools from tool registry
+        loaded_tools = await tool_registry.export_tools(tools=state["selected_tool_ids"], format=ToolFormat.LANGCHAIN)
+        model_with_tools = model.bind_tools([search_tools, ask_user, load_tools, *loaded_tools], tool_choice="auto")
+        response_raw = model_with_tools.invoke(messages)
+        token_usage = state.get("token_usage", {})
+        for key in ["input_tokens", "output_tokens", "total_tokens"]:
+            if key in token_usage:
+                token_usage[key] += response_raw.usage_metadata[key]
+            else:
+                token_usage[key] = response_raw.usage_metadata[key]
+        print(response_raw.usage_metadata)
+        print(token_usage)
+        response = cast(AIMessage, response_raw)
+        return {"messages": [response], "token_usage": token_usage}
+
+    # Define the conditional edge that determines whether to continue or not
+    def should_continue(state: State):
+        messages = state["messages"]
+        last_message = messages[-1]
+        # If there is no function call, then we finish
+        if not last_message.tool_calls:
+            return END
+        else:
+            return "tools"
+
+    def tool_router(state: State):
+        last_message = state["messages"][-1]
+        if isinstance(last_message, ToolMessage):
+            return "agent"
+        else:
+            return END
+
+    async def tool_node(state: State):
+        outputs = []
+        tool_ids = state["selected_tool_ids"]
+        for tool_call in state["messages"][-1].tool_calls:
+            if tool_call["name"] == ask_user.name:
+                outputs.append(
+                    ToolMessage(
+                        content=json.dumps(
+                            "The user has been asked the question, and the run will wait for the user's response."
+                        ),
+                        name=tool_call["name"],
+                        tool_call_id=tool_call["id"],
+                    )
+                )
+                ai_message = AIMessage(content=tool_call["args"]["question"])
+                outputs.append(ai_message)
+            elif tool_call["name"] == search_tools.name:
+                tools = await search_tools.ainvoke(tool_call["args"])
+                outputs.append(
+                    ToolMessage(
+                        content=json.dumps(tools) + "\n\nUse the load_tools tool to load the tools you want to use.",
+                        name=tool_call["name"],
+                        tool_call_id=tool_call["id"],
+                    )
+                )
+
+            elif tool_call["name"] == load_tools.name:
+                tool_ids = await load_tools.ainvoke(tool_call["args"])
+                print(tool_ids)
+                outputs.append(
+                    ToolMessage(
+                        content=json.dumps(tool_ids),
+                        name=tool_call["name"],
+                        tool_call_id=tool_call["id"],
+                    )
+                )
+            else:
+                await tool_registry.export_tools([tool_call["name"]], ToolFormat.LANGCHAIN)
+                tool_result = await tool_registry.call_tool(tool_call["name"], tool_call["args"])
+                outputs.append(
+                    ToolMessage(
+                        content=json.dumps(tool_result),
+                        name=tool_call["name"],
+                        tool_call_id=tool_call["id"],
+                    )
+                )
+        return {"messages": outputs, "selected_tool_ids": tool_ids}
+
+
+
+
+
+    builder = StateGraph(State, context_schema=Context)
+
+    builder.add_node("agent", call_model)
+    builder.add_node("tools", tool_node)
+
+    builder.add_edge(START, "agent")
+    builder.add_conditional_edges("agent", should_continue)
+    builder.add_conditional_edges("tools", tool_router)
+
+    return builder
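A sketch of compiling and running the builder returned by `build_graph`, assuming an in-memory checkpointer. This mirrors what `AutoAgent._build_graph` in `__init__.py` above does, and the `context=` argument matches the dict `BaseAgent.invoke` passes (see `base.py` below); the thread id and prompt are illustrative.

import asyncio

from langgraph.checkpoint.memory import MemorySaver

from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.agents.autoagent.graph import build_graph


async def run_once():
    builder = await build_graph(AgentrRegistry(), instructions="Be concise.")
    graph = builder.compile(checkpointer=MemorySaver())
    # An empty system_prompt falls back to SYSTEM_PROMPT inside call_model.
    state = await graph.ainvoke(
        {"messages": [{"role": "user", "content": "Find a tool to send email"}]},
        config={"configurable": {"thread_id": "demo"}},
        context={"system_prompt": "", "model": "azure/gpt-4.1"},
    )
    print(state["messages"][-1].content)


asyncio.run(run_once())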
universal_mcp/agents/autoagent/prompts.py
ADDED
@@ -0,0 +1,8 @@
+"""Default prompts used by the agent."""
+
+SYSTEM_PROMPT = """You are a helpful AI assistant. When you lack tools for any task you should use the `search_tools` function to unlock relevant tools. Whenever you need to ask the user for any information, or choose between multiple different applications, you can ask the user using the `ask_user` function.
+
+System time: {system_time}
+App IDs: {app_ids}
+Note that when multiple apps seem relevant for a task, you should ask the user to choose the app.
+"""
universal_mcp/agents/autoagent/state.py
ADDED
@@ -0,0 +1,28 @@
+from typing import Annotated
+
+from langgraph.prebuilt.chat_agent_executor import AgentState
+
+
+def _enqueue(left: list, right: list) -> list:
+    """Treat left as a FIFO queue, append new items from right (preserve order),
+    keep items unique, and cap total size to 20 (drop oldest items)."""
+    max_size = 30
+    preferred_size = 20
+    if len(right) > preferred_size:
+        preferred_size = min(max_size, len(right))
+    queue = list(left or [])
+
+    for item in right[:preferred_size] or []:
+        if item in queue:
+            queue.remove(item)
+        queue.append(item)
+
+    if len(queue) > preferred_size:
+        queue = queue[-preferred_size:]
+
+    return queue
+
+
+class State(AgentState):
+    selected_tool_ids: Annotated[list[str], _enqueue]
+    token_usage: dict[str, int]
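The `_enqueue` reducer gives `selected_tool_ids` de-duplicating, move-to-back queue semantics with a size cap, so recently loaded tools stay and stale ones age out. A quick illustration:

from universal_mcp.agents.autoagent.state import _enqueue

# Re-selected ids move to the back; new ids append in order.
print(_enqueue(["a", "b", "c"], ["b", "d"]))  # ['a', 'c', 'b', 'd']

# With the usual cap of 20, only the most recent ids survive.
queue = _enqueue([f"t{i}" for i in range(20)], ["t20"])
print(len(queue), queue[0], queue[-1])  # 20 t1 t20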
universal_mcp/agents/autoagent/studio.py
ADDED
@@ -0,0 +1,25 @@
+import asyncio
+
+from universal_mcp.agentr.registry import AgentrRegistry
+from universal_mcp.agents.autoagent import build_graph
+from universal_mcp.tools import ToolManager
+
+tool_registry = AgentrRegistry()
+tool_manager = ToolManager()
+
+
+
+async def main():
+    instructions = """
+    You are a helpful assistant that can use tools to help the user. If a task requires multiple steps, you should perform separate different searches for different actions. Prefer completing one action before searching for another.
+    """
+    graph = await build_graph(tool_registry, instructions=instructions)
+    return graph
+
+graph = asyncio.run(main())
+
+
+
+
+
+
universal_mcp/agents/autoagent/utils.py
ADDED
@@ -0,0 +1,13 @@
+from langchain_core.messages import BaseMessage
+
+
+def get_message_text(msg: BaseMessage) -> str:
+    """Get the text content of a message."""
+    content = msg.content
+    if isinstance(content, str):
+        return content
+    elif isinstance(content, dict):
+        return content.get("text", "")
+    else:
+        txts = [c if isinstance(c, str) else (c.get("text") or "") for c in content]
+        return "".join(txts).strip()
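`get_message_text` normalizes the three shapes LangChain message content can take (plain string, dict, or a list of string/dict blocks):

from langchain_core.messages import AIMessage

from universal_mcp.agents.autoagent.utils import get_message_text

print(get_message_text(AIMessage(content="plain string")))
blocks = [{"type": "text", "text": "from "}, "blocks"]
print(get_message_text(AIMessage(content=blocks)))  # "from blocks"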
universal_mcp/agents/base.py
CHANGED
@@ -7,7 +7,6 @@ from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.checkpoint.memory import MemorySaver
 from langgraph.types import Command
 
-from .llm import get_llm
 from .utils import RichCLI
 
 
@@ -18,15 +17,19 @@ class BaseAgent:
         self.model = model
         self.memory = memory or MemorySaver()
         self._graph = None
-        self.
+        self._initialized = False
         self.cli = RichCLI()
 
+    async def ainit(self):
+        if not self._initialized:
+            self._graph = await self._build_graph()
+            self._initialized = True
+
     async def _build_graph(self):
         raise NotImplementedError("Subclasses must implement this method")
 
     async def stream(self, thread_id: str, user_input: str):
-
-        self._graph = await self._build_graph()
+        await self.ainit()
         async for event, _ in self._graph.astream(
             {"messages": [{"role": "user", "content": user_input}]},
             config={"configurable": {"thread_id": thread_id}},
@@ -36,24 +39,24 @@ class BaseAgent:
             yield event
 
     async def stream_interactive(self, thread_id: str, user_input: str):
+        await self.ainit()
         with self.cli.display_agent_response_streaming(self.name) as stream_updater:
-            async for event in self.
+            async for event in self.stream(thread_id, user_input):
                 stream_updater.update(event.content)
 
-    async def
+    async def invoke(self, user_input: str, thread_id: str = str(uuid4())):
         """Run the agent"""
-
-        self._graph = await self._build_graph()
+        await self.ainit()
         return await self._graph.ainvoke(
             {"messages": [{"role": "user", "content": user_input}]},
             config={"configurable": {"thread_id": thread_id}},
+            context={"system_prompt": self.instructions, "model": self.model},
         )
 
     async def run_interactive(self, thread_id: str = str(uuid4())):
         """Main application loop"""
 
-
-        self._graph = await self._build_graph()
+        await self.ainit()
         # Display welcome
         self.cli.display_welcome(self.name)
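The effect of the `ainit` change is that subclasses no longer rebuild their graph on every call: `invoke`, `stream`, `stream_interactive`, and `run_interactive` all await `ainit()`, which compiles the graph once and caches it on `self._graph`. A sketch with a hypothetical `EchoAgent` subclass:

from langgraph.graph import END, START, StateGraph
from typing_extensions import TypedDict

from universal_mcp.agents.base import BaseAgent


class _State(TypedDict):
    messages: list


class EchoAgent(BaseAgent):  # hypothetical subclass, for illustration only
    async def _build_graph(self):
        builder = StateGraph(_State)
        builder.add_node("echo", lambda state: {"messages": state["messages"]})
        builder.add_edge(START, "echo")
        builder.add_edge("echo", END)
        return builder.compile(checkpointer=self.memory)


agent = EchoAgent("Echo", "You are an echo.", "azure/gpt-4.1")
# The first invoke() compiles the graph via ainit(); later calls reuse it.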
universal_mcp/agents/codeact/test.py
CHANGED
@@ -1,10 +1,10 @@
 from universal_mcp.agentr import Agentr
 from universal_mcp.agents.codeact import create_codeact
 from universal_mcp.agents.codeact.sandbox import eval_unsafe
-from universal_mcp.agents.llm import
+from universal_mcp.agents.llm import load_chat_model
 from universal_mcp.tools.adapters import ToolFormat
 
-model =
+model = load_chat_model("gpt-4.1")
 
 agentr = Agentr()
 agentr.load_tools(["google-mail_send_email"])
universal_mcp/agents/hil.py
CHANGED
@@ -7,7 +7,7 @@ from langgraph.graph.message import add_messages
 from langgraph.types import Interrupt, interrupt
 
 from .base import BaseAgent
-from .llm import
+from .llm import load_chat_model
 
 
 class State(TypedDict):
@@ -70,7 +70,7 @@ def handle_interrupt(interrupt: Interrupt) -> str | bool:
 class HilAgent(BaseAgent):
     def __init__(self, name: str, instructions: str, model: str):
         super().__init__(name, instructions, model)
-        self.llm =
+        self.llm = load_chat_model(model)
         self._graph = self._build_graph()
 
     def chatbot(self, state: State):
universal_mcp/agents/llm.py
CHANGED
@@ -1,10 +1,28 @@
+from langchain_anthropic import ChatAnthropic
+from langchain_core.language_models import BaseChatModel
+from langchain_google_vertexai.model_garden import ChatAnthropicVertex
 from langchain_openai import AzureChatOpenAI
 
 
-def
-
+def load_chat_model(fully_specified_name: str, tags: list[str] | None = None) -> BaseChatModel:
+    """Load a chat model from a fully specified name.
+
+    Args:
+        fully_specified_name (str): String in the format 'provider/model'.
+    """
+    provider, model = fully_specified_name.split("/", maxsplit=1)
+    if provider == "google_anthropic_vertex":
+        return ChatAnthropicVertex(model=model, temperature=0.2, location="asia-east1", tags=tags)
+    elif provider == "anthropic":
+        return ChatAnthropic(
+            model=model, temperature=1, thinking={"type": "enabled", "budget_tokens": 2048}, max_tokens=4096, tags=tags
+        )  # pyright: ignore[reportCallIssue]
+    elif provider == "azure":
+        return AzureChatOpenAI(model=model, api_version="2024-12-01-preview", azure_deployment=model, tags=tags)
+    else:
+        raise ValueError(f"Unsupported provider: {provider}")
 
 
 if __name__ == "__main__":
-    llm =
+    llm = load_chat_model("azure/gpt-4.1")
     print(llm.invoke("Hello, world!"))
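The agents in this release now route model construction through `load_chat_model`, which expects a `provider/model` pair:

from universal_mcp.agents.llm import load_chat_model

llm = load_chat_model("azure/gpt-4.1", tags=["tools"])       # AzureChatOpenAI
llm = load_chat_model("anthropic/claude-4-sonnet-20250514")  # ChatAnthropic, thinking enabled

# A bare name such as "gpt-4.1" contains no "/", so the tuple unpacking in
# load_chat_model raises ValueError: the provider prefix is required.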
universal_mcp/agents/react.py
CHANGED
@@ -2,9 +2,12 @@ from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.prebuilt import create_react_agent
 from loguru import logger
 
+from universal_mcp.agentr.registry import AgentrRegistry
 from universal_mcp.agents.base import BaseAgent
-from universal_mcp.agents.
-from universal_mcp.
+from universal_mcp.agents.llm import load_chat_model
+from universal_mcp.agents.tools import load_mcp_tools
+from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolConfig, ToolFormat
 
 
 class ReactAgent(BaseAgent):
@@ -15,18 +18,29 @@ class ReactAgent(BaseAgent):
         model: str,
         memory: BaseCheckpointSaver | None = None,
         tools: ToolConfig | None = None,
+        registry: ToolRegistry | None = None,
         max_iterations: int = 10,
         **kwargs,
     ):
         super().__init__(name, instructions, model, memory, **kwargs)
+        self.llm = load_chat_model(model)
         self.tools = tools
         self.max_iterations = max_iterations
+        self.registry = registry
 
     async def _build_graph(self):
         if self.tools:
             config = self.tools.model_dump(exclude_none=True)
-
+            if config.get("agentrServers") and not self.registry:
+                raise ValueError("Agentr servers are configured but no registry is provided")
+            agentr_tools = (
+                await self.registry.export_tools(self.tools, ToolFormat.LANGCHAIN)
+                if config.get("agentrServers")
+                else []
+            )
+            print(agentr_tools)
             mcp_tools = await load_mcp_tools(config["mcpServers"]) if config.get("mcpServers") else []
+            print(mcp_tools)
             tools = agentr_tools + mcp_tools
         else:
             tools = []
@@ -39,7 +53,7 @@ class ReactAgent(BaseAgent):
         )
 
     def _build_system_message(self) -> str:
-        system_message = f"""You are {self.name}.
+        system_message = f"""You are {self.name}.
 
 You have access to various tools that can help you answer questions and complete tasks. When you need to use a tool:
 
@@ -47,7 +61,10 @@ You have access to various tools that can help you answer questions and complete
 2. Call the appropriate tool with the right parameters
 3. Use the tool results to provide a comprehensive answer
 
-Always explain your reasoning and be thorough in your responses. If you need to use multiple tools to answer a question completely, do so.
+Always explain your reasoning and be thorough in your responses. If you need to use multiple tools to answer a question completely, do so.
+
+{self.instructions}
+"""
         return system_message
 
 
@@ -57,8 +74,11 @@ if __name__ == "__main__":
     agent = ReactAgent(
         "Universal React Agent",
         instructions="",
-        model="gpt-4o",
+        model="azure/gpt-4o",
         tools=ToolConfig(agentrServers={"google-mail": {"tools": ["send_email"]}}),
+        registry=AgentrRegistry(),
+    )
+    result = asyncio.run(
+        agent.invoke(user_input="Send an email with the subject 'testing react agent' to manoj@agentr.dev")
     )
-    result = asyncio.run(agent.run(user_input="Send an email with the subject 'Hello' to john.doe@example.com"))
     print(result["messages"][-1].content)
universal_mcp/agents/simple.py
CHANGED
@@ -1,12 +1,13 @@
 import asyncio
 from typing import Annotated
 
+from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.graph import END, START, StateGraph
 from langgraph.graph.message import add_messages
 from typing_extensions import TypedDict
 
-from .base import BaseAgent
-from .llm import
+from universal_mcp.agents.base import BaseAgent
+from universal_mcp.agents.llm import load_chat_model
 
 
 class State(TypedDict):
@@ -14,27 +15,26 @@ class State(TypedDict):
 
 
 class SimpleAgent(BaseAgent):
-    def __init__(self, name: str, instructions: str, model: str):
-        super().__init__(name, instructions, model)
-        self.llm =
-        self._graph = self._build_graph()
+    def __init__(self, name: str, instructions: str, model: str, memory: BaseCheckpointSaver = None, **kwargs):
+        super().__init__(name, instructions, model, memory, **kwargs)
+        self.llm = load_chat_model(model)
 
-    def _build_graph(self):
+    async def _build_graph(self):
         graph_builder = StateGraph(State)
 
-        def chatbot(state: State):
-
+        async def chatbot(state: State):
+            messages = [
+                {"role": "system", "content": self.instructions},
+                *state["messages"],
+            ]
+            return {"messages": [await self.llm.ainvoke(messages)]}
 
         graph_builder.add_node("chatbot", chatbot)
         graph_builder.add_edge(START, "chatbot")
         graph_builder.add_edge("chatbot", END)
         return graph_builder.compile(checkpointer=self.memory)
 
-    @property
-    def graph(self):
-        return self._graph
-
 
 if __name__ == "__main__":
-    agent = SimpleAgent("Simple Agent", "You are a helpful assistant", "
+    agent = SimpleAgent("Simple Agent", "You are a helpful assistant", "azure/gpt-4o")
     asyncio.run(agent.run_interactive())