universal-mcp 0.1.24rc4-py3-none-any.whl → 0.1.24rc7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries. Some removed lines below were truncated by the upstream diff viewer (shown as a bare `-` or a line ending mid-expression) and are reproduced as-is.
- universal_mcp/agentr/README.md +43 -34
- universal_mcp/agentr/agentr.py +7 -0
- universal_mcp/agentr/client.py +96 -42
- universal_mcp/agentr/registry.py +21 -25
- universal_mcp/agents/__init__.py +4 -4
- universal_mcp/agents/auto.py +8 -8
- universal_mcp/agents/autoagent/__init__.py +35 -0
- universal_mcp/agents/autoagent/__main__.py +21 -0
- universal_mcp/agents/autoagent/context.py +25 -0
- universal_mcp/agents/autoagent/graph.py +119 -0
- universal_mcp/agents/autoagent/prompts.py +5 -0
- universal_mcp/agents/autoagent/state.py +27 -0
- universal_mcp/agents/autoagent/studio.py +25 -0
- universal_mcp/agents/autoagent/utils.py +13 -0
- universal_mcp/agents/base.py +24 -10
- universal_mcp/agents/codeact/test.py +2 -2
- universal_mcp/agents/hil.py +2 -2
- universal_mcp/agents/llm.py +21 -3
- universal_mcp/agents/react.py +32 -24
- universal_mcp/agents/simple.py +3 -3
- universal_mcp/agents/tools.py +35 -0
- universal_mcp/config.py +0 -93
- universal_mcp/tools/manager.py +15 -22
- universal_mcp/tools/registry.py +13 -3
- universal_mcp/tools/tools.py +11 -5
- universal_mcp/types.py +25 -0
- {universal_mcp-0.1.24rc4.dist-info → universal_mcp-0.1.24rc7.dist-info}/METADATA +6 -6
- {universal_mcp-0.1.24rc4.dist-info → universal_mcp-0.1.24rc7.dist-info}/RECORD +31 -22
- {universal_mcp-0.1.24rc4.dist-info → universal_mcp-0.1.24rc7.dist-info}/WHEEL +0 -0
- {universal_mcp-0.1.24rc4.dist-info → universal_mcp-0.1.24rc7.dist-info}/entry_points.txt +0 -0
- {universal_mcp-0.1.24rc4.dist-info → universal_mcp-0.1.24rc7.dist-info}/licenses/LICENSE +0 -0
universal_mcp/agents/autoagent/graph.py
ADDED
```diff
@@ -0,0 +1,119 @@
+import json
+from datetime import UTC, datetime
+from typing import cast
+
+from langchain_core.messages import AIMessage, ToolMessage
+from langchain_core.tools import tool
+from langgraph.graph import END, START, StateGraph
+from langgraph.runtime import Runtime
+
+from universal_mcp.agents.llm import load_chat_model
+from universal_mcp.tools.manager import ToolManager
+from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolFormat
+
+from universal_mcp.agents.autoagent.context import Context
+from universal_mcp.agents.autoagent.prompts import SYSTEM_PROMPT
+from universal_mcp.agents.autoagent.state import State
+
+
+def create_agent(tool_registry: ToolRegistry, tool_manager: ToolManager, instructions: str = ""):
+    @tool()
+    def retrieve_tools(query: str) -> list[str]:
+        """Retrieve tools using a search query. Use multiple times if you require tools for different tasks."""
+        tools = tool_registry.search_tools(query)
+        my_connections = tool_registry.client.list_my_connections()
+        connected_apps = set(connection["app_id"] for connection in my_connections)
+        filtered_tools = [tool for tool in tools if tool["app_id"] in connected_apps]
+        if len(filtered_tools) == 0:
+            return tools
+        return filtered_tools
+
+    @tool()
+    def ask_user(question: str) -> str:
+        """Ask the user a question. Use this tool to ask the user for any missing information for performing a task, or when you have multiple apps to choose from for performing a task."""
+        full_question = question
+        return f"ASKING_USER: {full_question}"
+
+    def call_model(
+        state: State,
+        runtime: Runtime[Context],
+    ):
+        system_prompt = runtime.context.system_prompt if runtime.context.system_prompt else SYSTEM_PROMPT
+        system_prompt = system_prompt.format(system_time=datetime.now(tz=UTC).isoformat())
+
+        messages = [{"role": "system", "content": system_prompt + "\n" + instructions}, *state["messages"]]
+        model = load_chat_model(runtime.context.model)
+        # Load tools from tool registry
+        tool_registry.load_tools(tools=state["selected_tool_ids"], tool_manager=tool_manager)
+        loaded_tools = tool_manager.list_tools(format=ToolFormat.LANGCHAIN)
+        model_with_tools = model.bind_tools([retrieve_tools, ask_user, *loaded_tools], tool_choice="auto")
+        response = cast(AIMessage, model_with_tools.invoke(messages))
+        return {"messages": [response]}
+
+    # Define the conditional edge that determines whether to continue or not
+    def should_continue(state: State):
+        messages = state["messages"]
+        last_message = messages[-1]
+        # If there is no function call, then we finish
+        if not last_message.tool_calls:
+            return END
+        # Otherwise if there is, we continue
+        else:
+            return "tools"
+
+    def tool_router(state: State):
+        last_message = state["messages"][-1]
+        if isinstance(last_message, ToolMessage):
+            return "agent"
+        else:
+            return END
+
+
+    async def tool_node(state: State):
+        outputs = []
+        tool_ids = state["selected_tool_ids"]
+        for tool_call in state["messages"][-1].tool_calls:
+            if tool_call["name"] == retrieve_tools.name:
+                tool_result = retrieve_tools.invoke(tool_call["args"])
+                tool_ids = [tool["id"] for tool in tool_result]
+                outputs.append(
+                    ToolMessage(
+                        content=json.dumps(tool_result),
+                        name=tool_call["name"],
+                        tool_call_id=tool_call["id"],
+                    )
+                )
+            elif tool_call["name"] == ask_user.name:
+                outputs.append(
+                    ToolMessage(
+                        content=json.dumps("The user has been asked the question, and the run will wait for the user's response."),
+                        name=tool_call["name"],
+                        tool_call_id=tool_call["id"],
+                    )
+                )
+                ai_message = AIMessage(content=tool_call["args"]["question"])
+                outputs.append(ai_message)
+            else:
+                tool_manager.clear_tools()
+                tool_registry.load_tools([tool_call["name"]], tool_manager=tool_manager)
+                tool_result = await tool_manager.call_tool(tool_call["name"], tool_call["args"])
+                outputs.append(
+                    ToolMessage(
+                        content=json.dumps(tool_result),
+                        name=tool_call["name"],
+                        tool_call_id=tool_call["id"],
+                    )
+                )
+        return {"messages": outputs, "selected_tool_ids": tool_ids}
+
+    builder = StateGraph(State, context_schema=Context)
+
+    builder.add_node("agent", call_model)
+    builder.add_node("tools", tool_node)
+
+    builder.add_edge(START, "agent")
+    builder.add_conditional_edges("agent", should_continue)
+    builder.add_conditional_edges("tools", tool_router)
+
+    return builder
```
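For orientation, a minimal usage sketch of the new graph, not part of the diff. It assumes the `Context` schema in `context.py` (whose diff is not captured above) exposes the `system_prompt` and `model` fields that `call_model` reads, that AgentR credentials are configured in the environment, and that the model id is a placeholder; the `context=` keyword mirrors how `BaseAgent.run` invokes its graph below.

```python
# Hypothetical sketch: compile and invoke the autoagent graph.
import asyncio

from langgraph.checkpoint.memory import MemorySaver

from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.agents.autoagent import create_agent
from universal_mcp.tools.manager import ToolManager


async def main():
    # create_agent returns an uncompiled StateGraph builder
    builder = create_agent(AgentrRegistry(), ToolManager())
    graph = builder.compile(checkpointer=MemorySaver())
    result = await graph.ainvoke(
        {"messages": [{"role": "user", "content": "Send a test email"}]},
        config={"configurable": {"thread_id": "demo"}},
        context={"system_prompt": "", "model": "azure/gpt-4.1"},  # assumed Context fields
    )
    print(result["messages"][-1].content)


asyncio.run(main())
```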
universal_mcp/agents/autoagent/prompts.py
ADDED
```diff
@@ -0,0 +1,5 @@
+"""Default prompts used by the agent."""
+
+SYSTEM_PROMPT = """You are a helpful AI assistant. When you lack tools for any task you should use the `retrieve_tools` function to unlock relevant tools. Whenever you need to ask the user for any information, or choose between multiple different applications, you can ask the user using the `ask_user` function.
+
+System time: {system_time}"""
```
universal_mcp/agents/autoagent/state.py
ADDED
```diff
@@ -0,0 +1,27 @@
+from typing import Annotated
+
+from langgraph.prebuilt.chat_agent_executor import AgentState
+
+
+def _enqueue(left: list, right: list) -> list:
+    """Treat left as a FIFO queue, append new items from right (preserve order),
+    keep items unique, and cap total size to 20 (drop oldest items)."""
+    max_size = 30
+    preferred_size = 20
+    if len(right) > preferred_size:
+        preferred_size = min(max_size, len(right))
+    queue = list(left or [])
+
+    for item in right[:preferred_size] or []:
+        if item in queue:
+            queue.remove(item)
+        queue.append(item)
+
+    if len(queue) > preferred_size:
+        queue = queue[-preferred_size:]
+
+    return queue
+
+
+class State(AgentState):
+    selected_tool_ids: Annotated[list[str], _enqueue]
```
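The `_enqueue` reducer keeps `selected_tool_ids` a bounded, de-duplicated FIFO queue across state updates. A quick illustration of its behavior as written:

```python
from universal_mcp.agents.autoagent.state import _enqueue

# Duplicates are refreshed to the back of the queue; order is otherwise preserved.
print(_enqueue(["a", "b", "c"], ["b", "d"]))  # ['a', 'c', 'b', 'd']

# Despite the docstring's "cap to 20", a single large update grows the cap
# to min(30, len(right)).
print(len(_enqueue([], [f"tool_{i}" for i in range(25)])))  # 25
```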
universal_mcp/agents/autoagent/studio.py
ADDED
```diff
@@ -0,0 +1,25 @@
+from universal_mcp.agentr.registry import AgentrRegistry
+from universal_mcp.agents.autoagent import create_agent
+from universal_mcp.tools import ToolManager
+
+tool_registry = AgentrRegistry()
+tool_manager = ToolManager()
+
+
+
+apps = tool_registry.client.list_all_apps()
+names = [app["name"] for app in apps]
+
+instructions = """
+You are a helpful assistant that can use tools to help the user. If a task requires multiple steps, you should perform separate different searches for different actions.
+These are the list of applications you can use to help the user:
+{names}
+"""
+graph = create_agent(tool_registry, tool_manager, instructions=instructions)
+
+
+
+
+
+
+
```
universal_mcp/agents/autoagent/utils.py
ADDED
```diff
@@ -0,0 +1,13 @@
+from langchain_core.messages import BaseMessage
+
+
+def get_message_text(msg: BaseMessage) -> str:
+    """Get the text content of a message."""
+    content = msg.content
+    if isinstance(content, str):
+        return content
+    elif isinstance(content, dict):
+        return content.get("text", "")
+    else:
+        txts = [c if isinstance(c, str) else (c.get("text") or "") for c in content]
+        return "".join(txts).strip()
```
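A quick check of the content shapes `get_message_text` normalizes (plain string content, and the list-of-blocks form some providers return):

```python
from langchain_core.messages import AIMessage

from universal_mcp.agents.autoagent.utils import get_message_text

print(get_message_text(AIMessage(content="hello")))                           # hello
print(get_message_text(AIMessage(content=[{"type": "text", "text": "hi"}])))  # hi
```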
universal_mcp/agents/base.py
CHANGED
```diff
@@ -3,26 +3,31 @@ from typing import cast
 from uuid import uuid4
 
 from langchain_core.messages import AIMessageChunk
+from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.checkpoint.memory import MemorySaver
 from langgraph.types import Command
 
+from .llm import load_chat_model
 from .utils import RichCLI
 
 
 class BaseAgent:
-    def __init__(self, name: str, instructions: str, model: str):
+    def __init__(self, name: str, instructions: str, model: str, memory: BaseCheckpointSaver | None = None, **kwargs):
         self.name = name
         self.instructions = instructions
         self.model = model
-        self.memory = MemorySaver()
+        self.memory = memory or MemorySaver()
+        self._graph = None
+        self.llm = load_chat_model(model)
         self.cli = RichCLI()
 
-
-    def graph(self):
+    async def _build_graph(self):
         raise NotImplementedError("Subclasses must implement this method")
 
     async def stream(self, thread_id: str, user_input: str):
-
+        if self._graph is None:
+            self._graph = await self._build_graph()
+        async for event, _ in self._graph.astream(
             {"messages": [{"role": "user", "content": user_input}]},
             config={"configurable": {"thread_id": thread_id}},
             stream_mode="messages",
@@ -32,25 +37,34 @@ class BaseAgent:
 
     async def stream_interactive(self, thread_id: str, user_input: str):
         with self.cli.display_agent_response_streaming(self.name) as stream_updater:
-            async for event in self.
+            async for event in self.astream(thread_id, user_input):
                 stream_updater.update(event.content)
 
-    async def
-        """
+    async def run(self, user_input: str, thread_id: str = str(uuid4())):
+        """Run the agent"""
+        if not self._graph:
+            self._graph = await self._build_graph()
+        return await self._graph.ainvoke(
+            {"messages": [{"role": "user", "content": user_input}]},
+            config={"configurable": {"thread_id": thread_id}},
+            context={"system_prompt": self.instructions, "model": self.model},
+        )
 
     async def run_interactive(self, thread_id: str = str(uuid4())):
         """Main application loop"""
 
+        if not self._graph:
+            self._graph = await self._build_graph()
         # Display welcome
         self.cli.display_welcome(self.name)
 
         # Main loop
         while True:
             try:
-                state = self.
+                state = self._graph.get_state(config={"configurable": {"thread_id": thread_id}})
                 if state.interrupts:
                     value = self.cli.handle_interrupt(state.interrupts[0])
-                    self.
+                    self._graph.invoke(Command(resume=value), config={"configurable": {"thread_id": thread_id}})
                     continue
 
                 user_input = self.cli.get_user_input()
```
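`_build_graph` is now an async hook that the base class awaits lazily on first use, and the checkpointer is injectable via `memory`. A hypothetical minimal subclass illustrating the contract (the echo node and graph are made up for this sketch, not from the package):

```python
from langgraph.graph import START, MessagesState, StateGraph

from universal_mcp.agents.base import BaseAgent


class EchoAgent(BaseAgent):
    async def _build_graph(self):
        def echo(state: MessagesState):
            return {"messages": [{"role": "assistant", "content": "echo"}]}

        builder = StateGraph(MessagesState)
        builder.add_node("echo", echo)
        builder.add_edge(START, "echo")
        # self.memory defaults to MemorySaver unless a checkpointer was injected
        return builder.compile(checkpointer=self.memory)


# Note: BaseAgent.__init__ now eagerly calls load_chat_model(model), so the
# model string must be "provider/model" with that provider's credentials set.
```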
universal_mcp/agents/codeact/test.py
CHANGED
```diff
@@ -1,10 +1,10 @@
 from universal_mcp.agentr import Agentr
 from universal_mcp.agents.codeact import create_codeact
 from universal_mcp.agents.codeact.sandbox import eval_unsafe
-from universal_mcp.agents.llm import
+from universal_mcp.agents.llm import load_chat_model
 from universal_mcp.tools.adapters import ToolFormat
 
-model =
+model = load_chat_model("gpt-4.1")
 
 agentr = Agentr()
 agentr.load_tools(["google-mail_send_email"])
```
universal_mcp/agents/hil.py
CHANGED
```diff
@@ -7,7 +7,7 @@ from langgraph.graph.message import add_messages
 from langgraph.types import Interrupt, interrupt
 
 from .base import BaseAgent
-from .llm import
+from .llm import load_chat_model
 
 
 class State(TypedDict):
@@ -70,7 +70,7 @@ def handle_interrupt(interrupt: Interrupt) -> str | bool:
 class HilAgent(BaseAgent):
     def __init__(self, name: str, instructions: str, model: str):
         super().__init__(name, instructions, model)
-        self.llm =
+        self.llm = load_chat_model(model)
         self._graph = self._build_graph()
 
     def chatbot(self, state: State):
```
universal_mcp/agents/llm.py
CHANGED
```diff
@@ -1,10 +1,28 @@
+from langchain_anthropic import ChatAnthropic
+from langchain_core.language_models import BaseChatModel
+from langchain_google_vertexai.model_garden import ChatAnthropicVertex
 from langchain_openai import AzureChatOpenAI
 
 
-def
-
+def load_chat_model(fully_specified_name: str, tags: list[str] | None = None) -> BaseChatModel:
+    """Load a chat model from a fully specified name.
+
+    Args:
+        fully_specified_name (str): String in the format 'provider/model'.
+    """
+    provider, model = fully_specified_name.split("/", maxsplit=1)
+    if provider == "google_anthropic_vertex":
+        return ChatAnthropicVertex(model=model, temperature=0.2, location="asia-east1", tags=tags)
+    elif provider == "anthropic":
+        return ChatAnthropic(
+            model=model, temperature=1, thinking={"type": "enabled", "budget_tokens": 2048}, max_tokens=4096, tags=tags
+        )  # pyright: ignore[reportCallIssue]
+    elif provider == "azure":
+        return AzureChatOpenAI(model=model, api_version="2024-12-01-preview", azure_deployment=model, tags=tags)
+    else:
+        raise ValueError(f"Unsupported provider: {provider}")
 
 
 if __name__ == "__main__":
-    llm =
+    llm = load_chat_model("azure/gpt-4.1")
     print(llm.invoke("Hello, world!"))
```
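The provider prefix (everything before the first `/`) selects the client class; the remainder is passed through as the model name. A hedged usage sketch (model ids are placeholders; each branch needs its provider's credentials in the environment):

```python
from universal_mcp.agents.llm import load_chat_model

llm = load_chat_model("anthropic/claude-sonnet-4-20250514", tags=["react-agent"])
# "azure/<deployment>"              -> AzureChatOpenAI
# "google_anthropic_vertex/<model>" -> ChatAnthropicVertex (pinned to asia-east1)
# any other prefix                  -> ValueError("Unsupported provider: ...")
print(llm.invoke("Hello, world!").content)
```

Note that a model string with no `/` at all, such as the `load_chat_model("gpt-4.1")` call in `codeact/test.py` earlier, will raise at the tuple unpacking, since `split("/", maxsplit=1)` yields a single element.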
universal_mcp/agents/react.py
CHANGED
```diff
@@ -1,34 +1,36 @@
+from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.prebuilt import create_react_agent
 from loguru import logger
 
-from universal_mcp.
-from universal_mcp.tools
-from universal_mcp.
-
-from .base import BaseAgent
-from .llm import get_llm
+from universal_mcp.agents.base import BaseAgent
+from universal_mcp.agents.tools import load_agentr_tools, load_mcp_tools
+from universal_mcp.types import ToolConfig
 
 
 class ReactAgent(BaseAgent):
     def __init__(
-        self,
+        self,
+        name: str,
+        instructions: str,
+        model: str,
+        memory: BaseCheckpointSaver | None = None,
+        tools: ToolConfig | None = None,
+        max_iterations: int = 10,
+        **kwargs,
     ):
-        super().__init__(name, instructions, model)
-        self.
+        super().__init__(name, instructions, model, memory, **kwargs)
+        self.tools = tools
         self.max_iterations = max_iterations
-
-
-        if tools:
-
-
-
-
-
-
-
-    def _build_graph(self):
-        tools = self.tool_manager.list_tools(format=ToolFormat.LANGCHAIN) if self.tool_manager else []
+
+    async def _build_graph(self):
+        if self.tools:
+            config = self.tools.model_dump(exclude_none=True)
+            agentr_tools = await load_agentr_tools(config["agentrServers"]) if config.get("agentrServers") else []
+            mcp_tools = await load_mcp_tools(config["mcpServers"]) if config.get("mcpServers") else []
+            tools = agentr_tools + mcp_tools
+        else:
+            tools = []
+        logger.debug(f"Initialized ReactAgent: name={self.name}, model={self.model}")
         return create_react_agent(
             self.llm,
             tools,
@@ -53,6 +55,12 @@ if __name__ == "__main__":
     import asyncio
 
     agent = ReactAgent(
-        "Universal React Agent",
+        "Universal React Agent",
+        instructions="",
+        model="gpt-4o",
+        tools=ToolConfig(agentrServers={"google-mail": {"tools": ["send_email"]}}),
+    )
+    result = asyncio.run(
+        agent.run(user_input="Send an email with the subject 'testing react agent' to manoj@agentr.dev")
     )
-
+    print(result["messages"][-1].content)
```
universal_mcp/agents/simple.py
CHANGED
```diff
@@ -5,8 +5,8 @@ from langgraph.graph import END, START, StateGraph
 from langgraph.graph.message import add_messages
 from typing_extensions import TypedDict
 
-from .base import BaseAgent
-from .llm import
+from universal_mcp.agents.base import BaseAgent
+from universal_mcp.agents.llm import load_chat_model
 
 
 class State(TypedDict):
@@ -16,7 +16,7 @@ class State(TypedDict):
 class SimpleAgent(BaseAgent):
     def __init__(self, name: str, instructions: str, model: str):
         super().__init__(name, instructions, model)
-        self.llm =
+        self.llm = load_chat_model(model)
         self._graph = self._build_graph()
 
     def _build_graph(self):
```
universal_mcp/agents/tools.py
ADDED
```diff
@@ -0,0 +1,35 @@
+import json
+
+from langchain_mcp_adapters.client import MultiServerMCPClient
+
+from universal_mcp.agentr.integration import AgentrIntegration
+from universal_mcp.applications import app_from_slug
+from universal_mcp.tools.adapters import ToolFormat
+from universal_mcp.tools.manager import ToolManager
+from universal_mcp.types import ToolConfig
+
+
+async def load_agentr_tools(agentr_servers: dict):
+    tool_manager = ToolManager()
+    for app_name, tool_names in agentr_servers.items():
+        app = app_from_slug(app_name)
+        integration = AgentrIntegration(name=app_name)
+        app_instance = app(integration=integration)
+        tool_manager.register_tools_from_app(app_instance, tool_names=tool_names["tools"])
+    tools = tool_manager.list_tools(format=ToolFormat.LANGCHAIN)
+    return tools
+
+
+async def load_mcp_tools(mcp_servers: dict):
+    client = MultiServerMCPClient(mcp_servers)
+    tools = await client.get_tools()
+    return tools
+
+
+async def load_tools(path: str) -> ToolConfig:
+    with open(path) as f:
+        data = json.load(f)
+    config = ToolConfig.model_validate(data)
+    agentr_tools = await load_agentr_tools(config.model_dump(exclude_none=True)["agentrServers"])
+    mcp_tools = await load_mcp_tools(config.model_dump(exclude_none=True)["mcpServers"])
+    return agentr_tools + mcp_tools
```
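A hypothetical config for these loaders. `ToolConfig` lives in `universal_mcp/types.py` (+25 lines in this release); the `agentrServers` shape is taken from the `ReactAgent` example above, while the `mcpServers` entry follows the `langchain-mcp-adapters` connection format and is an assumption:

```python
from universal_mcp.types import ToolConfig

config = ToolConfig.model_validate(
    {
        "agentrServers": {"google-mail": {"tools": ["send_email"]}},
        # Assumed MCP entry shape, passed straight through to MultiServerMCPClient.
        "mcpServers": {
            "local": {"transport": "stdio", "command": "python", "args": ["-m", "my_mcp_server"]}
        },
    }
)
```

As written, `load_tools` is annotated `-> ToolConfig` but actually returns the combined tool list.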
universal_mcp/config.py
CHANGED
```diff
@@ -176,96 +176,3 @@ class ServerConfig(BaseSettings):
         with open(path) as f:
             data = json.load(f)
         return cls.model_validate(data)
-
-
-class ClientTransportConfig(BaseModel):
-    """Configuration for how an MCP client connects to an MCP server.
-
-    Specifies the transport protocol and its associated parameters, such as
-    the command for stdio, URL for HTTP-based transports (SSE, streamable_http),
-    and any necessary headers or environment variables.
-    """
-
-    transport: str | None = Field(
-        default=None,
-        description="The transport protocol (e.g., 'stdio', 'sse', 'streamable_http'). Auto-detected in model_validate if not set.",
-    )
-    command: str | None = Field(
-        default=None, description="The command to execute for 'stdio' transport (e.g., 'python -m mcp_server.run')."
-    )
-    args: list[str] = Field(default=[], description="List of arguments for the 'stdio' command.")
-    env: dict[str, str] = Field(default={}, description="Environment variables to set for the 'stdio' command.")
-    url: str | None = Field(default=None, description="The URL for 'sse' or 'streamable_http' transport.")
-    headers: dict[str, str] = Field(
-        default={}, description="HTTP headers to include for 'sse' or 'streamable_http' transport."
-    )
-
-    @model_validator(mode="after")
-    def determine_transport_if_not_set(self) -> Self:
-        """Determines and sets the transport type if not explicitly provided.
-
-        - If `command` is present, transport is set to 'stdio'.
-        - If `url` is present, transport is 'streamable_http' if URL ends with '/mcp',
-          otherwise 'sse' if URL ends with '/sse'.
-        - Raises ValueError if transport cannot be determined or if neither
-          `command` nor `url` is provided.
-        """
-        if self.command:
-            self.transport = "stdio"
-        elif self.url:
-            # Remove search params from url
-            url = self.url.split("?")[0]
-            if url.rstrip("/").endswith("mcp"):
-                self.transport = "streamable_http"
-            elif url.rstrip("/").endswith("sse"):
-                self.transport = "sse"
-            else:
-                raise ValueError(f"Unknown transport: {self.url}")
-        else:
-            raise ValueError("Either command or url must be provided")
-        return self
-
-
-class ClientConfig(BaseSettings):
-    """Configuration for a client application that interacts with MCP servers and an LLM.
-
-    Defines connections to one or more MCP servers (via `mcpServers`) and
-    optionally, settings for an LLM to be used by the client (e.g., by an agent).
-    """
-
-    mcpServers: dict[str, ClientTransportConfig] = Field(
-        ...,
-        description="Dictionary of MCP server connections. Keys are descriptive names for the server, values are `ClientTransportConfig` objects defining how to connect to each server.",
-    )
-    apps: list[AppConfig] = Field(
-        default=[],
-        description="List of application configurations to load",
-    )
-    store: StoreConfig | None = Field(
-        default=None,
-        description="Default credential store configuration for applications that do not define their own specific store.",
-    )
-    model: str = Field(
-        default="openrouter/auto",
-        description="The model to use for the LLM.",
-    )
-
-    @classmethod
-    def load_json_config(cls, path: Path) -> Self:
-        """Loads client configuration from a JSON file.
-
-        Args:
-            path (str, optional): The path to the JSON configuration file.
-                Defaults to "client_config.json".
-
-        Returns:
-            ClientConfig: An instance of ClientConfig populated with data
-                from the JSON file.
-        """
-        with open(path) as f:
-            data = json.load(f)
-        return cls.model_validate(data)
-
-    def save_json_config(self, path: str) -> None:
-        with open(path, "w") as f:
-            json.dump(self.model_dump(), f, indent=4)
```
universal_mcp/tools/manager.py
CHANGED
```diff
@@ -12,12 +12,7 @@ from universal_mcp.tools.adapters import (
     convert_tool_to_openai_tool,
 )
 from universal_mcp.tools.tools import Tool
-from universal_mcp.types import ToolFormat
-
-# Constants
-DEFAULT_IMPORTANT_TAG = "important"
-TOOL_NAME_SEPARATOR = "_"
-DEFAULT_APP_NAME = "common"
+from universal_mcp.types import DEFAULT_APP_NAME, DEFAULT_IMPORTANT_TAG, TOOL_NAME_SEPARATOR, ToolFormat
 
 
 def _get_app_and_tool_name(tool_name: str) -> tuple[str, str]:
@@ -31,8 +26,13 @@ def _get_app_and_tool_name(tool_name: str) -> tuple[str, str]:
     return app_name, tool_name_without_app_name
 
 
+def _sanitize_tool_names(tool_names: list[str]) -> list[str]:
+    """Sanitize tool names by removing empty strings and converting to lowercase."""
+    return [_get_app_and_tool_name(name)[1].lower() for name in tool_names if name]
+
+
 def _filter_by_name(tools: list[Tool], tool_names: list[str] | None) -> list[Tool]:
-    """Filter tools by name using
+    """Filter tools by name using set comparison for efficient matching.
 
     Args:
         tools: List of tools to filter.
@@ -45,16 +45,14 @@ def _filter_by_name(tools: list[Tool], tool_names: list[str] | None) -> list[Tool]:
         return tools
 
     logger.debug(f"Filtering tools by names: {tool_names}")
-
-
+    tool_names_set = set(_sanitize_tool_names(tool_names))
+    logger.debug(f"Tool names set: {tool_names_set}")
     filtered_tools = []
     for tool in tools:
-
-
-
-
-            break
-
+        if tool.tool_name.lower() in tool_names_set:
+            filtered_tools.append(tool)
+            logger.debug(f"Tool '{tool.name}' matched name filter")
+    logger.debug(f"Filtered tools: {[tool.name for tool in filtered_tools]}")
     return filtered_tools
 
 
@@ -200,11 +198,6 @@ class ToolManager:
             app_name: Application name to group the tools under.
         """
        for tool in tools:
-            app_name, tool_name = _get_app_and_tool_name(tool.name)
-
-            # Add prefix to tool name, if not already present
-            tool.name = f"{app_name}{TOOL_NAME_SEPARATOR}{tool_name}"
-            tool.tags.append(app_name)
             self.add_tool(tool)
 
     def remove_tool(self, name: str) -> bool:
@@ -259,14 +252,14 @@ class ToolManager:
 
         try:
             tool_instance = Tool.from_function(function)
-            tool_instance.
+            tool_instance.app_name = app.name
             if app.name not in tool_instance.tags:
                 tool_instance.tags.append(app.name)
             tools.append(tool_instance)
         except Exception as e:
             tool_name = getattr(function, "__name__", "unknown")
             logger.error(f"Failed to create Tool from '{tool_name}' in {app.name}: {e}")
-
+    print([tool.name for tool in tools])
     if tags:
         tools = _filter_by_tags(tools, tags)
 
```
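The rewritten `_filter_by_name` replaces the old per-tool scan (with its `break`) by a single set lookup over sanitized names. A self-contained sketch of the matching semantics, using a stand-in for `Tool` that carries only the `name`/`tool_name` attributes the hunk above touches:

```python
from dataclasses import dataclass


@dataclass
class FakeTool:
    name: str       # prefixed form, e.g. "google-mail_send_email"
    tool_name: str  # bare form, e.g. "send_email"


tools = [
    FakeTool("google-mail_send_email", "send_email"),
    FakeTool("github_star_repository", "star_repository"),
]
# _sanitize_tool_names drops empty strings, strips the app prefix, and
# lowercases, so "google-mail_SEND_EMAIL" and "send_email" match the same key.
wanted = {"send_email"}
filtered = [t for t in tools if t.tool_name.lower() in wanted]
print([t.name for t in filtered])  # ['google-mail_send_email']
```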