universal-mcp-agents 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of universal-mcp-agents might be problematic. Click here for more details.
- universal_mcp/agents/__init__.py +19 -0
- universal_mcp/agents/autoagent/__init__.py +30 -0
- universal_mcp/agents/autoagent/__main__.py +25 -0
- universal_mcp/agents/autoagent/context.py +26 -0
- universal_mcp/agents/autoagent/graph.py +151 -0
- universal_mcp/agents/autoagent/prompts.py +9 -0
- universal_mcp/agents/autoagent/state.py +27 -0
- universal_mcp/agents/autoagent/studio.py +25 -0
- universal_mcp/agents/autoagent/utils.py +13 -0
- universal_mcp/agents/base.py +129 -0
- universal_mcp/agents/bigtool/__init__.py +54 -0
- universal_mcp/agents/bigtool/__main__.py +24 -0
- universal_mcp/agents/bigtool/context.py +24 -0
- universal_mcp/agents/bigtool/graph.py +166 -0
- universal_mcp/agents/bigtool/prompts.py +31 -0
- universal_mcp/agents/bigtool/state.py +27 -0
- universal_mcp/agents/bigtool2/__init__.py +53 -0
- universal_mcp/agents/bigtool2/__main__.py +24 -0
- universal_mcp/agents/bigtool2/agent.py +11 -0
- universal_mcp/agents/bigtool2/context.py +33 -0
- universal_mcp/agents/bigtool2/graph.py +169 -0
- universal_mcp/agents/bigtool2/prompts.py +12 -0
- universal_mcp/agents/bigtool2/state.py +27 -0
- universal_mcp/agents/bigtoolcache/__init__.py +53 -0
- universal_mcp/agents/bigtoolcache/__main__.py +24 -0
- universal_mcp/agents/bigtoolcache/agent.py +11 -0
- universal_mcp/agents/bigtoolcache/context.py +33 -0
- universal_mcp/agents/bigtoolcache/graph.py +176 -0
- universal_mcp/agents/bigtoolcache/prompts.py +13 -0
- universal_mcp/agents/bigtoolcache/state.py +27 -0
- universal_mcp/agents/builder.py +146 -0
- universal_mcp/agents/cli.py +27 -0
- universal_mcp/agents/codeact/__init__.py +243 -0
- universal_mcp/agents/codeact/sandbox.py +27 -0
- universal_mcp/agents/codeact/test.py +15 -0
- universal_mcp/agents/codeact/utils.py +61 -0
- universal_mcp/agents/hil.py +104 -0
- universal_mcp/agents/llm.py +45 -0
- universal_mcp/agents/planner/__init__.py +37 -0
- universal_mcp/agents/planner/__main__.py +24 -0
- universal_mcp/agents/planner/graph.py +81 -0
- universal_mcp/agents/planner/prompts.py +1 -0
- universal_mcp/agents/planner/state.py +12 -0
- universal_mcp/agents/react.py +76 -0
- universal_mcp/agents/shared/tool_node.py +236 -0
- universal_mcp/agents/simple.py +40 -0
- universal_mcp/agents/tools.py +35 -0
- universal_mcp/agents/utils.py +111 -0
- universal_mcp_agents-0.1.2.dist-info/METADATA +21 -0
- universal_mcp_agents-0.1.2.dist-info/RECORD +51 -0
- universal_mcp_agents-0.1.2.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from datetime import UTC, datetime
|
|
3
|
+
from typing import Literal, TypedDict, cast
|
|
4
|
+
|
|
5
|
+
from langchain_anthropic import ChatAnthropic
|
|
6
|
+
from langchain_core.language_models import BaseChatModel
|
|
7
|
+
from langchain_core.messages import AIMessage, ToolMessage
|
|
8
|
+
from langchain_core.tools import tool
|
|
9
|
+
from langgraph.graph import StateGraph
|
|
10
|
+
from langgraph.runtime import Runtime
|
|
11
|
+
from langgraph.types import Command
|
|
12
|
+
|
|
13
|
+
from universal_mcp.agents.bigtool.context import Context
|
|
14
|
+
from universal_mcp.agents.bigtool.state import State
|
|
15
|
+
from universal_mcp.logger import logger
|
|
16
|
+
from universal_mcp.tools.registry import ToolRegistry
|
|
17
|
+
from universal_mcp.types import ToolFormat
|
|
18
|
+
|
|
19
|
+
from .prompts import SELECT_TOOL_PROMPT
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def build_graph(
    tool_registry: ToolRegistry,
    llm: BaseChatModel,
    tool_selection_llm: BaseChatModel,
):
    """Build the bigtool agent StateGraph.

    Nodes: ``call_model`` (one LLM turn), ``select_tools`` (semantic search
    plus LLM-based selection via ``tool_selection_llm``), ``call_tools``
    (tool execution through the registry). Returns the uncompiled
    ``StateGraph`` builder; the caller compiles it with a checkpointer.
    """

    @tool
    async def retrieve_tools(task_query: str) -> list[str]:
        """Retrieve tools for a given task.
        Task query should be atomic (doable with a single tool).
        For tasks requiring multiple tools, call this tool multiple times for each subtask."""
        logger.info(f"Retrieving tools for task: '{task_query}'")
        try:
            # Semantic search over the registry for candidate tools.
            tools_list = await tool_registry.search_tools(task_query, limit=10)
            tool_candidates = [f"{tool['id']}: {tool['description']}" for tool in tools_list]
            logger.info(f"Found {len(tool_candidates)} candidate tools.")

            # Structured-output schema for the selection model.
            class ToolSelectionOutput(TypedDict):
                tool_names: list[str]

            model = tool_selection_llm
            # Describe connected vs not-connected apps so the selector can
            # prefer apps the user has already linked.
            app_ids = await tool_registry.list_all_apps()
            connections = await tool_registry.list_connected_apps()
            connection_ids = set([connection["app_id"] for connection in connections])
            connected_apps = [app["id"] for app in app_ids if app["id"] in connection_ids]
            unconnected_apps = [app["id"] for app in app_ids if app["id"] not in connection_ids]
            app_id_descriptions = "These are the apps connected to the user's account:\n" + "\n".join(
                [f"{app}" for app in connected_apps]
            )
            if unconnected_apps:
                app_id_descriptions += "\n\nOther (not connected) apps: " + "\n".join(
                    [f"{app}" for app in unconnected_apps]
                )

            # Ask the selection model to pick tool ids from the candidates.
            response = await model.with_structured_output(schema=ToolSelectionOutput, method="json_mode").ainvoke(
                SELECT_TOOL_PROMPT.format(
                    app_ids=app_id_descriptions, tool_candidates="\n - ".join(tool_candidates), task=task_query
                )
            )

            selected_tool_names = cast(ToolSelectionOutput, response)["tool_names"]
            logger.info(f"Selected tools: {selected_tool_names}")
            return selected_tool_names
        except Exception as e:
            # Best-effort: an empty selection lets the agent keep running.
            logger.error(f"Error retrieving tools: {e}")
            return []

    async def call_model(state: State, runtime: Runtime[Context]) -> Command[Literal["select_tools", "call_tools"]]:
        """Run one LLM turn and route to tool selection/execution or finish."""
        logger.info("Calling model...")
        try:
            system_message = runtime.context.system_prompt.format(system_time=datetime.now(tz=UTC).isoformat())
            messages = [{"role": "system", "content": system_message}, *state["messages"]]

            logger.info(f"Selected tool IDs: {state['selected_tool_ids']}")
            if len(state["selected_tool_ids"]) > 0:
                selected_tools = await tool_registry.export_tools(tools=state["selected_tool_ids"], format=ToolFormat.LANGCHAIN)
                logger.info(f"Exported {len(selected_tools)} tools for model.")
            else:
                selected_tools = []

            model = llm
            if isinstance(model, ChatAnthropic):
                # Anthropic supports prompt caching for the bound tool schemas.
                model_with_tools = model.bind_tools(
                    [retrieve_tools, *selected_tools], tool_choice="auto", cache_control={"type": "ephemeral"}
                )
            else:
                model_with_tools = model.bind_tools([retrieve_tools, *selected_tools], tool_choice="auto")
            response = cast(AIMessage, await model_with_tools.ainvoke(messages))

            if response.tool_calls:
                logger.info(f"Model responded with {len(response.tool_calls)} tool calls.")
                # NOTE(review): assumes Claude with tool_choice="auto" emits at
                # most one tool call per turn — confirm for other providers.
                if len(response.tool_calls) > 1:
                    raise Exception("Not possible in Claude with llm.bind_tools(tools=tools, tool_choice='auto')")
                tool_call = response.tool_calls[0]
                if tool_call["name"] == retrieve_tools.name:
                    logger.info("Model requested to select tools.")
                    return Command(goto="select_tools", update={"messages": [response]})
                elif tool_call["name"] not in state["selected_tool_ids"]:
                    # Tool not in the current working set: verify it exists in
                    # the registry before proceeding to execute it.
                    try:
                        await tool_registry.export_tools([tool_call["name"]], ToolFormat.LANGCHAIN)
                        logger.info(
                            f"Tool '{tool_call['name']}' not in selected tools, but available. Proceeding to call."
                        )
                        return Command(goto="call_tools", update={"messages": [response]})
                    except Exception as e:
                        logger.error(f"Unexpected tool call: {tool_call['name']}. Error: {e}")
                        raise Exception(
                            f"Unexpected tool call: {tool_call['name']}. Available tools: {state['selected_tool_ids']}"
                        ) from e
                # Tool is already in the selected set; execute it.
                logger.info(f"Proceeding to call tool: {tool_call['name']}")
                return Command(goto="call_tools", update={"messages": [response]})
            else:
                # Plain assistant message: end the graph run.
                logger.info("Model responded with a message, ending execution.")
                return Command(update={"messages": [response]})
        except Exception as e:
            logger.error(f"Error in call_model: {e}")
            raise

    async def select_tools(state: State, runtime: Runtime[Context]) -> Command[Literal["call_model"]]:
        """Resolve the pending retrieve_tools call and update the tool set."""
        logger.info("Selecting tools...")
        try:
            tool_call = state["messages"][-1].tool_calls[0]
            selected_tool_names = await retrieve_tools.ainvoke(input=tool_call["args"])
            tool_msg = ToolMessage(f"Available tools: {selected_tool_names}", tool_call_id=tool_call["id"])
            logger.info(f"Tools selected: {selected_tool_names}")
            return Command(goto="call_model", update={"messages": [tool_msg], "selected_tool_ids": selected_tool_names})
        except Exception as e:
            logger.error(f"Error in select_tools: {e}")
            raise

    async def call_tools(state: State) -> Command[Literal["call_model"]]:
        """Execute the tool calls from the last AI message via the registry."""
        logger.info("Calling tools...")
        outputs = []
        recent_tool_ids = []
        for tool_call in state["messages"][-1].tool_calls:
            logger.info(f"Executing tool: {tool_call['name']} with args: {tool_call['args']}")
            try:
                # Ensure the tool is exported/loadable before invoking it.
                await tool_registry.export_tools([tool_call["name"]], ToolFormat.LANGCHAIN)
                tool_result = await tool_registry.call_tool(tool_call["name"], tool_call["args"])
                logger.info(f"Tool '{tool_call['name']}' executed successfully.")
                outputs.append(
                    ToolMessage(
                        content=json.dumps(tool_result),
                        name=tool_call["name"],
                        tool_call_id=tool_call["id"],
                    )
                )
                recent_tool_ids.append(tool_call["name"])
            except Exception as e:
                # Report the failure back to the model instead of aborting.
                logger.error(f"Error executing tool '{tool_call['name']}': {e}")
                outputs.append(
                    ToolMessage(
                        content=json.dumps("Error: " + str(e)),
                        name=tool_call["name"],
                        tool_call_id=tool_call["id"],
                    )
                )
        return Command(goto="call_model", update={"messages": outputs, "selected_tool_ids": recent_tool_ids})

    builder = StateGraph(State, context_schema=Context)

    builder.add_node(call_model)
    builder.add_node(select_tools)
    builder.add_node(call_tools)
    builder.set_entry_point("call_model")
    return builder
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
"""Default prompts used by the agent."""
|
|
2
|
+
|
|
3
|
+
SYSTEM_PROMPT = """You are a helpful AI assistant.
|
|
4
|
+
|
|
5
|
+
**Core Directives:**
|
|
6
|
+
1. **Always Use Tools for Tasks:** For any user request that requires an action (e.g., sending an email, searching for information, creating an event), you MUST use a tool. Do not answer from your own knowledge or refuse a task if a tool might exist for it.
|
|
7
|
+
2. **First Step is ALWAYS `retrieve_tools`:** Before you can use any other tool, you MUST first call the `retrieve_tools` function to find the right tool for the user's request. This is your mandatory first action.
|
|
8
|
+
3. **Strictly Follow the Process:** Your only job in your first turn is to analyze the user's request and call `retrieve_tools` with a concise query describing the core task. Do not engage in conversation.
|
|
9
|
+
|
|
10
|
+
System time: {system_time}
|
|
11
|
+
|
|
12
|
+
When multiple tools are available for the same task, you must ask the user.
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
SELECT_TOOL_PROMPT = """You are an AI assistant that helps the user perform tasks using various apps (each app has multiple tools).
|
|
16
|
+
You will be provided with a task and a list of tools which might be relevant for this task.
|
|
17
|
+
|
|
18
|
+
Your goal is to select the most appropriate tool for the given task.
|
|
19
|
+
<task>
|
|
20
|
+
{task}
|
|
21
|
+
</task>
|
|
22
|
+
|
|
23
|
+
These are the list of apps available to you:
|
|
24
|
+
{app_ids}
|
|
25
|
+
Note that when multiple apps seem relevant for a task, prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must choose both apps tools. In case the user specifically asks you to use an app that is not connected, select the tool.
|
|
26
|
+
|
|
27
|
+
<tool_candidates>
|
|
28
|
+
- {tool_candidates}
|
|
29
|
+
</tool_candidates>
|
|
30
|
+
|
|
31
|
+
"""
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
from typing import Annotated
|
|
2
|
+
|
|
3
|
+
from langgraph.prebuilt.chat_agent_executor import AgentState
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def _enqueue(left: list, right: list) -> list:
|
|
7
|
+
"""Treat left as a FIFO queue, append new items from right (preserve order),
|
|
8
|
+
keep items unique, and cap total size to 20 (drop oldest items)."""
|
|
9
|
+
max_size = 30
|
|
10
|
+
preferred_size = 20
|
|
11
|
+
if len(right) > preferred_size:
|
|
12
|
+
preferred_size = min(max_size, len(right))
|
|
13
|
+
queue = list(left or [])
|
|
14
|
+
|
|
15
|
+
for item in right[:preferred_size] or []:
|
|
16
|
+
if item in queue:
|
|
17
|
+
queue.remove(item)
|
|
18
|
+
queue.append(item)
|
|
19
|
+
|
|
20
|
+
if len(queue) > preferred_size:
|
|
21
|
+
queue = queue[-preferred_size:]
|
|
22
|
+
|
|
23
|
+
return queue
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class State(AgentState):
    # Rolling FIFO of tool ids currently exposed to the model; merged by the
    # _enqueue reducer (unique, most-recent-last, size-capped).
    selected_tool_ids: Annotated[list[str], _enqueue]
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
from langgraph.checkpoint.base import BaseCheckpointSaver
|
|
2
|
+
|
|
3
|
+
from universal_mcp.agents.base import BaseAgent
|
|
4
|
+
from universal_mcp.agents.llm import load_chat_model
|
|
5
|
+
from universal_mcp.logger import logger
|
|
6
|
+
from universal_mcp.tools.registry import ToolRegistry
|
|
7
|
+
|
|
8
|
+
from .graph import build_graph
|
|
9
|
+
from .prompts import SYSTEM_PROMPT
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class BigToolAgent2(BaseAgent):
    """Agent that discovers, loads and executes tools dynamically via a
    search_tools/load_tools LangGraph graph (see .graph.build_graph)."""

    def __init__(
        self,
        name: str,
        instructions: str,
        model: str,
        registry: ToolRegistry,
        memory: BaseCheckpointSaver | None = None,
        **kwargs,
    ):
        # Combine the base system prompt with agent-specific instructions
        full_instructions = f"{SYSTEM_PROMPT}\n\n**User Instructions:**\n{instructions}"
        super().__init__(name, full_instructions, model, memory, **kwargs)

        self.registry = registry
        self.llm = load_chat_model(self.model)
        # NOTE(review): stored but not read in this class — presumably consumed
        # by BaseAgent when invoking the graph; confirm.
        self.recursion_limit = kwargs.get("recursion_limit", 10)

        logger.info(f"BigToolAgent '{self.name}' initialized with model '{self.model}'.")

    async def _build_graph(self):
        """Build the bigtool agent graph using the existing create_agent function."""
        logger.info(f"Building graph for BigToolAgent '{self.name}'...")
        try:
            graph_builder = build_graph(
                tool_registry=self.registry,
                llm=self.llm,
            )

            # Compile with the (optional) checkpointer so threads persist.
            compiled_graph = graph_builder.compile(checkpointer=self.memory)
            logger.info("Graph built and compiled successfully.")
            return compiled_graph
        except Exception as e:
            logger.error(f"Error building graph for BigToolAgent '{self.name}': {e}")
            raise

    @property
    def graph(self):
        # Compiled graph; presumably set by BaseAgent from _build_graph — confirm.
        return self._graph
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
__all__ = ["BigToolAgent2"]
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
|
|
3
|
+
from loguru import logger
|
|
4
|
+
|
|
5
|
+
from universal_mcp.agentr.registry import AgentrRegistry
|
|
6
|
+
from universal_mcp.agents.bigtool2 import BigToolAgent2
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
async def main():
    """Run a one-shot demo conversation through the bigtool2 agent."""
    registry = AgentrRegistry()
    agent = BigToolAgent2(
        name="bigtool",
        instructions="You are a helpful assistant that can use tools to help the user.",
        model="azure/gpt-4.1",
        registry=registry,
    )
    stream = agent.stream(
        user_input="Send an email to manoj@agentr.dev",
        thread_id="test123",
    )
    async for event in stream:
        logger.info(event.content)


if __name__ == "__main__":
    asyncio.run(main())
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
from universal_mcp.agents.bigtool2 import BigToolAgent2
|
|
2
|
+
from universal_mcp.agentr.registry import AgentrRegistry
|
|
3
|
+
|
|
4
|
+
async def agent():
    """Construct a BigToolAgent2 and return its compiled LangGraph graph."""
    bigtool_agent = BigToolAgent2(
        name="BigTool Agent 2",
        instructions="You are a helpful assistant that can use various tools to complete tasks.",
        model="anthropic/claude-4-sonnet-20250514",
        registry=AgentrRegistry(),
    )
    return await bigtool_agent._build_graph()
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
from dataclasses import dataclass, field
|
|
2
|
+
|
|
3
|
+
from .prompts import SYSTEM_PROMPT
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
@dataclass(kw_only=True)
class Context:
    """The context for the agent."""

    # Prompt template injected as the system message; the graph formats it
    # with {system_time} before each model call.
    system_prompt: str = field(
        default=SYSTEM_PROMPT,
        metadata={
            "description": "The system prompt to use for the agent's interactions. "
            "This prompt sets the context and behavior for the agent."
        },
    )

    # provider/model-name identifier for the main chat model.
    model: str = field(
        default="anthropic/claude-4-sonnet-20250514",
        metadata={
            "description": "The name of the language model to use for the agent's main interactions. "
            "Should be in the form: provider/model-name."
        },
    )

    # Safety valve against unbounded call_model/call_tools cycles.
    recursion_limit: int = field(
        default=10,
        metadata={
            "description": "The maximum number of times the agent can call itself recursively. "
            "This is to prevent infinite recursion."
        },
    )
|
|
33
|
+
|
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from datetime import UTC, datetime
|
|
3
|
+
from typing import Literal, TypedDict, cast
|
|
4
|
+
|
|
5
|
+
from langchain_anthropic import ChatAnthropic
|
|
6
|
+
from langchain_core.language_models import BaseChatModel
|
|
7
|
+
from langchain_core.messages import AIMessage, ToolMessage
|
|
8
|
+
from langchain_core.tools import tool
|
|
9
|
+
from langgraph.graph import StateGraph
|
|
10
|
+
from langgraph.runtime import Runtime
|
|
11
|
+
from langgraph.types import Command
|
|
12
|
+
|
|
13
|
+
from universal_mcp.agents.bigtool2.context import Context
|
|
14
|
+
from universal_mcp.agents.bigtool2.state import State
|
|
15
|
+
from universal_mcp.logger import logger
|
|
16
|
+
from universal_mcp.tools.registry import ToolRegistry
|
|
17
|
+
from universal_mcp.types import ToolFormat
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def build_graph(
    tool_registry: ToolRegistry,
    llm: BaseChatModel
):
    """Build the bigtool2 agent StateGraph.

    The model loops through three nodes: ``call_model`` (LLM turn),
    ``select_tools`` (search candidate tools), and ``call_tools`` (execute).
    The model first calls ``search_tools`` to discover candidates, then
    ``load_tools`` to commit a working set, then the actual tools.

    Args:
        tool_registry: Registry used to search, export and call tools.
        llm: Chat model used for the agent's main interactions.

    Returns:
        The uncompiled ``StateGraph`` builder; the caller compiles it.
    """

    @tool
    async def search_tools(queries: list[str]) -> str:
        """Search tools for a given list of queries
        Each single query should be atomic (doable with a single tool).
        For tasks requiring multiple tools, add separate queries for each subtask"""
        logger.info(f"Searching tools for queries: '{queries}'")
        try:
            all_tool_candidates = ""
            app_ids = await tool_registry.list_all_apps()
            connections = await tool_registry.list_connected_apps()
            connection_ids = {connection["app_id"] for connection in connections}
            connected_apps = [app["id"] for app in app_ids if app["id"] in connection_ids]
            app_tools: dict[str, list[str]] = {}
            for task_query in queries:
                tools_list = await tool_registry.search_tools(task_query, limit=40)
                tool_candidates = [f"{tool['id']}: {tool['description']}" for tool in tools_list]
                # Group candidates by app (tool ids look like "<app>__<tool>"),
                # keeping at most 3 tools per app and at most 10 apps overall.
                # Renamed loop var from `tool` to avoid shadowing the decorator.
                for candidate in tool_candidates:
                    app = candidate.split("__")[0]
                    if app not in app_tools:
                        if len(app_tools) >= 10:
                            break
                        app_tools[app] = []
                    if len(app_tools[app]) < 3:
                        app_tools[app].append(candidate)
            for app in app_tools:
                app_status = "connected" if app in connected_apps else "NOT connected"
                all_tool_candidates += f"Tools from {app} (status: {app_status} by user):\n"
                for candidate in app_tools[app]:
                    all_tool_candidates += f" - {candidate}\n"
                all_tool_candidates += "\n"

            return all_tool_candidates
        except Exception as e:
            # Best-effort: return the error text so the model can react.
            logger.error(f"Error retrieving tools: {e}")
            return "Error: " + str(e)

    @tool
    async def load_tools(tool_ids: list[str]) -> list[str]:
        """Load the tools for the given tool ids. Returns the tool ids."""
        # No-op by design: the ids are committed to state by call_model.
        return tool_ids

    async def call_model(
        state: State, runtime: Runtime[Context]
    ) -> Command[Literal["select_tools", "call_tools", "call_model"]]:
        """Run one LLM turn and route based on the (single) tool call.

        The return annotation includes "call_model" because the load_tools
        branch loops straight back into this node.
        """
        logger.info("Calling model...")
        try:
            system_message = runtime.context.system_prompt.format(system_time=datetime.now(tz=UTC).isoformat())
            messages = [{"role": "system", "content": system_message}, *state["messages"]]

            logger.info(f"Selected tool IDs: {state['selected_tool_ids']}")
            if len(state["selected_tool_ids"]) > 0:
                selected_tools = await tool_registry.export_tools(
                    tools=state["selected_tool_ids"], format=ToolFormat.LANGCHAIN
                )
                logger.info(f"Exported {len(selected_tools)} tools for model.")
            else:
                selected_tools = []

            model = llm

            model_with_tools = model.bind_tools([search_tools, load_tools, *selected_tools], tool_choice="auto")
            response = cast(AIMessage, await model_with_tools.ainvoke(messages))

            if response.tool_calls:
                logger.info(f"Model responded with {len(response.tool_calls)} tool calls.")
                # NOTE(review): assumes tool_choice="auto" yields at most one
                # tool call per turn — confirm for non-Claude providers.
                if len(response.tool_calls) > 1:
                    raise Exception("Not possible in Claude with llm.bind_tools(tools=tools, tool_choice='auto')")
                tool_call = response.tool_calls[0]
                if tool_call["name"] == search_tools.name:
                    logger.info("Model requested to select tools.")
                    return Command(goto="select_tools", update={"messages": [response]})
                elif tool_call["name"] == load_tools.name:
                    # Commit the chosen tool ids to state and loop back so the
                    # next model turn sees them bound as callable tools.
                    logger.info("Model requested to load tools.")
                    tool_msg = ToolMessage("Loaded tools.", tool_call_id=tool_call["id"])
                    selected_tool_ids = tool_call["args"]["tool_ids"]
                    logger.info(f"Loaded tools: {selected_tool_ids}")
                    return Command(
                        goto="call_model",
                        update={"messages": [response, tool_msg], "selected_tool_ids": selected_tool_ids},
                    )

                elif tool_call["name"] not in state["selected_tool_ids"]:
                    # Tool not in the working set: verify it exists in the
                    # registry before proceeding to execute it.
                    try:
                        await tool_registry.export_tools([tool_call["name"]], ToolFormat.LANGCHAIN)
                        logger.info(
                            f"Tool '{tool_call['name']}' not in selected tools, but available. Proceeding to call."
                        )
                        return Command(goto="call_tools", update={"messages": [response]})
                    except Exception as e:
                        logger.error(f"Unexpected tool call: {tool_call['name']}. Error: {e}")
                        raise Exception(
                            f"Unexpected tool call: {tool_call['name']}. Available tools: {state['selected_tool_ids']}"
                        ) from e
                # Tool is already loaded; execute it.
                logger.info(f"Proceeding to call tool: {tool_call['name']}")
                return Command(goto="call_tools", update={"messages": [response]})
            else:
                # Plain assistant message: end the graph run.
                logger.info("Model responded with a message, ending execution.")
                return Command(update={"messages": [response]})
        except Exception as e:
            logger.error(f"Error in call_model: {e}")
            raise

    async def select_tools(state: State, runtime: Runtime[Context]) -> Command[Literal["call_model"]]:
        """Resolve the pending search_tools call and report candidates back."""
        logger.info("Selecting tools...")
        try:
            tool_call = state["messages"][-1].tool_calls[0]
            searched_tools = await search_tools.ainvoke(input=tool_call["args"])
            tool_msg = ToolMessage(f"Available tools: {searched_tools}", tool_call_id=tool_call["id"])
            # selected_tool_ids is intentionally NOT updated here; the model
            # must confirm its choice through load_tools.
            return Command(goto="call_model", update={"messages": [tool_msg]})
        except Exception as e:
            logger.error(f"Error in select_tools: {e}")
            raise

    async def call_tools(state: State) -> Command[Literal["call_model"]]:
        """Execute the tool calls from the last AI message via the registry."""
        logger.info("Calling tools...")
        outputs = []
        recent_tool_ids = []
        for tool_call in state["messages"][-1].tool_calls:
            logger.info(f"Executing tool: {tool_call['name']} with args: {tool_call['args']}")
            try:
                # Ensure the tool is exported/loadable before invoking it.
                await tool_registry.export_tools([tool_call["name"]], ToolFormat.LANGCHAIN)
                tool_result = await tool_registry.call_tool(tool_call["name"], tool_call["args"])
                logger.info(f"Tool '{tool_call['name']}' executed successfully.")
                outputs.append(
                    ToolMessage(
                        content=json.dumps(tool_result),
                        name=tool_call["name"],
                        tool_call_id=tool_call["id"],
                    )
                )
                recent_tool_ids.append(tool_call["name"])
            except Exception as e:
                # Report the failure back to the model instead of aborting.
                logger.error(f"Error executing tool '{tool_call['name']}': {e}")
                outputs.append(
                    ToolMessage(
                        content=json.dumps("Error: " + str(e)),
                        name=tool_call["name"],
                        tool_call_id=tool_call["id"],
                    )
                )
        return Command(goto="call_model", update={"messages": outputs, "selected_tool_ids": recent_tool_ids})

    builder = StateGraph(State, context_schema=Context)

    builder.add_node(call_model)
    builder.add_node(select_tools)
    builder.add_node(call_tools)
    builder.set_entry_point("call_model")
    return builder
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
"""Default prompts used by the agent."""
|
|
2
|
+
|
|
3
|
+
SYSTEM_PROMPT = """You are a helpful AI assistant.
|
|
4
|
+
|
|
5
|
+
**Core Directives:**
|
|
6
|
+
1. **Always Use Tools for Tasks:** For any user request that requires an action (e.g., sending an email, searching for information, creating an event), you MUST use a tool. Do not answer from your own knowledge or refuse a task if a tool might exist for it.
|
|
7
|
+
2. **First Step is ALWAYS `search_tools`:** Before you can use any other tool, you MUST first call the `search_tools` function to find the right tools for the user's request. This is your mandatory first action. You must not use the same/similar query multiple times in the list. The list should have multiple queries only if the task has clearly different sub-tasks.
|
|
8
|
+
3. **Load Tools:** After looking at the output of `search_tools`, you MUST call the `load_tools` function to load only the tools you want to use. Use your judgement to eliminate irrelevant apps that came up just because of semantic similarity. However, sometimes, multiple apps might be relevant for the same task. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must ask the user to choose the app. In case the user asks you to use an app that is not connected, call the apps tools normally. The tool will return a link for connecting that you should pass on to the user.
|
|
9
|
+
3. **Strictly Follow the Process:** Your only job in your first turn is to analyze the user's request and call `search_tools` with a concise query describing the core task. Do not engage in conversation.
|
|
10
|
+
|
|
11
|
+
System time: {system_time}
|
|
12
|
+
"""
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
from typing import Annotated
|
|
2
|
+
|
|
3
|
+
from langgraph.prebuilt.chat_agent_executor import AgentState
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def _enqueue(left: list, right: list) -> list:
|
|
7
|
+
"""Treat left as a FIFO queue, append new items from right (preserve order),
|
|
8
|
+
keep items unique, and cap total size to 20 (drop oldest items)."""
|
|
9
|
+
max_size = 30
|
|
10
|
+
preferred_size = 20
|
|
11
|
+
if len(right) > preferred_size:
|
|
12
|
+
preferred_size = min(max_size, len(right))
|
|
13
|
+
queue = list(left or [])
|
|
14
|
+
|
|
15
|
+
for item in right[:preferred_size] or []:
|
|
16
|
+
if item in queue:
|
|
17
|
+
queue.remove(item)
|
|
18
|
+
queue.append(item)
|
|
19
|
+
|
|
20
|
+
if len(queue) > preferred_size:
|
|
21
|
+
queue = queue[-preferred_size:]
|
|
22
|
+
|
|
23
|
+
return queue
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class State(AgentState):
    # Rolling FIFO of tool ids currently exposed to the model; merged by the
    # _enqueue reducer (unique, most-recent-last, size-capped).
    selected_tool_ids: Annotated[list[str], _enqueue]
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
from langgraph.checkpoint.base import BaseCheckpointSaver
|
|
2
|
+
|
|
3
|
+
from universal_mcp.agents.base import BaseAgent
|
|
4
|
+
from universal_mcp.agents.llm import load_chat_model
|
|
5
|
+
from universal_mcp.logger import logger
|
|
6
|
+
from universal_mcp.tools.registry import ToolRegistry
|
|
7
|
+
|
|
8
|
+
from .graph import build_graph
|
|
9
|
+
from .prompts import SYSTEM_PROMPT
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class BigToolAgentCache(BaseAgent):
    """Cached variant of the bigtool agent; wires the bigtoolcache graph."""

    def __init__(
        self,
        name: str,
        instructions: str,
        model: str,
        registry: ToolRegistry,
        memory: BaseCheckpointSaver | None = None,
        **kwargs,
    ):
        # Combine the base system prompt with agent-specific instructions
        full_instructions = f"{SYSTEM_PROMPT}\n\n**User Instructions:**\n{instructions}"
        super().__init__(name, full_instructions, model, memory, **kwargs)

        self.registry = registry
        self.llm = load_chat_model(self.model)
        # NOTE(review): stored but not read in this class — presumably consumed
        # by BaseAgent when invoking the graph; confirm.
        self.recursion_limit = kwargs.get("recursion_limit", 10)

        # NOTE(review): log label "BigToolAgent" does not match the class name;
        # kept byte-identical here since it is runtime output.
        logger.info(f"BigToolAgent '{self.name}' initialized with model '{self.model}'.")

    async def _build_graph(self):
        """Build the bigtool agent graph using the existing create_agent function."""
        logger.info(f"Building graph for BigToolAgent '{self.name}'...")
        try:
            graph_builder = build_graph(
                tool_registry=self.registry,
                llm=self.llm,
            )

            # Compile with the (optional) checkpointer so threads persist.
            compiled_graph = graph_builder.compile(checkpointer=self.memory)
            logger.info("Graph built and compiled successfully.")
            return compiled_graph
        except Exception as e:
            logger.error(f"Error building graph for BigToolAgentCache '{self.name}': {e}")
            raise

    @property
    def graph(self):
        # Compiled graph; presumably set by BaseAgent from _build_graph — confirm.
        return self._graph
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
__all__ = ["BigToolAgentCache"]
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
|
|
3
|
+
from loguru import logger
|
|
4
|
+
|
|
5
|
+
from universal_mcp.agentr.registry import AgentrRegistry
|
|
6
|
+
from universal_mcp.agents.bigtoolcache import BigToolAgentCache
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
async def main():
    """Run a one-shot demo conversation through the bigtoolcache agent."""
    registry = AgentrRegistry()
    agent = BigToolAgentCache(
        name="bigtoolcache",
        instructions="You are a helpful assistant that can use tools to help the user.",
        model="azure/gpt-4.1",
        registry=registry,
    )
    stream = agent.stream(
        user_input="Send an email to manoj@agentr.dev",
        thread_id="test123",
    )
    async for event in stream:
        logger.info(event.content)


if __name__ == "__main__":
    asyncio.run(main())
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
from universal_mcp.agents.bigtoolcache import BigToolAgentCache
|
|
2
|
+
from universal_mcp.agentr.registry import AgentrRegistry
|
|
3
|
+
|
|
4
|
+
async def agent():
    # Factory (e.g. for LangGraph Studio): builds and returns the compiled graph.
    # NOTE(review): display name "BigTool Agent 2" looks copy-pasted from the
    # bigtool2 variant — confirm whether it should say "BigTool Agent Cache".
    agent_object = await BigToolAgentCache(
        name="BigTool Agent 2",
        instructions="You are a helpful assistant that can use various tools to complete tasks.",
        model="anthropic/claude-4-sonnet-20250514",
        registry=AgentrRegistry(),
    )._build_graph()
    return agent_object
|