universal-mcp-agents 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of universal-mcp-agents might be problematic.
- universal_mcp/agents/__init__.py +19 -0
- universal_mcp/agents/autoagent/__init__.py +30 -0
- universal_mcp/agents/autoagent/__main__.py +25 -0
- universal_mcp/agents/autoagent/context.py +26 -0
- universal_mcp/agents/autoagent/graph.py +151 -0
- universal_mcp/agents/autoagent/prompts.py +9 -0
- universal_mcp/agents/autoagent/state.py +27 -0
- universal_mcp/agents/autoagent/studio.py +25 -0
- universal_mcp/agents/autoagent/utils.py +13 -0
- universal_mcp/agents/base.py +129 -0
- universal_mcp/agents/bigtool/__init__.py +54 -0
- universal_mcp/agents/bigtool/__main__.py +24 -0
- universal_mcp/agents/bigtool/context.py +24 -0
- universal_mcp/agents/bigtool/graph.py +166 -0
- universal_mcp/agents/bigtool/prompts.py +31 -0
- universal_mcp/agents/bigtool/state.py +27 -0
- universal_mcp/agents/bigtool2/__init__.py +53 -0
- universal_mcp/agents/bigtool2/__main__.py +24 -0
- universal_mcp/agents/bigtool2/agent.py +11 -0
- universal_mcp/agents/bigtool2/context.py +33 -0
- universal_mcp/agents/bigtool2/graph.py +169 -0
- universal_mcp/agents/bigtool2/prompts.py +12 -0
- universal_mcp/agents/bigtool2/state.py +27 -0
- universal_mcp/agents/bigtoolcache/__init__.py +53 -0
- universal_mcp/agents/bigtoolcache/__main__.py +24 -0
- universal_mcp/agents/bigtoolcache/agent.py +11 -0
- universal_mcp/agents/bigtoolcache/context.py +33 -0
- universal_mcp/agents/bigtoolcache/graph.py +176 -0
- universal_mcp/agents/bigtoolcache/prompts.py +13 -0
- universal_mcp/agents/bigtoolcache/state.py +27 -0
- universal_mcp/agents/builder.py +146 -0
- universal_mcp/agents/cli.py +27 -0
- universal_mcp/agents/codeact/__init__.py +243 -0
- universal_mcp/agents/codeact/sandbox.py +27 -0
- universal_mcp/agents/codeact/test.py +15 -0
- universal_mcp/agents/codeact/utils.py +61 -0
- universal_mcp/agents/hil.py +104 -0
- universal_mcp/agents/llm.py +45 -0
- universal_mcp/agents/planner/__init__.py +37 -0
- universal_mcp/agents/planner/__main__.py +24 -0
- universal_mcp/agents/planner/graph.py +81 -0
- universal_mcp/agents/planner/prompts.py +1 -0
- universal_mcp/agents/planner/state.py +12 -0
- universal_mcp/agents/react.py +76 -0
- universal_mcp/agents/shared/tool_node.py +236 -0
- universal_mcp/agents/simple.py +40 -0
- universal_mcp/agents/tools.py +35 -0
- universal_mcp/agents/utils.py +111 -0
- universal_mcp_agents-0.1.2.dist-info/METADATA +21 -0
- universal_mcp_agents-0.1.2.dist-info/RECORD +51 -0
- universal_mcp_agents-0.1.2.dist-info/WHEEL +4 -0
@@ -0,0 +1,33 @@
+from dataclasses import dataclass, field
+
+from .prompts import SYSTEM_PROMPT
+
+
+@dataclass(kw_only=True)
+class Context:
+    """The context for the agent."""
+
+    system_prompt: str = field(
+        default=SYSTEM_PROMPT,
+        metadata={
+            "description": "The system prompt to use for the agent's interactions. "
+            "This prompt sets the context and behavior for the agent."
+        },
+    )
+
+    model: str = field(
+        default="anthropic/claude-4-sonnet-20250514",
+        metadata={
+            "description": "The name of the language model to use for the agent's main interactions. "
+            "Should be in the form: provider/model-name."
+        },
+    )
+
+    recursion_limit: int = field(
+        default=10,
+        metadata={
+            "description": "The maximum number of times the agent can call itself recursively. "
+            "This is to prevent infinite recursion."
+        },
+    )
+
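The Context above is a plain kw_only dataclass with defaults on every field, so callers can override any subset per run. A minimal sketch (values hypothetical; the import path follows the universal_mcp.agents.bigtoolcache.context import used in the graph module below):

    from universal_mcp.agents.bigtoolcache.context import Context

    ctx = Context(model="openrouter/auto", recursion_limit=5)  # system_prompt keeps its default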
@@ -0,0 +1,176 @@
+import json
+from datetime import UTC, datetime
+from typing import Any, Literal, TypedDict, cast
+
+from langchain_anthropic import ChatAnthropic
+from langchain_core.language_models import BaseChatModel
+from langchain_core.messages import AIMessage, ToolMessage
+from langchain_core.tools import tool
+from langgraph.graph import StateGraph
+from langgraph.runtime import Runtime
+from langgraph.types import Command
+
+from universal_mcp.agents.bigtoolcache.context import Context
+from universal_mcp.agents.bigtoolcache.state import State
+from universal_mcp.logger import logger
+from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolFormat
+
+
+
+def build_graph(
+    tool_registry: ToolRegistry,
+    llm: BaseChatModel
+):
+    @tool
+    async def search_tools(queries: list[str]) -> str:
+        """Search tools for a given list of queries.
+        Each single query should be atomic (doable with a single tool).
+        For tasks requiring multiple tools, add separate queries for each subtask."""
+        logger.info(f"Searching tools for queries: '{queries}'")
+        try:
+            all_tool_candidates = ""
+            app_ids = await tool_registry.list_all_apps()
+            connections = await tool_registry.list_connected_apps()
+            connection_ids = set([connection["app_id"] for connection in connections])
+            connected_apps = [app["id"] for app in app_ids if app["id"] in connection_ids]
+            unconnected_apps = [app["id"] for app in app_ids if app["id"] not in connection_ids]
+            app_tools = {}
+            for task_query in queries:
+                tools_list = await tool_registry.search_tools(task_query, limit=40)
+                for tool in tools_list:
+                    app = tool["id"].split("__")[0]
+                    if app not in app_tools:
+                        if len(app_tools.keys()) >= 10:
+                            break
+                        app_tools[app] = {}
+                    if len(app_tools[app]) < 3:
+                        if tool["id"] not in app_tools[app]:
+                            app_tools[app][tool["id"]] = tool["description"]
+            for app in app_tools:
+                app_status = "connected" if app in connected_apps else "NOT connected"
+                all_tool_candidates += f"Tools from {app} (status: {app_status} by user):\n"
+                for tool in app_tools[app]:
+                    all_tool_candidates += f"  - {tool}: {app_tools[app][tool]}\n"
+                all_tool_candidates += "\n"
+
+
+            return all_tool_candidates
+        except Exception as e:
+            logger.error(f"Error retrieving tools: {e}")
+            return "Error: " + str(e)
+
+    @tool
+    async def load_tools(tool_ids: list[str]) -> list[dict[str, Any]]:
+        """Load the tools for the given tool ids. Returns the tool name, description, parameters schema, and output schema."""
+        temp_manager = tool_registry.tool_manager
+        temp_manager.clear_tools()
+        await tool_registry.export_tools(tool_ids, format=ToolFormat.NATIVE)
+        tool_details = []
+        for tool_id in tool_ids:
+            tool = temp_manager.get_tool(tool_id)
+            tool_details.append({
+                "name": tool.name,
+                "description": tool.description,
+                "parameters_schema": tool.parameters,
+                "output_schema": tool.output_schema,
+            })
+        return tool_details
+
+    @tool
+    async def call_tool(tool_id: str, tool_args: dict[str, Any]) -> Any:
+        """Call the tool with the given id and arguments."""
+        return await tool_registry.call_tool(tool_id, tool_args)
+
+
+    async def call_model(state: State, runtime: Runtime[Context]) -> Command[Literal["select_tools", "call_tools"]]:
+        logger.info("Calling model...")
+        try:
+            system_message = runtime.context.system_prompt.format(system_time=datetime.now(tz=UTC).isoformat())
+            messages = [{"role": "system", "content": system_message}, *state["messages"]]
+
+            model = llm
+
+            if isinstance(model, ChatAnthropic):
+                model_with_tools = model.bind_tools(
+                    [search_tools, load_tools, call_tool], tool_choice="auto", cache_control={"type": "ephemeral"}
+                )
+            else:
+                model_with_tools = model.bind_tools([search_tools, load_tools, call_tool], tool_choice="auto")
+            response = cast(AIMessage, await model_with_tools.ainvoke(messages))
+
+            if response.tool_calls:
+                logger.info(f"Model responded with {len(response.tool_calls)} tool calls.")
+                if len(response.tool_calls) > 1:
+                    raise Exception("Not possible in Claude with llm.bind_tools(tools=tools, tool_choice='auto')")
+                tool_call = response.tool_calls[0]
+                if tool_call["name"] == search_tools.name:
+                    logger.info("Model requested to select tools.")
+                    return Command(goto="select_tools", update={"messages": [response]})
+                elif tool_call["name"] == load_tools.name:
+                    logger.info("Model requested to load tools.")
+                    tool_details = await load_tools.ainvoke(input=tool_call["args"])
+                    tool_msg = ToolMessage(f"Loaded tools. {tool_details}", tool_call_id=tool_call["id"])
+                    selected_tool_ids = tool_call["args"]["tool_ids"]
+                    logger.info(f"Loaded tools: {selected_tool_ids}")
+                    return Command(goto="call_model", update={"messages": [response, tool_msg], "selected_tool_ids": selected_tool_ids})
+                elif tool_call["name"] == call_tool.name:
+                    logger.info("Model requested to call tool.")
+                    return Command(goto="call_tools", update={"messages": [response]})
+                return Command(goto="call_tools", update={"messages": [response]})
+            else:
+                logger.info("Model responded with a message, ending execution.")
+                return Command(update={"messages": [response]})
+        except Exception as e:
+            logger.error(f"Error in call_model: {e}")
+            raise
+
+    async def select_tools(state: State, runtime: Runtime[Context]) -> Command[Literal["call_model"]]:
+        logger.info("Selecting tools...")
+        try:
+            tool_call = state["messages"][-1].tool_calls[0]
+            searched_tools = await search_tools.ainvoke(input=tool_call["args"])
+            tool_msg = ToolMessage(f"Available tools: {searched_tools}", tool_call_id=tool_call["id"])
+            return Command(goto="call_model", update={"messages": [tool_msg]})
+        except Exception as e:
+            logger.error(f"Error in select_tools: {e}")
+            raise
+
+    async def call_tools(state: State) -> Command[Literal["call_model"]]:
+        logger.info("Calling tools...")
+        outputs = []
+        recent_tool_ids = []
+        tool_call = state["messages"][-1].tool_calls[0]
+        tool_id = tool_call["args"]["tool_id"]
+        tool_args = tool_call["args"]["tool_args"]
+        logger.info(f"Executing tool: {tool_id} with args: {tool_args}")
+        try:
+            await tool_registry.export_tools([tool_id], ToolFormat.LANGCHAIN)
+            tool_result = await call_tool.ainvoke(input={"tool_id": tool_id, "tool_args": tool_args})
+            logger.info(f"Tool '{tool_id}' executed successfully.")
+            outputs.append(
+                ToolMessage(
+                    content=json.dumps(tool_result),
+                    name=tool_id,
+                    tool_call_id=tool_call["id"],
+                )
+            )
+            recent_tool_ids.append(tool_id)
+        except Exception as e:
+            logger.error(f"Error executing tool '{tool_id}': {e}")
+            outputs.append(
+                ToolMessage(
+                    content=json.dumps("Error: " + str(e)),
+                    name=tool_id,
+                    tool_call_id=tool_call["id"],
+                )
+            )
+        return Command(goto="call_model", update={"messages": outputs, "selected_tool_ids": recent_tool_ids})
+
+    builder = StateGraph(State, context_schema=Context)
+
+    builder.add_node(call_model)
+    builder.add_node(select_tools)
+    builder.add_node(call_tools)
+    builder.set_entry_point("call_model")
+    return builder
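build_graph returns an uncompiled StateGraph, so the caller still compiles it and supplies the runtime context. A minimal wiring sketch, assuming langgraph's context API (which the Runtime[Context] annotations above rely on) and a hypothetical registry/model setup, run from async code:

    from langchain_core.messages import HumanMessage

    graph = build_graph(tool_registry, llm).compile()  # tool_registry: ToolRegistry, llm: BaseChatModel
    result = await graph.ainvoke(
        {"messages": [HumanMessage(content="Find my latest unread email")]},
        context=Context(recursion_limit=10),
    )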
@@ -0,0 +1,13 @@
+"""Default prompts used by the agent."""
+
+SYSTEM_PROMPT = """You are a helpful AI assistant.
+
+**Core Directives:**
+1. **Always Use Tools for Tasks:** For any user request that requires an action (e.g., sending an email, searching for information, creating an event), you MUST use a tool. Do not answer from your own knowledge or refuse a task if a tool might exist for it.
+2. **First Step is ALWAYS `search_tools`:** Before you can use any other tool, you MUST first call the `search_tools` function to find the right tools for the user's request. This is your mandatory first action. You must not use the same or a similar query multiple times in the list. The list should have multiple queries only if the task has clearly different sub-tasks.
+3. **Load Tools:** After looking at the output of `search_tools`, you MUST call the `load_tools` function to load only the tools you want to use. Use your judgement to eliminate irrelevant apps that came up just because of semantic similarity. However, sometimes, multiple apps might be relevant for the same task. Prefer connected apps over unconnected apps when breaking a tie. If more than one relevant app (or none of the relevant apps) is connected, you must ask the user to choose the app. In case the user asks you to use an app that is not connected, call the app's tools normally. The tool will return a link for connecting that you should pass on to the user.
+4. **Call Tools:** After loading the tools, you MUST call the `call_tool` function to call the tools you want to use. You must call the tool with the correct arguments. You can only call a tool once you have loaded it.
+5. **Strictly Follow the Process:** Your only job in your first turn is to analyze the user's request and call `search_tools` with a concise query describing the core task. Do not engage in conversation.
+
+System time: {system_time}
+"""
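Taken together, the directives pin the model to a fixed search, load, call loop; an illustrative first few tool calls (ids and arguments hypothetical, following the app__tool id convention from the graph module above):

    search_tools(queries=["send an email"])
    load_tools(tool_ids=["google_mail__send_email"])
    call_tool(tool_id="google_mail__send_email", tool_args={"to": "...", "subject": "...", "body": "..."})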
@@ -0,0 +1,27 @@
+from typing import Annotated
+
+from langgraph.prebuilt.chat_agent_executor import AgentState
+
+
+def _enqueue(left: list, right: list) -> list:
+    """Treat left as a FIFO queue, append new items from right (preserving order),
+    keep items unique, and cap the total size (preferred 20, hard max 30), dropping the oldest items."""
+    max_size = 30
+    preferred_size = 20
+    if len(right) > preferred_size:
+        preferred_size = min(max_size, len(right))
+    queue = list(left or [])
+
+    for item in right[:preferred_size] or []:
+        if item in queue:
+            queue.remove(item)
+        queue.append(item)
+
+    if len(queue) > preferred_size:
+        queue = queue[-preferred_size:]
+
+    return queue
+
+
+class State(AgentState):
+    selected_tool_ids: Annotated[list[str], _enqueue]
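As a reducer on selected_tool_ids, _enqueue merges each update into the existing channel value instead of replacing it; re-adding a known id moves it to the back of the queue rather than duplicating it. A worked example (hypothetical values):

    _enqueue(["a", "b", "c"], ["b", "d"])  # -> ["a", "c", "b", "d"]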
@@ -0,0 +1,146 @@
+import asyncio
+from collections.abc import Sequence
+from typing import Annotated, TypedDict
+
+from langchain_core.language_models import BaseChatModel
+from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
+from langgraph.checkpoint.base import BaseCheckpointSaver
+from langgraph.graph import END, START, StateGraph
+from langgraph.graph.message import add_messages
+from pydantic import BaseModel, Field
+
+from universal_mcp.agents.base import BaseAgent
+from universal_mcp.agents.llm import load_chat_model
+from universal_mcp.agents.shared.tool_node import build_tool_node_graph
+from universal_mcp.tools.registry import ToolRegistry
+from universal_mcp.types import ToolConfig
+
+
+class Agent(BaseModel):
+    """Agent that can be created by the builder."""
+
+    name: str = Field(description="Name of the agent.")
+    description: str = Field(description="A small description of the agent.")
+    expertise: str = Field(description="The expertise of the agent.")
+    instructions: str = Field(description="The instructions for the agent to follow.")
+    schedule: str | None = Field(description="The cron expression for the agent to run on.", default=None)
+
+
+class BuilderState(TypedDict):
+    user_task: str
+    generated_agent: Agent | None
+    tool_config: ToolConfig | None
+    messages: Annotated[Sequence[BaseMessage], add_messages]
+
+
+AGENT_BUILDER_INSTRUCTIONS = """
+You are an agent builder. Your goal is to create an agent that can accomplish the user's task.
+You will be given a task and you need to generate an agent that can accomplish the task.
+The agent should have a name, role, instructions, and a model.
+- The name should be a short and descriptive name for the agent.
+- The description should be a small description of the agent. For example, research a stock and write a buy/sell analysis report.
+- The expertise should be the expertise of the agent. For example, GTM Expert, SEO Expert, etc.
+- The instructions should be a detailed description of what the agent should do. This should include the input, the output, and the tool usage. The agent will be provided a set of tools; you can use that to give a more accurate response.
+- The model should be the model to use for the agent.
+- The reasoning should be a detailed explanation of why you are creating this agent with these parameters.
+- If the user specifies a schedule, you should also provide a cron expression for the agent to run on. The schedule should be a proper cron expression and nothing more.
+"""
+
+
+async def generate_agent(llm: BaseChatModel, task: str, old_agent: Agent | None = None) -> Agent:
+    """Generates an agent from a task, optionally modifying an existing one."""
+    prompt_parts = [AGENT_BUILDER_INSTRUCTIONS]
+    if old_agent:
+        prompt_parts.append(
+            "\nThe user wants to modify the following agent design. "
+            "Incorporate their feedback into a new design.\n\n"
+            f"{old_agent.model_dump_json(indent=2)}"
+        )
+    else:
+        prompt_parts.append(f"\n\n**Task:** {task}")
+
+    prompt = "\n".join(prompt_parts)
+    structured_llm = llm.with_structured_output(Agent)
+    agent = await structured_llm.ainvoke(prompt)
+    return agent
+
+
+class BuilderAgent(BaseAgent):
+    def __init__(
+        self,
+        name: str,
+        instructions: str,
+        model: str,
+        registry: ToolRegistry,
+        memory: BaseCheckpointSaver | None = None,
+        **kwargs,
+    ):
+        super().__init__(name, instructions, model, memory, **kwargs)
+        self.registry = registry
+        self.llm: BaseChatModel = load_chat_model(model)
+
+    async def _create_agent(self, state: BuilderState):
+        last_message = state["messages"][-1]
+        task = last_message.content
+        agent = state.get("generated_agent")
+
+        yield {
+            "messages": [
+                AIMessage(
+                    content="Thinking... I will now design an agent to handle your request.",
+                )
+            ],
+        }
+        generated_agent = await generate_agent(self.llm, task, agent)
+        yield {
+            "user_task": task,
+            "generated_agent": generated_agent,
+            "messages": [AIMessage(content="I've designed an agent to help you with your task.")],
+        }
+
+    async def _create_tool_config(self, state: BuilderState):
+        task = state["user_task"]
+        yield {
+            "messages": [
+                AIMessage(
+                    content="Great! Now, I will select the appropriate tools for this agent. This may take a moment.",
+                )
+            ]
+        }
+        tool_finder_graph = build_tool_node_graph(self.llm, self.registry)
+        tool_config = await tool_finder_graph.ainvoke({"task": task, "messages": [HumanMessage(content=task)]})
+        tool_config = tool_config.get("apps_with_tools", {})
+        yield {
+            "tool_config": tool_config,
+            "messages": [AIMessage(content="I have selected the necessary tools for the agent. The agent is ready!")],
+        }
+
+    async def _build_graph(self):
+        builder = StateGraph(BuilderState)
+        builder.add_node("create_agent", self._create_agent)
+        builder.add_node("create_tool_config", self._create_tool_config)
+
+        builder.add_edge(START, "create_agent")
+        builder.add_edge("create_agent", "create_tool_config")
+        builder.add_edge("create_tool_config", END)
+        return builder.compile(checkpointer=self.memory)
+
+
+async def main():
+    from universal_mcp.agentr.registry import AgentrRegistry
+
+    registry = AgentrRegistry()
+    agent = BuilderAgent(
+        name="Builder Agent",
+        instructions="You are a builder agent that creates other agents.",
+        model="gemini/gemini-1.5-pro",
+        registry=registry,
+    )
+    result = await agent.invoke(
+        "Send a daily email to manoj@agentr.dev with daily agenda of the day",
+    )
+    print(result)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
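Because generate_agent is a standalone coroutine built on with_structured_output, it can also be exercised outside the graph. A minimal sketch, run from async code (model name, task, and the resulting cron string are hypothetical):

    llm = load_chat_model("gemini/gemini-1.5-pro")
    spec = await generate_agent(llm, "Email me a standup summary every weekday at 9am")
    # spec is an Agent; for a scheduled task, spec.schedule should come back
    # as a plain cron string such as "0 9 * * 1-5"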
@@ -0,0 +1,27 @@
+from typer import Typer
+
+from universal_mcp.agents import ReactAgent
+from universal_mcp.logger import setup_logger
+
+app = Typer()
+
+
+@app.command(
+    help="Run the agent CLI",
+    epilog="""
+Example:
+mcp client run --config client_config.json
+""",
+)
+def run():
+    """Run the agent CLI"""
+    import asyncio
+
+    setup_logger(log_file=None, level="WARNING")
+
+    agent = ReactAgent("React Agent", "You are a helpful assistant", "openrouter/auto")
+    asyncio.run(agent.run_interactive())
+
+
+if __name__ == "__main__":
+    app()
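Since the Typer app registers a single command, that command runs as the top-level entry point; with the package installed, one way to launch it (invocation hypothetical, not a documented console script):

    python -m universal_mcp.agents.cli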
@@ -0,0 +1,243 @@
+import inspect
+import re
+from collections.abc import Awaitable, Callable, Sequence
+from typing import Any, TypeVar
+
+from langchain_core.language_models import BaseChatModel
+from langchain_core.tools import StructuredTool
+from langchain_core.tools import tool as create_tool
+from langgraph.graph import END, START, MessagesState, StateGraph
+from langgraph.types import Command
+
+from .utils import extract_and_combine_codeblocks
+
+EvalFunction = Callable[[str, dict[str, Any]], tuple[str, dict[str, Any]]]
+EvalCoroutine = Callable[[str, dict[str, Any]], Awaitable[tuple[str, dict[str, Any]]]]
+
+
+class CodeActState(MessagesState):
+    """State for CodeAct agent."""
+
+    script: str | None
+    """The Python code script to be executed."""
+    context: dict[str, Any]
+    """Dictionary containing the execution context with available tools and variables."""
+
+
+StateSchema = TypeVar("StateSchema", bound=CodeActState)
+StateSchemaType = type[StateSchema]
+
+
+def make_safe_function_name(name: str) -> str:
+    """Convert a tool name to a valid Python function name."""
+    # Replace non-alphanumeric characters with underscores
+    safe_name = re.sub(r"[^a-zA-Z0-9_]", "_", name)
+    # Ensure the name doesn't start with a digit
+    if safe_name and safe_name[0].isdigit():
+        safe_name = f"tool_{safe_name}"
+    # Handle empty name edge case
+    if not safe_name:
+        safe_name = "unnamed_tool"
+    return safe_name
+
+
+def create_default_prompt(tools: list[StructuredTool], base_prompt: str | None = None):
+    """Create default prompt for the CodeAct agent."""
+    tools = [t if isinstance(t, StructuredTool) else create_tool(t) for t in tools]
+    prompt = f"{base_prompt}\n\n" if base_prompt else ""
+    prompt += """You will be given a task to perform. You should output either
+- a Python code snippet that provides the solution to the task, or a step towards the solution. Any output you want to extract from the code should be printed to the console. Code should be output in a fenced code block.
+- text to be shown directly to the user, if you want to ask for more information or provide the final answer.
+
+In addition to the Python Standard Library, you can use the following functions:
+"""
+
+    for tool in tools:
+        # Use coroutine if it exists, otherwise use func
+        tool_callable = tool.coroutine if hasattr(tool, "coroutine") and tool.coroutine is not None else tool.func
+        # Create a safe function name
+        safe_name = make_safe_function_name(tool.name)
+        # Determine if it's an async function
+        is_async = inspect.iscoroutinefunction(tool_callable)
+        # Add appropriate function definition
+        prompt += f'''
+{"async " if is_async else ""}def {safe_name}{str(inspect.signature(tool_callable))}:
+    """{tool.description}"""
+    ...
+'''
+
+    prompt += """
+
+Variables defined at the top level of previous code snippets can be referenced in your code.
+
+Reminder: use Python code snippets to call tools"""
+    return prompt
+
+
+def create_codeact(
+    model: BaseChatModel,
+    tools: Sequence[StructuredTool | Callable],
+    eval_fn: EvalFunction | EvalCoroutine,
+    *,
+    prompt: str | None = None,
+    reflection_prompt: str | None = None,
+    reflection_model: BaseChatModel | None = None,
+    max_reflections: int = 3,
+    state_schema: StateSchemaType = CodeActState,
+) -> StateGraph:
+    """Create a CodeAct agent.
+
+    Args:
+        model: The language model to use for generating code
+        tools: List of tools available to the agent. Can be passed as python functions or StructuredTool instances.
+        eval_fn: Function or coroutine that executes code in a sandbox. Takes code string and locals dict,
+            returns a tuple of (stdout output, new variables dict)
+        prompt: Optional custom system prompt. If None, uses default prompt.
+            To customize default prompt you can use `create_default_prompt` helper:
+            `create_default_prompt(tools, "You are a helpful assistant.")`
+        reflection_prompt: Optional prompt for reflection. If provided, will be used to evaluate responses.
+            If the reflection output contains "NONE", the response is considered valid, otherwise the
+            reflection output is passed back to the model for regeneration.
+        reflection_model: Optional model to use for reflection. If None, uses the same model as for generation.
+        max_reflections: Maximum number of reflection iterations (default: 3).
+        state_schema: The state schema to use for the agent.
+
+    Returns:
+        A StateGraph implementing the CodeAct architecture
+    """
+    tools = [t if isinstance(t, StructuredTool) else create_tool(t) for t in tools]
+
+    if prompt is None:
+        prompt = create_default_prompt(tools)
+
+    # If no reflection model is provided, use the main model
+    if reflection_model is None:
+        reflection_model = model
+
+    # Make tools available to the code sandbox - use safe names for keys
+    tools_context = {}
+    for tool in tools:
+        safe_name = make_safe_function_name(tool.name)
+        # Use coroutine if it exists, otherwise use func (same as in create_default_prompt)
+        tool_callable = tool.coroutine if hasattr(tool, "coroutine") and tool.coroutine is not None else tool.func
+        # Only use the safe name for consistency with the prompt
+        tools_context[safe_name] = tool_callable
+
+    def call_model(state: StateSchema) -> Command:
+        messages = [{"role": "system", "content": prompt}] + state["messages"]
+
+        # Run the model and potentially loop for reflection
+        response = model.invoke(messages)
+
+        # Extract and combine all code blocks
+        code = extract_and_combine_codeblocks(response.content)
+
+        # Loop for reflection if needed and if code is present
+        if reflection_prompt and code:
+            reflection_count = 0
+            while reflection_count < max_reflections:
+                # Format conversation history with XML-style tags
+                conversation_history = "\n".join(
+                    [
+                        f'<message role="{("user" if m.type == "human" else "assistant")}">\n{m.content}\n</message>'
+                        for m in state["messages"]
+                    ]
+                )
+
+                # Add the current response
+                conversation_history += f'\n<message role="assistant">\n{response.content}\n</message>'
+
+                # Create the reflection prompt with the tagged conversation history
+                formatted_prompt = f"""
+Review the assistant's latest code as per the quality rules:
+
+<conversation_history>
+{conversation_history}
+</conversation_history>
+
+If you find ANY of these issues, describe the problem briefly and clearly.
+If NO issues are found, respond with EXACTLY: "NONE"
+"""
+
+                # Create messages for reflection with correct ordering
+                reflection_messages = [
+                    {"role": "system", "content": reflection_prompt},
+                    # Include the formatted reflection prompt as the final user message
+                    {"role": "user", "content": formatted_prompt},
+                ]
+                reflection_result = reflection_model.invoke(reflection_messages)
+
+                # Check if reflection passed
+                if "NONE" in reflection_result.content:
+                    # Reflection passed, exit loop
+                    break
+
+                # Reflection didn't pass, regenerate response
+                reflection_messages = [
+                    {"role": "system", "content": prompt},
+                    *state["messages"],
+                    {"role": "assistant", "content": response.content},
+                    {
+                        "role": "user",
+                        "content": f"""
+I need you to completely regenerate your previous response based on this feedback:
+
+'''
+{reflection_result.content}
+'''
+
+DO NOT reference the feedback directly. Instead, provide a completely new response that addresses the issues.
+""",
+                    },
+                ]
+                response = model.invoke(reflection_messages)
+
+                # Extract code from the new response
+                code = extract_and_combine_codeblocks(response.content)
+
+                # If no code in the new response, exit the reflection loop
+                if not code:
+                    break
+
+                # Increment reflection count
+                reflection_count += 1
+
+        # Return appropriate command with only the latest response
+        if code:
+            return Command(goto="sandbox", update={"messages": [response], "script": code})
+        else:
+            # no code block, end the loop and respond to the user
+            return Command(update={"messages": [response], "script": None})
+
+    # If eval_fn is a coroutine, define an async node function.
+    if inspect.iscoroutinefunction(eval_fn):
+
+        async def sandbox(state: StateSchema):
+            existing_context = state.get("context", {})
+            context = {**existing_context, **tools_context}
+            # Execute the script in the sandbox
+            output, new_vars = await eval_fn(state["script"], context)
+            new_context = {**existing_context, **new_vars}
+            return {
+                "messages": [{"role": "user", "content": output}],
+                "context": new_context,
+            }
+    else:
+
+        def sandbox(state: StateSchema):
+            existing_context = state.get("context", {})
+            context = {**existing_context, **tools_context}
+            # Execute the script in the sandbox
+            output, new_vars = eval_fn(state["script"], context)
+            new_context = {**existing_context, **new_vars}
+            return {
+                "messages": [{"role": "user", "content": output}],
+                "context": new_context,
+            }
+
+    agent = StateGraph(state_schema)
+    agent.add_node(call_model, destinations=(END, "sandbox"))
+    agent.add_node(sandbox)
+    agent.add_edge(START, "call_model")
+    agent.add_edge("sandbox", "call_model")
+    return agent
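The eval_fn contract documented above is (code, locals) -> (stdout output, new variables). A deliberately naive sketch of a conforming callable; it is NOT a sandbox (exec runs untrusted code in-process), and a real implementation should isolate execution, e.g. via the codeact/sandbox.py module listed at the top:

    import contextlib
    import io

    def naive_eval(code: str, _locals: dict) -> tuple[str, dict]:
        before = set(_locals)
        buf = io.StringIO()
        with contextlib.redirect_stdout(buf):
            exec(code, _locals)  # illustration only: no isolation, no resource limits
        # Report only the names the script introduced at the top level
        new_vars = {k: v for k, v in _locals.items() if k not in before and k != "__builtins__"}
        return buf.getvalue(), new_vars

    graph = create_codeact(model, tools, naive_eval).compile()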