universal-mcp-agents 0.1.9__py3-none-any.whl → 0.1.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of universal-mcp-agents might be problematic.
- universal_mcp/agents/__init__.py +9 -9
- universal_mcp/agents/base.py +13 -18
- universal_mcp/agents/bigtool2/__init__.py +6 -7
- universal_mcp/agents/bigtool2/__main__.py +2 -4
- universal_mcp/agents/bigtool2/agent.py +1 -0
- universal_mcp/agents/bigtool2/graph.py +48 -184
- universal_mcp/agents/bigtool2/meta_tools.py +120 -0
- universal_mcp/agents/bigtoolcache/__init__.py +31 -22
- universal_mcp/agents/bigtoolcache/__main__.py +1 -4
- universal_mcp/agents/bigtoolcache/agent.py +1 -3
- universal_mcp/agents/bigtoolcache/graph.py +101 -191
- universal_mcp/agents/bigtoolcache/prompts.py +7 -31
- universal_mcp/agents/bigtoolcache/tools.py +141 -0
- universal_mcp/agents/builder.py +10 -20
- universal_mcp/agents/cli.py +1 -2
- universal_mcp/agents/codeact/__init__.py +1 -1
- universal_mcp/agents/codeact/__main__.py +15 -5
- universal_mcp/agents/codeact/agent.py +55 -66
- universal_mcp/agents/codeact/prompts.py +9 -10
- universal_mcp/agents/codeact/sandbox.py +5 -2
- universal_mcp/agents/codeact/state.py +2 -4
- universal_mcp/agents/codeact/utils.py +12 -5
- universal_mcp/agents/hil.py +1 -6
- universal_mcp/agents/planner/__init__.py +1 -3
- universal_mcp/agents/planner/graph.py +1 -3
- universal_mcp/agents/react.py +14 -6
- universal_mcp/agents/shared/prompts.py +3 -3
- universal_mcp/agents/shared/tool_node.py +47 -47
- universal_mcp/agents/simple.py +2 -1
- universal_mcp/agents/utils.py +4 -15
- universal_mcp/applications/ui/app.py +5 -15
- {universal_mcp_agents-0.1.9.dist-info → universal_mcp_agents-0.1.10.dist-info}/METADATA +2 -1
- universal_mcp_agents-0.1.10.dist-info/RECORD +42 -0
- universal_mcp/agents/autoagent/__init__.py +0 -30
- universal_mcp/agents/autoagent/__main__.py +0 -25
- universal_mcp/agents/autoagent/context.py +0 -26
- universal_mcp/agents/autoagent/graph.py +0 -170
- universal_mcp/agents/autoagent/prompts.py +0 -9
- universal_mcp/agents/autoagent/state.py +0 -27
- universal_mcp/agents/autoagent/utils.py +0 -13
- universal_mcp/agents/bigtool/__init__.py +0 -58
- universal_mcp/agents/bigtool/__main__.py +0 -23
- universal_mcp/agents/bigtool/graph.py +0 -210
- universal_mcp/agents/bigtool/prompts.py +0 -31
- universal_mcp/agents/bigtool/state.py +0 -27
- universal_mcp/agents/bigtoolcache/tools_all.txt +0 -956
- universal_mcp/agents/bigtoolcache/tools_important.txt +0 -474
- universal_mcp_agents-0.1.9.dist-info/RECORD +0 -54
- {universal_mcp_agents-0.1.9.dist-info → universal_mcp_agents-0.1.10.dist-info}/WHEEL +0 -0
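Taken together, the changes are: the `autoagent` and `bigtool` agent packages are deleted (along with the bundled `tools_all.txt` and `tools_important.txt` caches), tool search and selection helpers move into the new `bigtool2/meta_tools.py` and `bigtoolcache/tools.py` modules, `typer` becomes a runtime dependency, and most surviving modules are reformatted (the hunks below are largely line joins and blank-line churn).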
universal_mcp/agents/shared/tool_node.py
CHANGED

```diff
@@ -1,5 +1,5 @@
 import asyncio
-from typing import Annotated, TypedDict
+from typing import Annotated, TypedDict
 
 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage, AnyMessage, HumanMessage
@@ -17,43 +17,50 @@ from universal_mcp.agents.shared.prompts import (
     TOOL_SELECTION_PROMPT,
 )
 
-
 MAX_DECOMPOSITION_ATTEMPTS = 2
 
 # --- Pydantic Models for Structured LLM Outputs ---
 
+
 class TaskDecomposition(BaseModel):
-    sub_tasks:
+    sub_tasks: list[str] = Field(description="A list of sub-task descriptions.")
+
 
 class SearchQuery(BaseModel):
     query: str = Field(description="A concise search query.")
 
+
 class ToolSelection(BaseModel):
-    tool_ids:
+    tool_ids: list[str] = Field(description="The IDs of the selected tools.")
 
 
 # --- LangGraph Agent State ---
 
+
 class SubTask(TypedDict, total=False):
     """Represents a single step in the execution plan."""
+
     task: str
     status: str  # "pending", "success", "failed"
     app_id: str
-    tool_ids:
+    tool_ids: list[str]
     reasoning: str
 
+
 class AgentState(TypedDict):
     """The central state of our agent graph."""
+
     original_task: str
     decomposition_attempts: int
    failed_sub_task_info: str  # To inform re-decomposition
-    sub_tasks:
-    execution_plan:
+    sub_tasks: list[SubTask]
+    execution_plan: list[SubTask]
     messages: Annotated[list[AnyMessage], add_messages]
 
 
 # --- Graph Builder ---
 
+
 def build_tool_node_graph(llm: BaseChatModel, registry: ToolRegistry) -> StateGraph:
     """Builds the adaptive LangGraph workflow for tool selection."""
 
@@ -65,19 +72,14 @@ def build_tool_node_graph(llm: BaseChatModel, registry: ToolRegistry) -> StateGraph:
 
         if attempts > 0 and failed_info:
             logger.warning(f"Revising decomposition. Attempt {attempts + 1}.")
-            prompt = REVISE_DECOMPOSITION_PROMPT.format(
-                task=task, failed_sub_task=failed_info
-            )
+            prompt = REVISE_DECOMPOSITION_PROMPT.format(task=task, failed_sub_task=failed_info)
         else:
             logger.info("Performing initial task decomposition.")
             prompt = TASK_DECOMPOSITION_PROMPT.format(task=task)
 
         response = await llm.with_structured_output(TaskDecomposition).ainvoke(prompt)
-        sub_tasks = [
-            {"task": sub_task_str, "status": "pending"}
-            for sub_task_str in response.sub_tasks
-        ]
-
+        sub_tasks = [{"task": sub_task_str, "status": "pending"} for sub_task_str in response.sub_tasks]
+
         return {
             "sub_tasks": sub_tasks,
             "decomposition_attempts": attempts + 1,
@@ -99,16 +101,13 @@
             plan_context_str = "None. This is the first step."
         else:
             context_lines = [
-                f"- The sub-task '{step['task']}' was assigned to app '{step['app_id']}'."
-                for step in current_plan
+                f"- The sub-task '{step['task']}' was assigned to app '{step['app_id']}'." for step in current_plan
             ]
             plan_context_str = "\n".join(context_lines)
 
         # 2. Generate the App-specific query using the NEW full-context prompt
         app_query_prompt = APP_SEARCH_QUERY_PROMPT.format(
-            original_task=original_task,
-            plan_context=plan_context_str,
-            sub_task=task_desc
+            original_task=original_task, plan_context=plan_context_str, sub_task=task_desc
         )
         app_query_response = await llm.with_structured_output(SearchQuery).ainvoke(app_query_prompt)
         app_search_query = app_query_response.query
@@ -142,12 +141,14 @@
 
             if selection_response.tool_ids:
                 logger.success(f"Found and selected tool(s) {selection_response.tool_ids} in app '{app_id}'.")
-                sub_task.update(
-
-
-
-
-
+                sub_task.update(
+                    {
+                        "status": "success",
+                        "app_id": app_id,
+                        "tool_ids": selection_response.tool_ids,
+                        "reasoning": f"Selected tool(s) {selection_response.tool_ids} from app '{app_id}' for sub-task.",
+                    }
+                )
                 current_plan.append(sub_task)
                 tool_found = True
                 break
@@ -157,12 +158,16 @@
                 return {"failed_sub_task_info": task_desc, "sub_tasks": []}
 
         return {"execution_plan": current_plan, "sub_tasks": []}
-
+
     def _handle_planning_failure(state: AgentState) -> AgentState:
         """Handles the case where all decomposition attempts have failed."""
         logger.error("Maximum decomposition attempts reached. Planning failed.")
         return {
-            "messages": [
+            "messages": [
+                AIMessage(
+                    content="I am unable to create a complete plan for this task with the available tools. Please try rephrasing your request."
+                )
+            ]
         }
 
     def _consolidate_plan(state: AgentState) -> AgentState:
@@ -172,7 +177,7 @@ def build_tool_node_graph(llm: BaseChatModel, registry: ToolRegistry) -> StateGraph:
         """
         logger.info("Consolidating final execution plan.")
         plan = state["execution_plan"]
-        merged_apps:
+        merged_apps: dict[str, SubTask] = {}
 
         for step in plan:
             app_id = step["app_id"]
@@ -189,9 +194,8 @@ def build_tool_node_graph(llm: BaseChatModel, registry: ToolRegistry) -> StateGraph:
         for app_id, step_data in merged_apps.items():
             step_data["tool_ids"] = sorted(list(step_data["tool_ids"]))
             final_plan.append(step_data)
-
-        return {"execution_plan": final_plan}
 
+        return {"execution_plan": final_plan}
 
     # --- Graph Definition ---
 
@@ -199,26 +203,26 @@ def build_tool_node_graph(llm: BaseChatModel, registry: ToolRegistry) -> StateGraph:
 
     workflow.add_node("decompose_task", _decompose_task)
     workflow.add_node("resolve_sub_tasks", _resolve_sub_tasks)
-    workflow.add_node("consolidate_plan", _consolidate_plan)
+    workflow.add_node("consolidate_plan", _consolidate_plan)  # NEW NODE
     workflow.add_node("handle_planning_failure", _handle_planning_failure)
 
     workflow.set_entry_point("decompose_task")
 
     def should_continue(state: AgentState):
-        if not state.get("sub_tasks"):
+        if not state.get("sub_tasks"):  # Resolution failed or succeeded
             if state.get("execution_plan"):
-                return "consolidate_plan"
+                return "consolidate_plan"  # MODIFIED: Go to consolidate on success
             elif state["decomposition_attempts"] >= MAX_DECOMPOSITION_ATTEMPTS:
                 return "handle_planning_failure"
             else:
-                return "decompose_task"
+                return "decompose_task"  # Re-try decomposition
         else:
             return "resolve_sub_tasks"
 
     workflow.add_conditional_edges("decompose_task", lambda s: "resolve_sub_tasks")
     workflow.add_conditional_edges("resolve_sub_tasks", should_continue)
-
-    workflow.add_edge("consolidate_plan", END)
+
+    workflow.add_edge("consolidate_plan", END)  # NEW EDGE
     workflow.add_edge("handle_planning_failure", END)
 
     return workflow.compile()
@@ -227,15 +231,16 @@ def build_tool_node_graph(llm: BaseChatModel, registry: ToolRegistry) -> StateGraph:
 async def main():
     """Main function to run the agent."""
     from universal_mcp.agentr.registry import AgentrRegistry
+
     from universal_mcp.agents.llm import load_chat_model
 
     registry = AgentrRegistry()
     llm = load_chat_model("anthropic/claude-4-sonnet-20250514")
-
+
     graph = build_tool_node_graph(llm, registry)
 
     task = "Find my latest order confirmation in Gmail, search for reviews of the main product on perplexity, and then send an email to ankit@agentr.dev telling about the reviews"
-
+
     initial_state = {
         "original_task": task,
         "messages": [HumanMessage(content=task)],
@@ -244,17 +249,12 @@ async def main():
 
     final_state = await graph.ainvoke(initial_state)
 
-    print("\n--- Final Agent State ---")
     if final_state.get("execution_plan"):
-        print("Successfully created a consolidated execution plan:")
         for step in final_state["execution_plan"]:
-
-            print(f"  - App: {step['app_id']}")
-            print(f"  - Tool(s): {', '.join(step['tool_ids'])}")
+            pass
     else:
-
-        print(f"Final message: {final_state['messages'][-1].content}")
+        pass
 
 
 if __name__ == "__main__":
-    asyncio.run(main())
+    asyncio.run(main())
```
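The module's `main()` in the last two hunks doubles as a usage example. For orientation, here is a minimal, self-contained sketch of the same flow: the model name, registry, and invocation come from the diff, while the `initial_state` keys beyond the two visible in the truncated hunk are assumptions made so the `TypedDict` is fully populated.

```python
import asyncio

from langchain_core.messages import HumanMessage
from universal_mcp.agentr.registry import AgentrRegistry

from universal_mcp.agents.llm import load_chat_model
from universal_mcp.agents.shared.tool_node import build_tool_node_graph


async def plan(task: str):
    llm = load_chat_model("anthropic/claude-4-sonnet-20250514")  # model from the diff's main()
    graph = build_tool_node_graph(llm, AgentrRegistry())
    initial_state = {
        "original_task": task,
        "messages": [HumanMessage(content=task)],
        # The keys below are assumptions; only the two above are visible in the hunk.
        "decomposition_attempts": 0,
        "failed_sub_task_info": "",
        "sub_tasks": [],
        "execution_plan": [],
    }
    final_state = await graph.ainvoke(initial_state)
    # On success, each plan step carries app_id and the sorted, merged tool_ids.
    return final_state.get("execution_plan")


if __name__ == "__main__":
    print(asyncio.run(plan("Find my latest order confirmation in Gmail")))
```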
universal_mcp/agents/simple.py
CHANGED

```diff
@@ -4,6 +4,7 @@ from typing import Annotated
 from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.graph import END, START, StateGraph
 from langgraph.graph.message import add_messages
+from rich import print
 from typing_extensions import TypedDict
 
 from universal_mcp.agents.base import BaseAgent
@@ -52,10 +53,10 @@ class SimpleAgent(BaseAgent):
         graph_builder.add_edge("chatbot", END)
         return graph_builder.compile(checkpointer=self.memory)
 
+
 async def main():
     agent = SimpleAgent("Simple Agent", "Act as a 14 year old kid, reply in Gen-Z lingo", "azure/gpt-5-mini")
     output = await agent.invoke("What is the capital of France?")
-    from rich import print
     print(messages_to_list(output["messages"]))
 
 
```
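As a quick sketch of the call pattern exercised by this module's own `main()`: the constructor takes name, instructions, and model, and `invoke` accepts a bare user string (assuming Azure credentials for the `azure/gpt-5-mini` model are configured; the `.content` access on the last message is standard LangChain, not shown in the diff).

```python
import asyncio

from universal_mcp.agents.simple import SimpleAgent


async def demo():
    # Constructor arguments mirror the module's own main(): name, instructions, model.
    agent = SimpleAgent("Simple Agent", "Reply concisely", "azure/gpt-5-mini")
    output = await agent.invoke("What is the capital of France?")
    print(output["messages"][-1].content)  # last AI message in the returned state


asyncio.run(demo())
```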
universal_mcp/agents/utils.py
CHANGED

```diff
@@ -8,9 +8,6 @@ from rich.markdown import Markdown
 from rich.panel import Panel
 from rich.prompt import Prompt
 from rich.table import Table
-from universal_mcp.tools.manager import ToolManager
-from universal_mcp.types import ToolFormat
-
 
 
 class RichCLI:
@@ -28,9 +25,7 @@ Available commands:
 - `/tools` - List available tools
 - `/exit` - Exit the application
 """
-        self.console.print(
-            Panel(Markdown(welcome_text), title="🤖 AI Agent CLI", border_style="blue")
-        )
+        self.console.print(Panel(Markdown(welcome_text), title="🤖 AI Agent CLI", border_style="blue"))
 
     def display_agent_response(self, response: str, agent_name: str):
         """Display agent response with formatting"""
@@ -54,13 +49,9 @@ Available commands:
         # Check if type has changed and reset content if so
         if self.type_ != type_:
             if type_ == "thinking":
-                self.content += (
-                    "\n[bold yellow]💭 Thinking:[/bold yellow] :"
-                )
+                self.content += "\n[bold yellow]💭 Thinking:[/bold yellow] :"
             elif type_ == "text":
-                self.content += (
-                    f"\n[bold green]🤖 {agent_name}[/bold green] :"
-                )
+                self.content += f"\n[bold green]🤖 {agent_name}[/bold green] :"
             self.type_ = type_
         self.content += chunk
         content_text = "".join(self.content)
@@ -120,9 +111,7 @@ Available commands:
             value = Prompt.ask(interrupt.value["question"])
             return value
         elif interrupt_type == "bool":
-            value = Prompt.ask(
-                interrupt.value["question"], choices=["y", "n"], default="y"
-            )
+            value = Prompt.ask(interrupt.value["question"], choices=["y", "n"], default="y")
             return value
         elif interrupt_type == "choice":
             value = Prompt.ask(
```
universal_mcp/applications/ui/app.py
CHANGED

```diff
@@ -134,9 +134,7 @@ class UiApp(BaseApplication):
             "headers": dict(response.headers),
         }
 
-    def http_get(
-        self, url: str, headers: dict | None = None, query_params: dict | None = None
-    ):
+    def http_get(self, url: str, headers: dict | None = None, query_params: dict | None = None):
         """
         Perform a GET request to the specified URL with optional parameters.
 
@@ -150,16 +148,12 @@ class UiApp(BaseApplication):
         Tags:
             get, important
         """
-        logger.debug(
-            f"GET request to {url} with headers {headers} and query params {query_params}"
-        )
+        logger.debug(f"GET request to {url} with headers {headers} and query params {query_params}")
         response = httpx.get(url, params=query_params, headers=headers)
         response.raise_for_status()
         return self._handle_response(response)
 
-    def http_post(
-        self, url: str, headers: dict | None = None, body: dict | None = None
-    ):
+    def http_post(self, url: str, headers: dict | None = None, body: dict | None = None):
         """
         Perform a POST request to the specified URL with optional parameters.
 
@@ -197,9 +191,7 @@ class UiApp(BaseApplication):
         response.raise_for_status()
         return self._handle_response(response)
 
-    def http_delete(
-        self, url: str, headers: dict | None = None, body: dict | None = None
-    ):
+    def http_delete(self, url: str, headers: dict | None = None, body: dict | None = None):
         """
         Perform a DELETE request to the specified URL with optional parameters.
 
@@ -218,9 +210,7 @@ class UiApp(BaseApplication):
         response.raise_for_status()
         return self._handle_response(response)
 
-    def http_patch(
-        self, url: str, headers: dict | None = None, body: dict | None = None
-    ):
+    def http_patch(self, url: str, headers: dict | None = None, body: dict | None = None):
         """
         Perform a PATCH request to the specified URL with optional parameters.
 
```
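The four collapsed signatures are formatting-only changes; the behavior is unchanged. For reference, a standalone sketch of the GET path exactly as the diff shows it, except that `_handle_response` is only partially visible, so every returned key here other than `headers` is an assumption:

```python
import httpx
from loguru import logger


def http_get(url: str, headers: dict | None = None, query_params: dict | None = None):
    """Standalone equivalent of UiApp.http_get as it appears in the diff."""
    logger.debug(f"GET request to {url} with headers {headers} and query params {query_params}")
    response = httpx.get(url, params=query_params, headers=headers)
    response.raise_for_status()  # surface 4xx/5xx as exceptions, as the method does
    # UiApp._handle_response builds a dict envelope; only "headers" is visible in
    # the diff, so "status_code" below is an assumption.
    return {"status_code": response.status_code, "headers": dict(response.headers)}
```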
{universal_mcp_agents-0.1.9.dist-info → universal_mcp_agents-0.1.10.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: universal-mcp-agents
-Version: 0.1.9
+Version: 0.1.10
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/universal-mcp/applications
 Project-URL: Repository, https://github.com/universal-mcp/applications
@@ -11,6 +11,7 @@ Requires-Dist: langchain-anthropic>=0.3.19
 Requires-Dist: langchain-google-genai>=2.1.10
 Requires-Dist: langchain-openai>=0.3.32
 Requires-Dist: langgraph>=0.6.6
+Requires-Dist: typer>=0.17.4
 Requires-Dist: universal-mcp-applications>=0.1.14
 Requires-Dist: universal-mcp>=0.1.24rc21
 Provides-Extra: dev
```
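Aside from the version bump, the only metadata change is the new `typer>=0.17.4` requirement, presumably supporting the reworked `universal_mcp/agents/cli.py` listed above.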
universal_mcp_agents-0.1.10.dist-info/RECORD
ADDED

```diff
@@ -0,0 +1,42 @@
+universal_mcp/agents/__init__.py,sha256=oPoHMITGbHN4Ey68ZrVbmy7sNzVYhoXQgVII9fZwqL8,1245
+universal_mcp/agents/base.py,sha256=KXBxf3TXrVHi-wBVD-cs6PSKfMtUnm73l-hC83FjOog,6753
+universal_mcp/agents/builder.py,sha256=Xl_dGmzbtop3lICH2njnN6yxFF0SnEGY8u1tOIJy2Pk,8677
+universal_mcp/agents/cli.py,sha256=-luC55FHCTwnpcRgqnV95yQa-mGlLYDlseUjLqFrAfs,1014
+universal_mcp/agents/hil.py,sha256=_xLlBte4v5ex-RxXy5H3LqwFhtd3KE1QUHX1QDGIl2w,3760
+universal_mcp/agents/llm.py,sha256=hVRwjZs3MHl5_3BWedmurs2Jt1oZDfFX0Zj9F8KH7fk,1787
+universal_mcp/agents/react.py,sha256=0OZvdBTnQdFF3Wliv2l4wiF2BMd0VG1cVflOaWDC8r0,3166
+universal_mcp/agents/simple.py,sha256=W5_zb2DAGtAx_p5weSQGIC3gheZwxhmwZBuKKBTjd04,1938
+universal_mcp/agents/utils.py,sha256=lnRmP6HBIme4W-iKy1BOb9gTyb9-ooJxXwCyWAQFP9k,4712
+universal_mcp/agents/bigtool2/__init__.py,sha256=i4virR9r1_1FcS_-iuSHZWgEzYZwOroT6J44qPb0ZgM,2462
+universal_mcp/agents/bigtool2/__main__.py,sha256=t6fWhLh3SnpN_05cww3LA_r_5Rb0gaF_U4FH1Mpsv1Y,655
+universal_mcp/agents/bigtool2/agent.py,sha256=4GIQIy2VQgdXOezmET8G7tvP_37Vv8C027bGdGXJbTI,437
+universal_mcp/agents/bigtool2/graph.py,sha256=FuW1XyTIr1aUbAC4ea8JWRR0JENvLfZGUSgQFmk0h3A,6544
+universal_mcp/agents/bigtool2/meta_tools.py,sha256=02xOsGdxZpXBirn2KWk63UqYPQjI41nQ2KGj2zKBf7Y,5306
+universal_mcp/agents/bigtool2/prompts.py,sha256=rQFtZDkwU9z8d4PWdt6jpohGhyab658Xvk8hvNVBFBA,1843
+universal_mcp/agents/bigtool2/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
+universal_mcp/agents/bigtoolcache/__init__.py,sha256=qrUwYqhZNE0PdmUCGc5jRQt45Tr1xPDSfBxCPJA1DDM,2260
+universal_mcp/agents/bigtoolcache/__main__.py,sha256=noqT7Nqr17q7Eeiib-Dk2qG6AH4LkNyveeU_ceNDfMA,473
+universal_mcp/agents/bigtoolcache/agent.py,sha256=NCF6fdBPwVcaOyhv8xbAy5DBqbGxJiv8_fLqKib7QJc,267
+universal_mcp/agents/bigtoolcache/context.py,sha256=ny7gd-vvVpUOYAeQbAEUT0A6Vm6Nn2qGywxTzPBzYFg,929
+universal_mcp/agents/bigtoolcache/graph.py,sha256=9KUjsVu5dtPdnyNkqJ5LsMnfH-IF_4H2me_L5aHgsug,4315
+universal_mcp/agents/bigtoolcache/prompts.py,sha256=Rz30qNGdscDG65vMj9d0Vfe7X1pQjBDQBBNc3BuyC94,1886
+universal_mcp/agents/bigtoolcache/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
+universal_mcp/agents/bigtoolcache/tools.py,sha256=ynyEj9mVwKKDhxm76sjspyH51SFi63g2Vydi39pY0qY,5562
+universal_mcp/agents/codeact/__init__.py,sha256=rLE8gvOo5H4YSr71DRq76b3RV3uuotxuAy_VnBVaVwk,60
+universal_mcp/agents/codeact/__main__.py,sha256=FRfIkgcZfawP-M66v4ePijA6J2fs7nQv92G_8cj5qYA,1142
+universal_mcp/agents/codeact/agent.py,sha256=L5UlVc13AMLRDgx5l1dANoAHSCqAf5A_7wpU4qPXTsc,6326
+universal_mcp/agents/codeact/prompts.py,sha256=Atv6pd5Y7fuBZiQuXS_FKdRJk7oSETEGIob5gDGOK6E,3854
+universal_mcp/agents/codeact/sandbox.py,sha256=qeydhM1vBhgbTWpjEWVdrSRCAcw2P-qVmRXW42YXEbA,1875
+universal_mcp/agents/codeact/state.py,sha256=xu_iPOBus3xqah_5ERhkLa5ZaBZQpHqpia9O-7_3pGw,211
+universal_mcp/agents/codeact/utils.py,sha256=JUbT_HYGS_D1BzmzoVpORIe7SGur1KgJguTZ_1tZ4JY,1918
+universal_mcp/agents/planner/__init__.py,sha256=9P1UL-ABvrTIWTJ8wcvZmkqT8uyROZxsmUFhpjTK-Q4,1313
+universal_mcp/agents/planner/__main__.py,sha256=OfhTfYDZK_ZUfc8sX-Sa6TWk-dNqD2rl13Ln64mNAtw,771
+universal_mcp/agents/planner/graph.py,sha256=70hhIoEZOcYojpiyVSCedgYpnmxVP7aqdn8s6VBu-D4,3228
+universal_mcp/agents/planner/prompts.py,sha256=_JoHqiAvswtqCDu90AGUHmfsu8eWE1-_yI4LLn3pqMU,657
+universal_mcp/agents/planner/state.py,sha256=qqyp-jSGsCxe1US-PRLT4-y1sITAcVE6nCMlQLnvop0,278
+universal_mcp/agents/shared/prompts.py,sha256=VOsXSUEwBXPaAuxJTUF6bgDGr41u6uctUNQSMRt_OJc,6414
+universal_mcp/agents/shared/tool_node.py,sha256=Ua_wzMt4YgIx4zLp3_ZCow-28qORwrZ2FvKqLPt3RlI,10415
+universal_mcp/applications/ui/app.py,sha256=uaS1KrwrGxw9oexdLj2Jok77DrZQAmby3uVxCONQyV8,11276
+universal_mcp_agents-0.1.10.dist-info/METADATA,sha256=x-whXAce-crCYn1O1JVaV-ur0sEIlbsls_1qFbeFa0Q,878
+universal_mcp_agents-0.1.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+universal_mcp_agents-0.1.10.dist-info/RECORD,,
```
universal_mcp/agents/autoagent/__init__.py
REMOVED

```diff
@@ -1,30 +0,0 @@
-from langgraph.checkpoint.base import BaseCheckpointSaver
-from universal_mcp.tools.registry import ToolRegistry
-
-from universal_mcp.agents.autoagent.graph import build_graph
-from universal_mcp.agents.base import BaseAgent
-
-
-class AutoAgent(BaseAgent):
-    def __init__(
-        self,
-        name: str,
-        instructions: str,
-        model: str,
-        memory: BaseCheckpointSaver | None = None,
-        registry: ToolRegistry | None = None,
-        **kwargs,
-    ):
-        super().__init__(name, instructions, model, memory, **kwargs)
-        self.tool_registry = registry
-
-    async def _build_graph(self):
-        builder = await build_graph(self.tool_registry, self.instructions)
-        return builder.compile(checkpointer=self.memory)
-
-    @property
-    def graph(self):
-        return self._graph
-
-
-__all__ = ["AutoAgent"]
```

universal_mcp/agents/autoagent/__main__.py
REMOVED

```diff
@@ -1,25 +0,0 @@
-import asyncio
-
-from loguru import logger
-from universal_mcp.agentr.registry import AgentrRegistry
-
-from universal_mcp.agents.autoagent import AutoAgent
-
-
-async def main():
-    agent = AutoAgent(
-        name="autoagent",
-        instructions="You are a helpful assistant that can use tools to help the user.",
-        model="azure/gpt-4.1",
-        registry=AgentrRegistry(),
-    )
-    async for event in agent.stream(
-        user_input="Send an email to manoj@agentr.dev",
-        thread_id="test123",
-    ):
-        logger.info(event.content)
-    # from loguru import logger; logger.debug(result)
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
```

universal_mcp/agents/autoagent/context.py
REMOVED

```diff
@@ -1,26 +0,0 @@
-from dataclasses import dataclass, field
-from typing import Annotated
-
-from universal_mcp.agents.autoagent.prompts import SYSTEM_PROMPT
-
-
-@dataclass(kw_only=True)
-class Context:
-    """The context for the agent."""
-
-    system_prompt: str = field(
-        default=SYSTEM_PROMPT,
-        metadata={
-            "description": "The system prompt to use for the agent's interactions. "
-            "This prompt sets the context and behavior for the agent."
-        },
-    )
-
-    model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
-        default="anthropic/claude-4-sonnet-20250514",
-        # default="vertex/gemini-2.5-flash",
-        metadata={
-            "description": "The name of the language model to use for the agent's main interactions. "
-            "Should be in the form: provider/model-name."
-        },
-    )
```

universal_mcp/agents/autoagent/graph.py
REMOVED

```diff
@@ -1,170 +0,0 @@
-import json
-from datetime import UTC, datetime
-from typing import cast
-
-from langchain_core.messages import AIMessage, ToolMessage
-from langchain_core.tools import tool
-from langgraph.graph import END, START, StateGraph
-from langgraph.runtime import Runtime
-from universal_mcp.tools.registry import ToolRegistry
-from universal_mcp.types import ToolFormat
-
-from universal_mcp.agents.autoagent.context import Context
-from universal_mcp.agents.autoagent.prompts import SYSTEM_PROMPT
-from universal_mcp.agents.autoagent.state import State
-from universal_mcp.agents.llm import load_chat_model
-
-
-async def build_graph(tool_registry: ToolRegistry, instructions: str = ""):
-    @tool()
-    async def search_tools(query: str, app_ids: list[str] | None = None) -> list[str]:
-        """Retrieve tools using a search query and a list of app ids. Use multiple times if you require tools for different queries."""
-        tools_list = []
-        if app_ids is not None:
-            for app_id in app_ids:
-                tools_list.extend(
-                    await tool_registry.search_tools(query, limit=10, app_id=app_id)
-                )
-        else:
-            tools_list = await tool_registry.search_tools(query, limit=10)
-        tools_list = [f"{tool['id']}: {tool['description']}" for tool in tools_list]
-        return tools_list
-
-    @tool()
-    async def ask_user(question: str) -> str:
-        """Ask the user a question. Use this tool to ask the user for any missing information for performing a task, or when you have multiple apps to choose from for performing a task."""
-        full_question = question
-        return f"ASKING_USER: {full_question}"
-
-    @tool()
-    async def load_tools(tools: list[str]) -> list[str]:
-        """Choose the tools you want to use by passing their tool ids. Loads the tools for the chosen tools and returns the tool ids."""
-        return tools
-
-    async def call_model(
-        state: State,
-        runtime: Runtime[Context],
-    ):
-        system_prompt = SYSTEM_PROMPT
-        app_ids = await tool_registry.list_all_apps()
-        connections = await tool_registry.list_connected_apps()
-        connection_ids = set([connection["app_id"] for connection in connections])
-        connected_apps = [app["id"] for app in app_ids if app["id"] in connection_ids]
-        unconnected_apps = [
-            app["id"] for app in app_ids if app["id"] not in connection_ids
-        ]
-        app_id_descriptions = (
-            "These are the apps connected to the user's account:\n"
-            + "\n".join([f"{app}" for app in connected_apps])
-        )
-        if unconnected_apps:
-            app_id_descriptions += "\n\nOther (not connected) apps: " + "\n".join(
-                [f"{app}" for app in unconnected_apps]
-            )
-
-        system_prompt = system_prompt.format(
-            system_time=datetime.now(tz=UTC).isoformat(), app_ids=app_id_descriptions
-        )
-
-        messages = [
-            {"role": "system", "content": system_prompt + "\n" + instructions},
-            *state["messages"],
-        ]
-        model = load_chat_model(runtime.context.model)
-        loaded_tools = await tool_registry.export_tools(
-            tools=state["selected_tool_ids"], format=ToolFormat.LANGCHAIN
-        )
-        model_with_tools = model.bind_tools(
-            [search_tools, ask_user, load_tools, *loaded_tools], tool_choice="auto"
-        )
-        response_raw = model_with_tools.invoke(messages)
-        response = cast(AIMessage, response_raw)
-        return {"messages": [response]}
-
-    # Define the conditional edge that determines whether to continue or not
-    def should_continue(state: State):
-        messages = state["messages"]
-        last_message = messages[-1]
-        # If there is no function call, then we finish
-        if not last_message.tool_calls:
-            return END
-        else:
-            return "tools"
-
-    def tool_router(state: State):
-        last_message = state["messages"][-1]
-        if isinstance(last_message, ToolMessage) and last_message.name == ask_user.name:
-            return END
-        else:
-            return "agent"
-
-    async def tool_node(state: State):
-        outputs = []
-        tool_ids = state["selected_tool_ids"]
-        for tool_call in state["messages"][-1].tool_calls:
-            if tool_call["name"] == ask_user.name:
-                outputs.append(
-                    ToolMessage(
-                        content=json.dumps(
-                            "The user has been asked the question, and the run will wait for the user's response."
-                        ),
-                        name=tool_call["name"],
-                        tool_call_id=tool_call["id"],
-                    )
-                )
-            elif tool_call["name"] == search_tools.name:
-                tools = await search_tools.ainvoke(tool_call["args"])
-                outputs.append(
-                    ToolMessage(
-                        content=json.dumps(tools)
-                        + "\n\nUse the load_tools tool to load the tools you want to use.",
-                        name=tool_call["name"],
-                        tool_call_id=tool_call["id"],
-                    )
-                )
-
-            elif tool_call["name"] == load_tools.name:
-                tool_ids = await load_tools.ainvoke(tool_call["args"])
-
-                outputs.append(
-                    ToolMessage(
-                        content=json.dumps(tool_ids),
-                        name=tool_call["name"],
-                        tool_call_id=tool_call["id"],
-                    )
-                )
-            else:
-                await tool_registry.export_tools(
-                    [tool_call["name"]], ToolFormat.LANGCHAIN
-                )
-                try:
-                    tool_result = await tool_registry.call_tool(
-                        tool_call["name"], tool_call["args"]
-                    )
-                    outputs.append(
-                        ToolMessage(
-                            content=json.dumps(tool_result),
-                            name=tool_call["name"],
-                            tool_call_id=tool_call["id"],
-                        )
-                    )
-                except Exception as e:
-                    outputs.append(
-                        ToolMessage(
-                            content=json.dumps("Error: " + str(e)),
-                            name=tool_call["name"],
-                            tool_call_id=tool_call["id"],
-                        )
-                    )
-        return {"messages": outputs, "selected_tool_ids": tool_ids}
-
-    builder = StateGraph(State, context_schema=Context)
-
-    builder.add_node("agent", call_model)
-    builder.add_node("tools", tool_node)
-
-    builder.add_edge(START, "agent")
-    builder.add_conditional_edges("agent", should_continue)
-    builder.add_conditional_edges("tools", tool_router)
-
-    return builder
```
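The deletions shown above cover only part of the removed code; per the file list, `autoagent/prompts.py`, `autoagent/state.py`, `autoagent/utils.py`, the whole `bigtool` package, the two `bigtoolcache` `.txt` tool caches, and the old RECORD are also removed, but their contents are not rendered here. The search/load meta-tool pattern removed with `autoagent/graph.py` appears to migrate into the new `bigtool2/meta_tools.py` and `bigtoolcache/tools.py` modules, whose contents are likewise not part of this diff view.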