universal-mcp-agents 0.1.7__py3-none-any.whl → 0.1.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of universal-mcp-agents might be problematic; see the registry listing for more details.
- universal_mcp/agents/__init__.py +4 -1
- universal_mcp/agents/bigtool2/graph.py +49 -6
- universal_mcp/agents/builder.py +29 -8
- universal_mcp/agents/codeact/__init__.py +2 -254
- universal_mcp/agents/codeact/__main__.py +25 -0
- universal_mcp/agents/codeact/agent.py +171 -0
- universal_mcp/agents/codeact/prompts.py +92 -0
- universal_mcp/agents/codeact/sandbox.py +40 -19
- universal_mcp/agents/codeact/state.py +12 -0
- universal_mcp/agents/llm.py +1 -1
- universal_mcp/agents/planner/graph.py +1 -1
- universal_mcp/agents/shared/prompts.py +132 -0
- universal_mcp/agents/shared/tool_node.py +214 -205
- universal_mcp/applications/ui/app.py +1 -1
- {universal_mcp_agents-0.1.7.dist-info → universal_mcp_agents-0.1.9.dist-info}/METADATA +3 -2
- {universal_mcp_agents-0.1.7.dist-info → universal_mcp_agents-0.1.9.dist-info}/RECORD +17 -13
- universal_mcp/agents/codeact/test.py +0 -16
- {universal_mcp_agents-0.1.7.dist-info → universal_mcp_agents-0.1.9.dist-info}/WHEEL +0 -0
universal_mcp/agents/shared/tool_node.py

```diff
@@ -1,7 +1,5 @@
-# tool_node.py
-
 import asyncio
-from typing import Annotated, TypedDict
+from typing import Annotated, TypedDict, List, Dict

 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage, AnyMessage, HumanMessage
@@ -10,242 +8,253 @@ from langgraph.graph.message import add_messages
 from loguru import logger
 from pydantic import BaseModel, Field
 from universal_mcp.tools.registry import ToolRegistry
-from universal_mcp.types import ToolConfig

-
+from universal_mcp.agents.shared.prompts import (
+    APP_SEARCH_QUERY_PROMPT,
+    REVISE_DECOMPOSITION_PROMPT,
+    TASK_DECOMPOSITION_PROMPT,
+    TOOL_SEARCH_QUERY_PROMPT,
+    TOOL_SELECTION_PROMPT,
+)


-
+MAX_DECOMPOSITION_ATTEMPTS = 2
+
+# --- Pydantic Models for Structured LLM Outputs ---
+
+class TaskDecomposition(BaseModel):
+    sub_tasks: List[str] = Field(description="A list of sub-task descriptions.")
+
+class SearchQuery(BaseModel):
+    query: str = Field(description="A concise search query.")
+
+class ToolSelection(BaseModel):
+    tool_ids: List[str] = Field(description="The IDs of the selected tools.")
+
+
+# --- LangGraph Agent State ---
+
+class SubTask(TypedDict, total=False):
+    """Represents a single step in the execution plan."""
     task: str
-
-
-
-    messages: Annotated[list[AnyMessage], add_messages]
+    status: str # "pending", "success", "failed"
+    app_id: str
+    tool_ids: List[str]
     reasoning: str

+class AgentState(TypedDict):
+    """The central state of our agent graph."""
+    original_task: str
+    decomposition_attempts: int
+    failed_sub_task_info: str # To inform re-decomposition
+    sub_tasks: List[SubTask]
+    execution_plan: List[SubTask]
+    messages: Annotated[list[AnyMessage], add_messages]

-class ToolSelectionOutput(BaseModel):
-    tool_ids: list[str] = Field(description="The ids of the tools to use")

+# --- Graph Builder ---

 def build_tool_node_graph(llm: BaseChatModel, registry: ToolRegistry) -> StateGraph:
-    """Builds the LangGraph workflow."""
-
-    async def
-        """
-
-
-
-
-
-
-
-
-        """
-        response = await llm.ainvoke(prompt)
-        # Handle both string and list content types
-        if isinstance(response.content, list):
-            content = " ".join([str(item) for item in response.content]).strip()
-        else:
-            content = response.content.strip()
-        reasoning = f"Initial check for app requirement. LLM response: {content}"
-
-        if content.lower().startswith("yes"):
-            return {
-                **state,
-                "messages": [AIMessage(content=content)],
-                "apps_required": True,
-                "reasoning": reasoning,
-            }
-        else:
-            return {
-                **state,
-                "messages": [AIMessage(content=content)],
-                "apps_required": False,
-                "reasoning": reasoning,
-            }
-
-    async def _find_relevant_apps(state: AgentState) -> AgentState:
-        """Identifies relevant apps for the given task, preferring connected apps."""
-        task = state["task"]
-        all_apps = await registry.list_all_apps()
-        connected_apps = await registry.list_connected_apps()
-        prompt = """
-        You are an expert at identifying which applications are needed to complete specific tasks.
-
-        TASK: "{task}"
-
-        AVAILABLE APPS:
-        {all_apps}
-
-        CONNECTED APPS (user has already authenticated these):
-        {connected_apps}
-
-        INSTRUCTIONS:
-        1. Analyze the task carefully to understand what functionality is required.
-        2. Review the available apps and their descriptions to identify which ones could help.
-        3. If multiple apps can perform the task, prefer connected apps, but you MUST include all relevant apps.
-        4. Consider apps that provide complementary functionality for complex tasks.
-        5. Only suggest apps that are directly relevant to the core task requirements.
-        6. Your output should be a list of app IDs.
-
-        """
-
-        class AppList(BaseModel):
-            app_list: list[str]
-            reasoning: str
-
-        response = await llm.with_structured_output(AppList).ainvoke(
-            input=prompt.format(
-                task=task, all_apps=all_apps, connected_apps=connected_apps
+    """Builds the adaptive LangGraph workflow for tool selection."""
+
+    async def _decompose_task(state: AgentState) -> AgentState:
+        """Decomposes the main task or revises a failed decomposition."""
+        attempts = state.get("decomposition_attempts", 0)
+        task = state["original_task"]
+        failed_info = state.get("failed_sub_task_info")
+
+        if attempts > 0 and failed_info:
+            logger.warning(f"Revising decomposition. Attempt {attempts + 1}.")
+            prompt = REVISE_DECOMPOSITION_PROMPT.format(
+                task=task, failed_sub_task=failed_info
             )
-
-
-
-
-
+        else:
+            logger.info("Performing initial task decomposition.")
+            prompt = TASK_DECOMPOSITION_PROMPT.format(task=task)
+
+        response = await llm.with_structured_output(TaskDecomposition).ainvoke(prompt)
+        sub_tasks = [
+            {"task": sub_task_str, "status": "pending"}
+            for sub_task_str in response.sub_tasks
+        ]
+
         return {
-
-            "
-
-            ],
-            "relevant_apps": app_list,
-            "reasoning": state.get("reasoning", "") + "\n" + reasoning,
+            "sub_tasks": sub_tasks,
+            "decomposition_attempts": attempts + 1,
+            "messages": [AIMessage(content=f"New plan created with {len(sub_tasks)} steps.")],
         }

-    async def
-        """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    async def _generate_search_query(task: str) -> str:
-        """Generates a concise search query from the user's task."""
-        prompt = f"""
-        You are an expert at summarizing a user's task into a concise search query for finding relevant tools.
-        The query should capture all the main actions or intents of the task.
-
-        For example:
-        Task: "Send an email to abc@the-read-example.com with the subject 'Hello'"
-        Query: "send email"
-
-        Task: "Create a new contact in my CRM for John Doe"
-        Query: "create contact"
-
-        Task: "Find the latest news about artificial intelligence"
-        Query: "search news"
-
-        Task: "Post a message to the #general channel in Slack and create a new issue in Jira"
-        Query: "send message, create issue"
-
-        Task: "{task}"
-        """
-
-        class SearchQuery(BaseModel):
-            query: str
-
-        response = await llm.with_structured_output(SearchQuery).ainvoke(
-            input=prompt.format(task=task)
-        )
-        query = response.query
-        logger.info(f"Generated search query '{query}' for task '{task}'")
-        return query
-
-    async def _search_tools(state: AgentState) -> AgentState:
-        """Searches for and filters tools in the relevant apps."""
-        task = state["task"]
-        logger.info(f"Searching for tools in relevant apps for task: {task}")
-        search_query = await _generate_search_query(task)
-        apps_with_tools_dict = {}
-        reasoning_steps = []
-        for app_name in state["relevant_apps"]:
-            logger.info(
-                f"Searching for tools in {app_name} for task: {task} with query '{search_query}'"
-            )
-            found_tools = await registry.search_tools(
-                query=search_query, app_id=app_name
-            )
-            selected_tools = await _select_tools(task, found_tools)
-            apps_with_tools_dict[app_name] = selected_tools
-            reasoning_steps.append(
-                f"For '{app_name}', selected tool(s): {', '.join(selected_tools)}."
+    async def _resolve_sub_tasks(state: AgentState) -> AgentState:
+        """Iterates through sub-tasks, providing full plan context to the app selection prompt."""
+        sub_tasks = state["sub_tasks"]
+        original_task = state["original_task"]
+        current_plan = []
+
+        for i, sub_task in enumerate(sub_tasks):
+            task_desc = sub_task["task"]
+            logger.info(f"Resolving sub-task: '{task_desc}'")
+
+            # 1. Build the FULL context string from the entire plan so far
+            if not current_plan:
+                plan_context_str = "None. This is the first step."
+            else:
+                context_lines = [
+                    f"- The sub-task '{step['task']}' was assigned to app '{step['app_id']}'."
+                    for step in current_plan
+                ]
+                plan_context_str = "\n".join(context_lines)
+
+            # 2. Generate the App-specific query using the NEW full-context prompt
+            app_query_prompt = APP_SEARCH_QUERY_PROMPT.format(
+                original_task=original_task,
+                plan_context=plan_context_str,
+                sub_task=task_desc
             )
-
+            app_query_response = await llm.with_structured_output(SearchQuery).ainvoke(app_query_prompt)
+            app_search_query = app_query_response.query
+            logger.info(f"Generated context-aware app search query: '{app_search_query}'")
+
+            # 3. Search for candidate apps (the rest of the logic is the same)
+            candidate_apps = await registry.search_apps(query=app_search_query, limit=5)
+            if not candidate_apps:
+                logger.error(f"No apps found for query '{app_search_query}' from sub-task: '{task_desc}'")
+                return {"failed_sub_task_info": task_desc, "sub_tasks": []}
+
+            # 4. Generate Action-specific query for finding the tool
+            tool_query_prompt = TOOL_SEARCH_QUERY_PROMPT.format(sub_task=task_desc)
+            tool_query_response = await llm.with_structured_output(SearchQuery).ainvoke(tool_query_prompt)
+            tool_search_query = tool_query_response.query
+            logger.info(f"Generated tool search query: '{tool_search_query}'")
+
+            # 5. Find a suitable tool within the candidate apps
+            tool_found = False
+            for app in candidate_apps:
+                app_id = app["id"]
+                logger.info(f"Searching for tools in app '{app_id}' with query '{tool_search_query}'...")
+
+                found_tools = await registry.search_tools(query=tool_search_query, app_id=app_id, limit=5)
+                if not found_tools:
+                    continue
+
+                tool_candidates_str = "\n - ".join([f"{tool['name']}: {tool['description']}" for tool in found_tools])
+                selection_prompt = TOOL_SELECTION_PROMPT.format(sub_task=task_desc, tool_candidates=tool_candidates_str)
+                selection_response = await llm.with_structured_output(ToolSelection).ainvoke(selection_prompt)
+
+                if selection_response.tool_ids:
+                    logger.success(f"Found and selected tool(s) {selection_response.tool_ids} in app '{app_id}'.")
+                    sub_task.update({
+                        "status": "success",
+                        "app_id": app_id,
+                        "tool_ids": selection_response.tool_ids,
+                        "reasoning": f"Selected tool(s) {selection_response.tool_ids} from app '{app_id}' for sub-task."
+                    })
+                    current_plan.append(sub_task)
+                    tool_found = True
+                    break
+
+            if not tool_found:
+                logger.error(f"Could not find any suitable tool for sub-task: '{task_desc}'")
+                return {"failed_sub_task_info": task_desc, "sub_tasks": []}
+
+        return {"execution_plan": current_plan, "sub_tasks": []}
+
+    def _handle_planning_failure(state: AgentState) -> AgentState:
+        """Handles the case where all decomposition attempts have failed."""
+        logger.error("Maximum decomposition attempts reached. Planning failed.")
         return {
-
-            "apps_with_tools": apps_with_tools_dict,
-            "reasoning": state.get("reasoning", "") + "\n" + "\n".join(reasoning_steps),
+            "messages": [AIMessage(content="I am unable to create a complete plan for this task with the available tools. Please try rephrasing your request.")]
         }

-    def
-        """
-
-
-
-
-
-        }
+    def _consolidate_plan(state: AgentState) -> AgentState:
+        """
+        NEW: Merges steps in the execution plan that use the same app_id.
+        It combines their tool_ids into a single unique list.
+        """
+        logger.info("Consolidating final execution plan.")
+        plan = state["execution_plan"]
+        merged_apps: Dict[str, SubTask] = {}
+
+        for step in plan:
+            app_id = step["app_id"]
+            if app_id not in merged_apps:
+                # Store the first occurrence of this app
+                merged_apps[app_id] = step.copy()
+                merged_apps[app_id]["tool_ids"] = set(step["tool_ids"])
+            else:
+                # If app already seen, just update its set of tool_ids
+                merged_apps[app_id]["tool_ids"].update(step["tool_ids"])
+
+        # Convert the merged dictionary back to a list of SubTasks
+        final_plan = []
+        for app_id, step_data in merged_apps.items():
+            step_data["tool_ids"] = sorted(list(step_data["tool_ids"]))
+            final_plan.append(step_data)
+
+        return {"execution_plan": final_plan}
+
+
+    # --- Graph Definition ---

     workflow = StateGraph(AgentState)

-    workflow.add_node("
-    workflow.add_node("
-    workflow.add_node("
-    workflow.add_node("
-
-    workflow.set_entry_point("
-
-
-        "
-
-
-
-
-
-        else
-
+    workflow.add_node("decompose_task", _decompose_task)
+    workflow.add_node("resolve_sub_tasks", _resolve_sub_tasks)
+    workflow.add_node("consolidate_plan", _consolidate_plan) # NEW NODE
+    workflow.add_node("handle_planning_failure", _handle_planning_failure)
+
+    workflow.set_entry_point("decompose_task")
+
+    def should_continue(state: AgentState):
+        if not state.get("sub_tasks"): # Resolution failed or succeeded
+            if state.get("execution_plan"):
+                return "consolidate_plan" # MODIFIED: Go to consolidate on success
+            elif state["decomposition_attempts"] >= MAX_DECOMPOSITION_ATTEMPTS:
+                return "handle_planning_failure"
+            else:
+                return "decompose_task" # Re-try decomposition
+        else:
+            return "resolve_sub_tasks"

-    workflow.
-    workflow.
+    workflow.add_conditional_edges("decompose_task", lambda s: "resolve_sub_tasks")
+    workflow.add_conditional_edges("resolve_sub_tasks", should_continue)
+
+    workflow.add_edge("consolidate_plan", END) # NEW EDGE
+    workflow.add_edge("handle_planning_failure", END)

     return workflow.compile()


 async def main():
+    """Main function to run the agent."""
     from universal_mcp.agentr.registry import AgentrRegistry
-
     from universal_mcp.agents.llm import load_chat_model

     registry = AgentrRegistry()
-    llm = load_chat_model("
+    llm = load_chat_model("anthropic/claude-4-sonnet-20250514")
+
     graph = build_tool_node_graph(llm, registry)
+
+    task = "Find my latest order confirmation in Gmail, search for reviews of the main product on perplexity, and then send an email to ankit@agentr.dev telling about the reviews"
+
     initial_state = {
-        "
-        "messages": [HumanMessage(content=
+        "original_task": task,
+        "messages": [HumanMessage(content=task)],
+        "decomposition_attempts": 0,
     }
-
+
+    final_state = await graph.ainvoke(initial_state)
+
+    print("\n--- Final Agent State ---")
+    if final_state.get("execution_plan"):
+        print("Successfully created a consolidated execution plan:")
+        for step in final_state["execution_plan"]:
+            print(f"- Sub-task: {step['task']}")
+            print(f"  - App: {step['app_id']}")
+            print(f"  - Tool(s): {', '.join(step['tool_ids'])}")
+    else:
+        print("Failed to create an execution plan.")
+        print(f"Final message: {final_state['messages'][-1].content}")


 if __name__ == "__main__":
-    asyncio.run(main())
+    asyncio.run(main())
```
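For reference, a minimal standalone sketch of the merge behaviour that the new `_consolidate_plan` node introduces. The plan data below is hypothetical (the app ids and tool ids are invented for illustration); the loop mirrors the consolidation logic in the diff above.

```python
# Hypothetical execution plan as produced by _resolve_sub_tasks; values are illustrative only.
plan = [
    {"task": "find order confirmation", "app_id": "google_mail", "tool_ids": ["list_messages"]},
    {"task": "send summary email", "app_id": "google_mail", "tool_ids": ["send_email"]},
    {"task": "search product reviews", "app_id": "perplexity", "tool_ids": ["search"]},
]

# Merge steps that share an app_id, combining their tool_ids into one unique, sorted list.
merged: dict[str, dict] = {}
for step in plan:
    app_id = step["app_id"]
    if app_id not in merged:
        merged[app_id] = step.copy()
        merged[app_id]["tool_ids"] = set(step["tool_ids"])
    else:
        merged[app_id]["tool_ids"].update(step["tool_ids"])

final_plan = [{**step, "tool_ids": sorted(step["tool_ids"])} for step in merged.values()]
print(final_plan)
# Two consolidated steps: google_mail with ['list_messages', 'send_email'], perplexity with ['search'].
```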
universal_mcp/applications/ui/app.py

```diff
@@ -105,7 +105,7 @@ class UiApp(BaseApplication):
     ):
         """Create an interactive table with data.

-        The table will automatically have sorting, filtering, and search functionality.
+        The table will automatically have sorting, filtering, and search functionality. Note that this only creates a table on the frontend. Do not mix this up with tables from applications like google_sheet, airtable.

         Args:
             title (str): The title of the table.
```
{universal_mcp_agents-0.1.7.dist-info → universal_mcp_agents-0.1.9.dist-info}/METADATA

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: universal-mcp-agents
-Version: 0.1.7
+Version: 0.1.9
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/universal-mcp/applications
 Project-URL: Repository, https://github.com/universal-mcp/applications
@@ -11,7 +11,8 @@ Requires-Dist: langchain-anthropic>=0.3.19
 Requires-Dist: langchain-google-genai>=2.1.10
 Requires-Dist: langchain-openai>=0.3.32
 Requires-Dist: langgraph>=0.6.6
-Requires-Dist: universal-mcp>=0.1.
+Requires-Dist: universal-mcp-applications>=0.1.14
+Requires-Dist: universal-mcp>=0.1.24rc21
 Provides-Extra: dev
 Requires-Dist: pre-commit; extra == 'dev'
 Requires-Dist: ruff; extra == 'dev'
```
{universal_mcp_agents-0.1.7.dist-info → universal_mcp_agents-0.1.9.dist-info}/RECORD

```diff
@@ -1,9 +1,9 @@
-universal_mcp/agents/__init__.py,sha256=
+universal_mcp/agents/__init__.py,sha256=S4hVzh-bkkvSvvNBFWUSegnRv0_vxeKG_dEhYss32rs,1311
 universal_mcp/agents/base.py,sha256=h_FDAclpFKpaMCSNhBcwIMF0DLbZtyyoy_l71UxY4Aw,6892
-universal_mcp/agents/builder.py,sha256=
+universal_mcp/agents/builder.py,sha256=W7qTfF-TtmebdtjAv32y1Al_KvWilLyUN3p4Ce3X1Eo,8815
 universal_mcp/agents/cli.py,sha256=_rJV6TxBG2amH3o8mVs4pxViaTfkBhz6n5l6xhv4Z3g,1014
 universal_mcp/agents/hil.py,sha256=XfQT8QcuDbiIpUU9N4WSbO2Tm9YNSuwRqyCTWmCWaZo,3818
-universal_mcp/agents/llm.py,sha256=
+universal_mcp/agents/llm.py,sha256=hVRwjZs3MHl5_3BWedmurs2Jt1oZDfFX0Zj9F8KH7fk,1787
 universal_mcp/agents/react.py,sha256=jH42VFAB-BuPUVpaMIspBjjukYEJan-DQxtNamD1o0I,3010
 universal_mcp/agents/simple.py,sha256=Z5Ja12vJIhIHhB68WWH_5opln7FMDUiRfztKOj2Rx-U,1941
 universal_mcp/agents/utils.py,sha256=g_v7IEKtx6CBQK-Nue_weVVie62KQLQjz7izU3kOWPQ,4988
@@ -22,7 +22,7 @@ universal_mcp/agents/bigtool/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpn
 universal_mcp/agents/bigtool2/__init__.py,sha256=wkhjOeAHhIpuciLTbKZT3J2uPIJ0KFZpCDn0xX2plNs,2421
 universal_mcp/agents/bigtool2/__main__.py,sha256=SAHfoLqDEhUj3dF3vSzfetCzPGMC3UPJxBySHujSrDY,669
 universal_mcp/agents/bigtool2/agent.py,sha256=ef9IIxgJmr26eYWQdazrIA-IXHGRwT0XNyPThJR55Tk,436
-universal_mcp/agents/bigtool2/graph.py,sha256=
+universal_mcp/agents/bigtool2/graph.py,sha256=OB_SBfwF47uEx0sy8XnDDdcBv0qjEqg9uJvxu_DIX9E,12749
 universal_mcp/agents/bigtool2/prompts.py,sha256=rQFtZDkwU9z8d4PWdt6jpohGhyab658Xvk8hvNVBFBA,1843
 universal_mcp/agents/bigtool2/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
 universal_mcp/agents/bigtoolcache/__init__.py,sha256=YY7X8-XQ3AC2t_Y9MN9dZk5wTPu7iU6YS8Yhn_akgC0,1844
@@ -34,17 +34,21 @@ universal_mcp/agents/bigtoolcache/prompts.py,sha256=XDU2uJWzwGwt8t3zGjOH16YIrHJC
 universal_mcp/agents/bigtoolcache/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
 universal_mcp/agents/bigtoolcache/tools_all.txt,sha256=g52i00AOh9VTDsAtIAF8vhqtTHQVmzTn61k724niEA0,95408
 universal_mcp/agents/bigtoolcache/tools_important.txt,sha256=PD4klowvARwhbC8dcXMm_sGUWH7cAynX40nXLqeRbdQ,38593
-universal_mcp/agents/codeact/__init__.py,sha256=
-universal_mcp/agents/codeact/
-universal_mcp/agents/codeact/
+universal_mcp/agents/codeact/__init__.py,sha256=gULnc3WQoZHsWrMKqyR2r6J-dfo8I25lp84eE5K4fAE,59
+universal_mcp/agents/codeact/__main__.py,sha256=guSg-WpgVtRaRbPoFbAVZVoAi5vfdIHagcYfFiAQzeA,756
+universal_mcp/agents/codeact/agent.py,sha256=lWVKX3-nHprUXXnroUTMOOv0kgPxHlkOa7vQ6ALspSY,6395
+universal_mcp/agents/codeact/prompts.py,sha256=tBscvoGmAd_mMdRS14JH2JGvlk6MldMOkXRTq_YJcz4,3786
+universal_mcp/agents/codeact/sandbox.py,sha256=l7jofgKr3EQblHYvVNFv9rdwKSq7c0_ZHcv7fueFsDo,1698
+universal_mcp/agents/codeact/state.py,sha256=yAIs_-SClTCjdwFopF5LkqiFxppnkQx6MJSfG-tUfOU,342
 universal_mcp/agents/codeact/utils.py,sha256=VuMvLTxBBh3pgaJk8RWj5AK8XZFF-1gnZJ6jFLeM_CI,1690
 universal_mcp/agents/planner/__init__.py,sha256=b5HnTHXvs0y5KBwy9yr8d96MbyObUZ8QWrCFbUhdgGo,1335
 universal_mcp/agents/planner/__main__.py,sha256=OfhTfYDZK_ZUfc8sX-Sa6TWk-dNqD2rl13Ln64mNAtw,771
-universal_mcp/agents/planner/graph.py,sha256=
+universal_mcp/agents/planner/graph.py,sha256=kF6b2LBNsTAlCiZ1Unz78_yk-UOE5d27r_3i0LZZZ7w,3250
 universal_mcp/agents/planner/prompts.py,sha256=_JoHqiAvswtqCDu90AGUHmfsu8eWE1-_yI4LLn3pqMU,657
 universal_mcp/agents/planner/state.py,sha256=qqyp-jSGsCxe1US-PRLT4-y1sITAcVE6nCMlQLnvop0,278
-universal_mcp/agents/shared/
-universal_mcp/
-
-universal_mcp_agents-0.1.
-universal_mcp_agents-0.1.
+universal_mcp/agents/shared/prompts.py,sha256=HB4c6nmAmn9ClO-tZ-Iw2Fz6_PjfDGImU8kXVwB9js8,6415
+universal_mcp/agents/shared/tool_node.py,sha256=31oVSu0Cql2yhSFUOtJvTt4dUoTpPp9K7TQCTr3MhVQ,10793
+universal_mcp/applications/ui/app.py,sha256=7boYUxrq0MIV6Qhdn6cneymb3yJ4DGsaIt8YSRNcyow,11354
+universal_mcp_agents-0.1.9.dist-info/METADATA,sha256=aArQs7AQzGlXFOtXETMKOjfoCIBCmIFoQH2FdyfI-IE,848
+universal_mcp_agents-0.1.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+universal_mcp_agents-0.1.9.dist-info/RECORD,,
```
universal_mcp/agents/codeact/test.py (removed)

```diff
@@ -1,16 +0,0 @@
-from universal_mcp.agentr import Agentr
-from universal_mcp.tools.adapters import ToolFormat
-
-from universal_mcp.agents.codeact import create_codeact
-from universal_mcp.agents.codeact.sandbox import eval_unsafe
-from universal_mcp.agents.llm import load_chat_model
-
-model = load_chat_model("gpt-4.1")
-
-agentr = Agentr()
-agentr.load_tools(["google-mail_send_email"])
-
-tools = agentr.list_tools(format=ToolFormat.NATIVE)
-
-code_act = create_codeact(model, tools, eval_unsafe)
-agent = code_act.compile()
```
{universal_mcp_agents-0.1.7.dist-info → universal_mcp_agents-0.1.9.dist-info}/WHEEL: file without changes.