universal-mcp-agents 0.1.6__py3-none-any.whl → 0.1.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of universal-mcp-agents might be problematic.
- universal_mcp/agents/bigtool2/graph.py +51 -8
- universal_mcp/agents/builder.py +29 -8
- universal_mcp/agents/llm.py +1 -1
- universal_mcp/agents/planner/graph.py +1 -1
- universal_mcp/agents/shared/prompts.py +118 -0
- universal_mcp/agents/shared/tool_node.py +200 -206
- universal_mcp/applications/ui/app.py +1 -1
- {universal_mcp_agents-0.1.6.dist-info → universal_mcp_agents-0.1.8.dist-info}/METADATA +3 -2
- {universal_mcp_agents-0.1.6.dist-info → universal_mcp_agents-0.1.8.dist-info}/RECORD +10 -9
- {universal_mcp_agents-0.1.6.dist-info → universal_mcp_agents-0.1.8.dist-info}/WHEEL +0 -0
universal_mcp/agents/bigtool2/graph.py  CHANGED

@@ -1,6 +1,7 @@
 import json
 from datetime import UTC, datetime
 from typing import Literal, cast
+import asyncio

 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage, ToolMessage

@@ -65,16 +66,58 @@ def build_graph(

     @tool
     async def load_tools(tool_ids: list[str]) -> list[str]:
-        """
-
+        """
+        Load the tools for the given tool ids. Returns the valid tool ids after loading.
+        Tool ids are of form 'appid__toolid'. Example: 'google_mail__send_email'
+        """
+        correct, incorrect = [], []
+        app_tool_list: dict[str, list[str]] = {}
+
+        # Group tool_ids by app for fewer registry calls
+        app_to_tools: dict[str, list[str]] = {}
+        for tool_id in tool_ids:
+            if "__" not in tool_id:
+                incorrect.append(tool_id)
+                continue
+            app, tool = tool_id.split("__", 1)
+            app_to_tools.setdefault(app, []).append((tool_id, tool))
+
+        # Fetch all apps concurrently
+        async def fetch_tools(app: str):
+            try:
+                tools_dict = await tool_registry.list_tools(app)
+                return app, {tool_unit["name"] for tool_unit in tools_dict}
+            except Exception as e:
+                return app, None
+
+        results = await asyncio.gather(*(fetch_tools(app) for app in app_to_tools))
+
+        # Build map of available tools per app
+        for app, tools in results:
+            if tools is not None:
+                app_tool_list[app] = tools
+
+        # Validate tool_ids
+        for app, tool_entries in app_to_tools.items():
+            available = app_tool_list.get(app)
+            if available is None:
+                incorrect.extend(tool_id for tool_id, _ in tool_entries)
+                continue
+            for tool_id, tool in tool_entries:
+                if tool in available:
+                    correct.append(tool_id)
+                else:
+                    incorrect.append(tool_id)
+
+        return correct

     @tool
     async def web_search(query: str) -> str:
-        """Search the web for the given query. Returns the search results."""
+        """Search the web for the given query. Returns the search results. Do not use for app-specific searches (for example, reddit or linkedin searches should be done using the app's tools)"""
         tool = await tool_registry.export_tools(
-            ["
+            ["exa__search_with_filters"], ToolFormat.LANGCHAIN
         )
-        response = await tool_registry.call_tool("
+        response = await tool_registry.call_tool("exa__search_with_filters", {"query": query, "contents": {"summary": True}})
         return response


@@ -131,10 +174,10 @@ def build_graph(
             return Command(goto="select_tools", update={"messages": [response]})
         elif tool_call["name"] == load_tools.name:
             logger.info("Model requested to load tools.")
+            selected_tool_ids = await load_tools.ainvoke(tool_call["args"])
             tool_msg = ToolMessage(
-                "Loaded tools
+                f"Loaded tools- {selected_tool_ids}", tool_call_id=tool_call["id"]
             )
-            selected_tool_ids = tool_call["args"]["tool_ids"]
             logger.info(f"Loaded tools: {selected_tool_ids}")
             return Command(
                 goto="call_model",

@@ -194,7 +237,7 @@ def build_graph(
         tool_call = state["messages"][-1].tool_calls[0]
         searched_tools = await search_tools.ainvoke(input=tool_call["args"])
         tool_msg = ToolMessage(
-            f"Available
+            f"Available tool_ids: {searched_tools}. Call load_tools to select the required tools only.", tool_call_id=tool_call["id"]
         )
         return Command(goto="call_model", update={"messages": [tool_msg]})
     except Exception as e:
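The reworked load_tools above treats tool ids as 'appid__toolid' strings and keeps only those that exist in the registry. A minimal standalone sketch of that convention, where the 'available' mapping is a hypothetical stand-in for the per-app listings returned by tool_registry.list_tools:

    # Hypothetical stand-in for the registry's per-app tool listing.
    available = {"google_mail": {"send_email", "list_messages"}}

    def validate_tool_ids(tool_ids: list[str]) -> list[str]:
        valid = []
        for tool_id in tool_ids:
            if "__" not in tool_id:
                continue  # malformed id: missing the 'appid__' prefix
            app, tool = tool_id.split("__", 1)
            if tool in available.get(app, set()):
                valid.append(tool_id)
        return valid

    print(validate_tool_ids(["google_mail__send_email", "send_email"]))
    # -> ['google_mail__send_email']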
universal_mcp/agents/builder.py  CHANGED

@@ -15,7 +15,7 @@ from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.llm import load_chat_model
 from universal_mcp.agents.shared.tool_node import build_tool_node_graph
 from universal_mcp.agents.utils import messages_to_list
-
+from collections import defaultdict

 class Agent(BaseModel):
     """Agent that can be created by the builder."""

@@ -146,16 +146,37 @@ class BuilderAgent(BaseAgent):
             ]
         }
         tool_finder_graph = build_tool_node_graph(self.llm, self.registry)
-        [… old lines truncated in the diff view …]
+
+        initial_state = {
+            "original_task": task,
+            "messages": [HumanMessage(content=task)],
+            "decomposition_attempts": 0,
+        }
+        final_state = await tool_finder_graph.ainvoke(initial_state)
+        execution_plan = final_state.get("execution_plan")
+        tool_config = {}
+        if execution_plan:
+            # Use defaultdict to easily group tools by app_id
+            apps_with_tools = defaultdict(list)
+            for step in execution_plan:
+                app_id = step.get("app_id")
+                tool_ids = step.get("tool_ids")
+                if app_id and tool_ids:
+                    apps_with_tools[app_id].extend(tool_ids)
+
+            # Convert to a regular dict and remove any duplicate tool_ids for the same app
+            tool_config = {
+                app_id: list(set(tools)) for app_id, tools in apps_with_tools.items()
+            }
+            final_message = "I have selected the necessary tools for the agent. The agent is ready!"
+        else:
+            # Handle the case where the graph failed to create a plan
+            final_message = "I was unable to find the right tools for this task. Please try rephrasing your request."
+
         yield {
             "tool_config": tool_config,
             "messages": [
-                AIMessage(
-                    content="I have selected the necessary tools for the agent. The agent is ready!"
-                )
+                AIMessage(content=final_message)
             ],
         }
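The grouping step above collapses a per-sub-task execution plan into a per-app tool_config. A standalone sketch with made-up plan data showing the resulting shape (app and tool ids are illustrative; the real code uses list(set(...)), which is unordered, while this sketch sorts for a predictable comment):

    from collections import defaultdict

    # Hypothetical execution_plan as produced by the tool-finder graph.
    execution_plan = [
        {"app_id": "github", "tool_ids": ["github__list_pull_requests"]},
        {"app_id": "google_docs", "tool_ids": ["google_docs__create_document"]},
        {"app_id": "github", "tool_ids": ["github__list_pull_requests", "github__get_pull_request"]},
    ]

    apps_with_tools = defaultdict(list)
    for step in execution_plan:
        if step.get("app_id") and step.get("tool_ids"):
            apps_with_tools[step["app_id"]].extend(step["tool_ids"])

    tool_config = {app_id: sorted(set(tools)) for app_id, tools in apps_with_tools.items()}
    # tool_config == {"github": ["github__get_pull_request", "github__list_pull_requests"],
    #                 "google_docs": ["google_docs__create_document"]}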
universal_mcp/agents/llm.py  CHANGED

@@ -8,7 +8,7 @@ from langchain_openai import AzureChatOpenAI

 @lru_cache(maxsize=8)
 def load_chat_model(
-    fully_specified_name: str, temperature: float = 1.0, tags: list[str] | None = None, thinking: bool =
+    fully_specified_name: str, temperature: float = 1.0, tags: list[str] | None = None, thinking: bool = True
 ) -> BaseChatModel:
     """Load a chat model from a fully specified name.
     Args:
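The visible change is that the thinking flag now defaults to True. A hedged usage sketch (the model string is the one used in the tool_node example later in this diff):

    llm = load_chat_model("anthropic/claude-4-sonnet-20250514")                        # thinking defaults to True
    fast_llm = load_chat_model("anthropic/claude-4-sonnet-20250514", thinking=False)   # opt out explicitly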
universal_mcp/agents/planner/graph.py  CHANGED

@@ -19,7 +19,7 @@ def build_graph(llm, registry, instructions, model, executor_agent_cls):
     logger.info(f"Running tool finder for task: {task}")
     tool_finder_graph = build_tool_node_graph(llm, registry)
     tool_finder_state = await tool_finder_graph.ainvoke(
-        {"
+        {"original_task": task, "messages": state["messages"]}
     )

     if not tool_finder_state.get("apps_required"):
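The planner now seeds the shared tool-finder graph with the renamed state key. A minimal sketch of the input it builds, assuming task and state["messages"] are already in scope inside the planner node:

    tool_finder_input = {
        "original_task": task,          # maps to AgentState.original_task in tool_node.py
        "messages": state["messages"],  # running conversation history
    }
    tool_finder_state = await tool_finder_graph.ainvoke(tool_finder_input)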
universal_mcp/agents/shared/prompts.py  ADDED

@@ -0,0 +1,118 @@
+TASK_DECOMPOSITION_PROMPT = """
+You are an expert planner. Your goal is to consolidate a complex user request into the minimum number of high-level sub-tasks required. Each sub-task should correspond to a major, consolidated action within a single target application.
+
+**CORE PRINCIPLES:**
+1. **App-Centric Grouping:** Group all related actions for a single application into ONE sub-task.
+2. **Focus on Data Handoffs:** A good decomposition often involves one sub-task to *retrieve* information and a subsequent sub-task to *use* that information.
+3. **Assume Internal Capabilities:** Do NOT create sub-tasks for abstract cognitive work like 'summarize' or 'analyze'.
+4. **Simplify Single Actions:** If the user's task is already a single, simple action, the output should be a single sub-task that concisely describes that action. Do not make it identical to the user's input.
+
+**--- EXAMPLES ---**
+
+**EXAMPLE 1:**
+- **User Task:** "Create a Google Doc summarizing the last 5 merged pull requests in my GitHub repo universal-mcp/universal-mcp."
+- **CORRECT DECOMPOSITION:**
+  - "Fetch the last 5 merged pull requests from the GitHub repository 'universal-mcp/universal-mcp'."
+  - "Create a new Google Doc containing the summary of the pull requests."
+
+**EXAMPLE 2:**
+- **User Task:** "Find the best restaurants in Goa using perplexity web search."
+- **CORRECT DECOMPOSITION:**
+  - "Perform a web search using Perplexity to find the best restaurants in Goa."
+
+**--- YOUR TASK ---**
+
+**USER TASK:**
+"{task}"
+
+**YOUR DECOMPOSITION (as a list of strings):**
+"""
+
+
+APP_SEARCH_QUERY_PROMPT = """
+You are an expert at extracting the name of an application or a category of application from a sub-task description. Your goal is to generate a query for an app search engine.
+
+**INSTRUCTIONS:**
+1. Read the sub-task carefully.
+2. If an application is explicitly named (e.g., "Perplexity", "Gmail", "GitHub"), your query should be ONLY that name.
+3. If no specific application is named, generate a query for the *category* of application (e.g., "web search", "email client", "document editor").
+4. The query should be concise.
+
+**EXAMPLES:**
+- **Sub-task:** "Perform a web search using Perplexity to find the best restaurants in Goa."
+- **Query:** "Perplexity"
+
+- **Sub-task:** "Fetch all marketing emails received from Gmail in the last 7 days."
+- **Query:** "Gmail"
+
+- **Sub-task:** "Find the latest news about artificial intelligence."
+- **Query:** "web search"
+
+**SUB-TASK:**
+"{sub_task}"
+
+**YOUR CONCISE APP SEARCH QUERY:**
+"""
+
+
+TOOL_SEARCH_QUERY_PROMPT = """
+You are an expert at summarizing the core *action* of a sub-task into a concise query for finding a tool. This query should ignore any application names.
+
+**INSTRUCTIONS:**
+1. Focus only on the verb or action being performed in the sub-task.
+2. Include key entities related to the action.
+3. Do NOT include the names of applications (e.g., "Perplexity", "Gmail").
+
+**EXAMPLES:**
+- **Sub-task:** "Perform a web search using Perplexity to find the best restaurants in Goa."
+- **Query:** "web search for restaurants"
+
+- **Sub-task:** "Fetch all marketing emails received from Gmail in the last 7 days."
+- **Query:** "get emails by date"
+
+- **Sub-task:** "Create a new Google Doc and append a summary."
+- **Query:** "create document, append text"
+
+**SUB-TASK:**
+"{sub_task}"
+
+**YOUR CONCISE TOOL SEARCH QUERY:**
+"""
+
+REVISE_DECOMPOSITION_PROMPT = """
+You are an expert planner who revises plans that have failed. Your previous attempt to break down a task resulted in a sub-task that could not be matched with any available tools.
+
+**INSTRUCTIONS:**
+1. Analyze the original user task and the failed sub-task.
+2. Generate a NEW, alternative decomposition of the original task.
+3. This new plan should try to achieve the same overall goal but with different, perhaps broader or more combined, sub-tasks to increase the chance of finding a suitable tool.
+
+**ORIGINAL USER TASK:**
+"{task}"
+
+**FAILED SUB-TASK FROM PREVIOUS PLAN:**
+"{failed_sub_task}"
+
+**YOUR NEW, REVISED DECOMPOSITION (as a list of strings):**
+"""
+
+
+TOOL_SELECTION_PROMPT = """
+You are an AI assistant that selects the most appropriate tool(s) from a list to accomplish a specific sub-task.
+
+**INSTRUCTIONS:**
+1. Carefully review the sub-task to understand the required action.
+2. Examine the list of available tools and their descriptions.
+3. Select the best tool ID that matches the sub-task. You are encouraged to select multiple tools if there are multiple tools with similar capabilties
+   or names. It is always good to have more tools than having insufficent tools.
+4. If no tool is a good fit, return an empty list.
+5. Only return the tool IDs.
+
+**SUB-TASK:**
+"{sub_task}"
+
+**AVAILABLE TOOLS:**
+{tool_candidates}
+
+**YOUR SELECTED TOOL ID(s):**
+"""
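Each template above is a plain str.format string that is paired with a structured-output model in tool_node.py further down. A small sketch of how one is filled in, using a hypothetical sub-task:

    from universal_mcp.agents.shared.prompts import APP_SEARCH_QUERY_PROMPT

    sub_task = "Fetch the last 5 merged pull requests from the GitHub repository 'universal-mcp/universal-mcp'."
    prompt = APP_SEARCH_QUERY_PROMPT.format(sub_task=sub_task)
    # tool_node.py then runs llm.with_structured_output(SearchQuery).ainvoke(prompt),
    # which should yield something like SearchQuery(query="GitHub").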
universal_mcp/agents/shared/tool_node.py  CHANGED

@@ -1,7 +1,5 @@
-# tool_node.py
-
 import asyncio
-from typing import Annotated, TypedDict
+from typing import Annotated, TypedDict, List, Dict

 from langchain_core.language_models import BaseChatModel
 from langchain_core.messages import AIMessage, AnyMessage, HumanMessage

@@ -10,242 +8,238 @@ from langgraph.graph.message import add_messages
 from loguru import logger
 from pydantic import BaseModel, Field
 from universal_mcp.tools.registry import ToolRegistry
-from universal_mcp.types import ToolConfig

-
+from universal_mcp.agents.shared.prompts import (
+    APP_SEARCH_QUERY_PROMPT,
+    REVISE_DECOMPOSITION_PROMPT,
+    TASK_DECOMPOSITION_PROMPT,
+    TOOL_SEARCH_QUERY_PROMPT,
+    TOOL_SELECTION_PROMPT,
+)


-
+MAX_DECOMPOSITION_ATTEMPTS = 2
+
+# --- Pydantic Models for Structured LLM Outputs ---
+
+class TaskDecomposition(BaseModel):
+    sub_tasks: List[str] = Field(description="A list of sub-task descriptions.")
+
+class SearchQuery(BaseModel):
+    query: str = Field(description="A concise search query.")
+
+class ToolSelection(BaseModel):
+    tool_ids: List[str] = Field(description="The IDs of the selected tools.")
+
+
+# --- LangGraph Agent State ---
+
+class SubTask(TypedDict, total=False):
+    """Represents a single step in the execution plan."""
     task: str
-    [… old state fields truncated in the diff view …]
-    messages: Annotated[list[AnyMessage], add_messages]
+    status: str  # "pending", "success", "failed"
+    app_id: str
+    tool_ids: List[str]
     reasoning: str

+class AgentState(TypedDict):
+    """The central state of our agent graph."""
+    original_task: str
+    decomposition_attempts: int
+    failed_sub_task_info: str  # To inform re-decomposition
+    sub_tasks: List[SubTask]
+    execution_plan: List[SubTask]
+    messages: Annotated[list[AnyMessage], add_messages]

-class ToolSelectionOutput(BaseModel):
-    tool_ids: list[str] = Field(description="The ids of the tools to use")

+# --- Graph Builder ---

 def build_tool_node_graph(llm: BaseChatModel, registry: ToolRegistry) -> StateGraph:
-    """Builds the LangGraph workflow."""
-
-    async def
-        """
-        [… old docstring truncated in the diff view …]
-        """
-        response = await llm.ainvoke(prompt)
-        # Handle both string and list content types
-        if isinstance(response.content, list):
-            content = " ".join([str(item) for item in response.content]).strip()
-        else:
-            content = response.content.strip()
-        reasoning = f"Initial check for app requirement. LLM response: {content}"
-
-        if content.lower().startswith("yes"):
-            return {
-                **state,
-                "messages": [AIMessage(content=content)],
-                "apps_required": True,
-                "reasoning": reasoning,
-            }
-        else:
-            return {
-                **state,
-                "messages": [AIMessage(content=content)],
-                "apps_required": False,
-                "reasoning": reasoning,
-            }
-
-    async def _find_relevant_apps(state: AgentState) -> AgentState:
-        """Identifies relevant apps for the given task, preferring connected apps."""
-        task = state["task"]
-        all_apps = await registry.list_all_apps()
-        connected_apps = await registry.list_connected_apps()
-        prompt = """
-        You are an expert at identifying which applications are needed to complete specific tasks.
-
-        TASK: "{task}"
-
-        AVAILABLE APPS:
-        {all_apps}
-
-        CONNECTED APPS (user has already authenticated these):
-        {connected_apps}
-
-        INSTRUCTIONS:
-        1. Analyze the task carefully to understand what functionality is required.
-        2. Review the available apps and their descriptions to identify which ones could help.
-        3. If multiple apps can perform the task, prefer connected apps, but you MUST include all relevant apps.
-        4. Consider apps that provide complementary functionality for complex tasks.
-        5. Only suggest apps that are directly relevant to the core task requirements.
-        6. Your output should be a list of app IDs.
-
-        """
-
-        class AppList(BaseModel):
-            app_list: list[str]
-            reasoning: str
-
-        response = await llm.with_structured_output(AppList).ainvoke(
-            input=prompt.format(
-                task=task, all_apps=all_apps, connected_apps=connected_apps
+    """Builds the adaptive LangGraph workflow for tool selection."""
+
+    async def _decompose_task(state: AgentState) -> AgentState:
+        """Decomposes the main task or revises a failed decomposition."""
+        attempts = state.get("decomposition_attempts", 0)
+        task = state["original_task"]
+        failed_info = state.get("failed_sub_task_info")
+
+        if attempts > 0 and failed_info:
+            logger.warning(f"Revising decomposition. Attempt {attempts + 1}.")
+            prompt = REVISE_DECOMPOSITION_PROMPT.format(
+                task=task, failed_sub_task=failed_info
            )
-        [… old lines truncated in the diff view …]
+        else:
+            logger.info("Performing initial task decomposition.")
+            prompt = TASK_DECOMPOSITION_PROMPT.format(task=task)
+
+        response = await llm.with_structured_output(TaskDecomposition).ainvoke(prompt)
+        sub_tasks = [
+            {"task": sub_task_str, "status": "pending"}
+            for sub_task_str in response.sub_tasks
+        ]
+
         return {
-            "
-            ],
-            "relevant_apps": app_list,
-            "reasoning": state.get("reasoning", "") + "\n" + reasoning,
+            "sub_tasks": sub_tasks,
+            "decomposition_attempts": attempts + 1,
+            "messages": [AIMessage(content=f"New plan created with {len(sub_tasks)} steps.")],
         }

-    async def
-        """
-        [… old helper bodies truncated in the diff view …]
-        ""
-        [… old lines truncated in the diff view …]
-        logger.
-        search_query = await _generate_search_query(task)
-        apps_with_tools_dict = {}
-        reasoning_steps = []
-        for app_name in state["relevant_apps"]:
-            logger.info(
-                f"Searching for tools in {app_name} for task: {task} with query '{search_query}'"
-            )
-            found_tools = await registry.search_tools(
-                query=search_query, app_id=app_name
-            )
-            selected_tools = await _select_tools(task, found_tools)
-            apps_with_tools_dict[app_name] = selected_tools
-            reasoning_steps.append(
-                f"For '{app_name}', selected tool(s): {', '.join(selected_tools)}."
-            )
-
+    async def _resolve_sub_tasks(state: AgentState) -> AgentState:
+        """Iterates through sub-tasks, finding apps and tools for each using a two-query approach."""
+        sub_tasks = state["sub_tasks"]
+        current_plan = []
+
+        for i, sub_task in enumerate(sub_tasks):
+            task_desc = sub_task["task"]
+            logger.info(f"Resolving sub-task: '{task_desc}'")
+
+            # 1. Generate App-specific query to find the right application
+            app_query_prompt = APP_SEARCH_QUERY_PROMPT.format(sub_task=task_desc)
+            app_query_response = await llm.with_structured_output(SearchQuery).ainvoke(app_query_prompt)
+            app_search_query = app_query_response.query
+            logger.info(f"Generated app search query: '{app_search_query}'")
+
+            # 2. Search for candidate apps using the app-centric query
+            candidate_apps = await registry.search_apps(query=app_search_query, limit=5)
+            if not candidate_apps:
+                logger.error(f"No apps found for query '{app_search_query}' from sub-task: '{task_desc}'")
+                return {"failed_sub_task_info": task_desc, "sub_tasks": []}
+
+            # 3. Generate Action-specific query for finding the tool
+            tool_query_prompt = TOOL_SEARCH_QUERY_PROMPT.format(sub_task=task_desc)
+            tool_query_response = await llm.with_structured_output(SearchQuery).ainvoke(tool_query_prompt)
+            tool_search_query = tool_query_response.query
+            logger.info(f"Generated tool search query: '{tool_search_query}'")
+
+            # 4. Find a suitable tool within the candidate apps using the action-centric query
+            tool_found = False
+            for app in candidate_apps:
+                app_id = app["id"]
+                logger.info(f"Searching for tools in app '{app_id}' with query '{tool_search_query}'...")
+
+                found_tools = await registry.search_tools(query=tool_search_query, app_id=app_id, limit=5)
+                if not found_tools:
+                    continue
+
+                tool_candidates_str = "\n - ".join([f"{tool['name']}: {tool['description']}" for tool in found_tools])
+                selection_prompt = TOOL_SELECTION_PROMPT.format(sub_task=task_desc, tool_candidates=tool_candidates_str)
+                selection_response = await llm.with_structured_output(ToolSelection).ainvoke(selection_prompt)
+
+                if selection_response.tool_ids:
+                    logger.success(f"Found and selected tool(s) {selection_response.tool_ids} in app '{app_id}'.")
+                    sub_task.update({
+                        "status": "success",
+                        "app_id": app_id,
+                        "tool_ids": selection_response.tool_ids,
+                        "reasoning": f"Selected tool(s) {selection_response.tool_ids} from app '{app_id}' for sub-task."
+                    })
+                    current_plan.append(sub_task)
+                    tool_found = True
+                    break
+
+            if not tool_found:
+                logger.error(f"Could not find any suitable tool for sub-task: '{task_desc}'")
+                return {"failed_sub_task_info": task_desc, "sub_tasks": []}
+
+        return {"execution_plan": current_plan, "sub_tasks": []}
+
+    def _handle_planning_failure(state: AgentState) -> AgentState:
+        """Handles the case where all decomposition attempts have failed."""
+        logger.error("Maximum decomposition attempts reached. Planning failed.")
         return {
-
-            "apps_with_tools": apps_with_tools_dict,
-            "reasoning": state.get("reasoning", "") + "\n" + "\n".join(reasoning_steps),
+            "messages": [AIMessage(content="I am unable to create a complete plan for this task with the available tools. Please try rephrasing your request.")]
         }

-    def
-        """
-        [… old helper truncated in the diff view …]
-        }
+    def _consolidate_plan(state: AgentState) -> AgentState:
+        """
+        NEW: Merges steps in the execution plan that use the same app_id.
+        It combines their tool_ids into a single unique list.
+        """
+        logger.info("Consolidating final execution plan.")
+        plan = state["execution_plan"]
+        merged_apps: Dict[str, SubTask] = {}
+
+        for step in plan:
+            app_id = step["app_id"]
+            if app_id not in merged_apps:
+                # Store the first occurrence of this app
+                merged_apps[app_id] = step.copy()
+                merged_apps[app_id]["tool_ids"] = set(step["tool_ids"])
+            else:
+                # If app already seen, just update its set of tool_ids
+                merged_apps[app_id]["tool_ids"].update(step["tool_ids"])
+
+        # Convert the merged dictionary back to a list of SubTasks
+        final_plan = []
+        for app_id, step_data in merged_apps.items():
+            step_data["tool_ids"] = sorted(list(step_data["tool_ids"]))
+            final_plan.append(step_data)
+
+        return {"execution_plan": final_plan}
+
+
+    # --- Graph Definition ---

     workflow = StateGraph(AgentState)

-    workflow.add_node("
-    workflow.add_node("
-    workflow.add_node("
-    workflow.add_node("
-
-    workflow.set_entry_point("
-
-        "
-        [… old routing logic truncated in the diff view …]
-        else
-
+    workflow.add_node("decompose_task", _decompose_task)
+    workflow.add_node("resolve_sub_tasks", _resolve_sub_tasks)
+    workflow.add_node("consolidate_plan", _consolidate_plan)  # NEW NODE
+    workflow.add_node("handle_planning_failure", _handle_planning_failure)
+
+    workflow.set_entry_point("decompose_task")
+
+    def should_continue(state: AgentState):
+        if not state.get("sub_tasks"):  # Resolution failed or succeeded
+            if state.get("execution_plan"):
+                return "consolidate_plan"  # MODIFIED: Go to consolidate on success
+            elif state["decomposition_attempts"] >= MAX_DECOMPOSITION_ATTEMPTS:
+                return "handle_planning_failure"
+            else:
+                return "decompose_task"  # Re-try decomposition
+        else:
+            return "resolve_sub_tasks"

-    workflow.
-    workflow.
+    workflow.add_conditional_edges("decompose_task", lambda s: "resolve_sub_tasks")
+    workflow.add_conditional_edges("resolve_sub_tasks", should_continue)
+
+    workflow.add_edge("consolidate_plan", END)  # NEW EDGE
+    workflow.add_edge("handle_planning_failure", END)

     return workflow.compile()


 async def main():
+    """Main function to run the agent."""
     from universal_mcp.agentr.registry import AgentrRegistry
-
     from universal_mcp.agents.llm import load_chat_model

     registry = AgentrRegistry()
-    llm = load_chat_model("
+    llm = load_chat_model("anthropic/claude-4-sonnet-20250514")
+
     graph = build_tool_node_graph(llm, registry)
+
+    task = "Create a content calendar for next month with trending AI/ML topics using web search and optimal posting times based on my audience analytics in Google Sheets"
+
     initial_state = {
-        "
-        "messages": [HumanMessage(content=
+        "original_task": task,
+        "messages": [HumanMessage(content=task)],
+        "decomposition_attempts": 0,
     }
-
+
+    final_state = await graph.ainvoke(initial_state)
+
+    print("\n--- Final Agent State ---")
+    if final_state.get("execution_plan"):
+        print("Successfully created a consolidated execution plan:")
+        for step in final_state["execution_plan"]:
+            print(f"- Sub-task: {step['task']}")
+            print(f"  - App: {step['app_id']}")
+            print(f"  - Tool(s): {', '.join(step['tool_ids'])}")
+    else:
+        print("Failed to create an execution plan.")
+        print(f"Final message: {final_state['messages'][-1].content}")


 if __name__ == "__main__":
-    asyncio.run(main())
+    asyncio.run(main())
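The _consolidate_plan node above merges plan steps that share an app_id and de-duplicates their tool ids. A standalone sketch of that merge on made-up plan data (the app and tool names are illustrative only):

    plan = [
        {"task": "Fetch PRs", "app_id": "github", "tool_ids": ["github__list_pull_requests"]},
        {"task": "Fetch PR details", "app_id": "github", "tool_ids": ["github__get_pull_request", "github__list_pull_requests"]},
        {"task": "Write doc", "app_id": "google_docs", "tool_ids": ["google_docs__create_document"]},
    ]

    merged = {}
    for step in plan:
        # First occurrence keeps the step's other fields; tool_ids collect into a set.
        entry = merged.setdefault(step["app_id"], {**step, "tool_ids": set()})
        entry["tool_ids"].update(step["tool_ids"])

    final_plan = [{**s, "tool_ids": sorted(s["tool_ids"])} for s in merged.values()]
    # final_plan keeps one entry per app:
    #   github      -> ['github__get_pull_request', 'github__list_pull_requests']
    #   google_docs -> ['google_docs__create_document']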
universal_mcp/applications/ui/app.py  CHANGED

@@ -105,7 +105,7 @@ class UiApp(BaseApplication):
     ):
         """Create an interactive table with data.

-        The table will automatically have sorting, filtering, and search functionality.
+        The table will automatically have sorting, filtering, and search functionality. Note that this only creates a table on the frontend. Do not mix this up with tables from applications like google_sheet, airtable.

         Args:
             title (str): The title of the table.
{universal_mcp_agents-0.1.6.dist-info → universal_mcp_agents-0.1.8.dist-info}/METADATA  CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: universal-mcp-agents
-Version: 0.1.
+Version: 0.1.8
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/universal-mcp/applications
 Project-URL: Repository, https://github.com/universal-mcp/applications

@@ -11,7 +11,8 @@ Requires-Dist: langchain-anthropic>=0.3.19
 Requires-Dist: langchain-google-genai>=2.1.10
 Requires-Dist: langchain-openai>=0.3.32
 Requires-Dist: langgraph>=0.6.6
-Requires-Dist: universal-mcp>=0.1.
+Requires-Dist: universal-mcp-applications>=0.1.14
+Requires-Dist: universal-mcp>=0.1.24rc21
 Provides-Extra: dev
 Requires-Dist: pre-commit; extra == 'dev'
 Requires-Dist: ruff; extra == 'dev'
{universal_mcp_agents-0.1.6.dist-info → universal_mcp_agents-0.1.8.dist-info}/RECORD  CHANGED

@@ -1,9 +1,9 @@
 universal_mcp/agents/__init__.py,sha256=QfYDUZxIYQSqbpGt6NZ3U5tjf7SS1Y9uPzAwmaRoDrA,1186
 universal_mcp/agents/base.py,sha256=h_FDAclpFKpaMCSNhBcwIMF0DLbZtyyoy_l71UxY4Aw,6892
-universal_mcp/agents/builder.py,sha256=
+universal_mcp/agents/builder.py,sha256=W7qTfF-TtmebdtjAv32y1Al_KvWilLyUN3p4Ce3X1Eo,8815
 universal_mcp/agents/cli.py,sha256=_rJV6TxBG2amH3o8mVs4pxViaTfkBhz6n5l6xhv4Z3g,1014
 universal_mcp/agents/hil.py,sha256=XfQT8QcuDbiIpUU9N4WSbO2Tm9YNSuwRqyCTWmCWaZo,3818
-universal_mcp/agents/llm.py,sha256=
+universal_mcp/agents/llm.py,sha256=hVRwjZs3MHl5_3BWedmurs2Jt1oZDfFX0Zj9F8KH7fk,1787
 universal_mcp/agents/react.py,sha256=jH42VFAB-BuPUVpaMIspBjjukYEJan-DQxtNamD1o0I,3010
 universal_mcp/agents/simple.py,sha256=Z5Ja12vJIhIHhB68WWH_5opln7FMDUiRfztKOj2Rx-U,1941
 universal_mcp/agents/utils.py,sha256=g_v7IEKtx6CBQK-Nue_weVVie62KQLQjz7izU3kOWPQ,4988

@@ -22,7 +22,7 @@ universal_mcp/agents/bigtool/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpn
 universal_mcp/agents/bigtool2/__init__.py,sha256=wkhjOeAHhIpuciLTbKZT3J2uPIJ0KFZpCDn0xX2plNs,2421
 universal_mcp/agents/bigtool2/__main__.py,sha256=SAHfoLqDEhUj3dF3vSzfetCzPGMC3UPJxBySHujSrDY,669
 universal_mcp/agents/bigtool2/agent.py,sha256=ef9IIxgJmr26eYWQdazrIA-IXHGRwT0XNyPThJR55Tk,436
-universal_mcp/agents/bigtool2/graph.py,sha256
+universal_mcp/agents/bigtool2/graph.py,sha256=OB_SBfwF47uEx0sy8XnDDdcBv0qjEqg9uJvxu_DIX9E,12749
 universal_mcp/agents/bigtool2/prompts.py,sha256=rQFtZDkwU9z8d4PWdt6jpohGhyab658Xvk8hvNVBFBA,1843
 universal_mcp/agents/bigtool2/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
 universal_mcp/agents/bigtoolcache/__init__.py,sha256=YY7X8-XQ3AC2t_Y9MN9dZk5wTPu7iU6YS8Yhn_akgC0,1844

@@ -40,11 +40,12 @@ universal_mcp/agents/codeact/test.py,sha256=MT0v4HChoJU4MGb7oIDlG8lvBUymroXjAkP-
 universal_mcp/agents/codeact/utils.py,sha256=VuMvLTxBBh3pgaJk8RWj5AK8XZFF-1gnZJ6jFLeM_CI,1690
 universal_mcp/agents/planner/__init__.py,sha256=b5HnTHXvs0y5KBwy9yr8d96MbyObUZ8QWrCFbUhdgGo,1335
 universal_mcp/agents/planner/__main__.py,sha256=OfhTfYDZK_ZUfc8sX-Sa6TWk-dNqD2rl13Ln64mNAtw,771
-universal_mcp/agents/planner/graph.py,sha256=
+universal_mcp/agents/planner/graph.py,sha256=kF6b2LBNsTAlCiZ1Unz78_yk-UOE5d27r_3i0LZZZ7w,3250
 universal_mcp/agents/planner/prompts.py,sha256=_JoHqiAvswtqCDu90AGUHmfsu8eWE1-_yI4LLn3pqMU,657
 universal_mcp/agents/planner/state.py,sha256=qqyp-jSGsCxe1US-PRLT4-y1sITAcVE6nCMlQLnvop0,278
-universal_mcp/agents/shared/
-universal_mcp/
-
-universal_mcp_agents-0.1.
-universal_mcp_agents-0.1.
+universal_mcp/agents/shared/prompts.py,sha256=bsKRElJg0TxlAeN5PDZF5t3Ev-DjY8nQbgy13cpDbGw,4794
+universal_mcp/agents/shared/tool_node.py,sha256=dMyogozYQe63NmEk1r77ktvaTOOS-Jwg58w1pOMfPII,10222
+universal_mcp/applications/ui/app.py,sha256=7boYUxrq0MIV6Qhdn6cneymb3yJ4DGsaIt8YSRNcyow,11354
+universal_mcp_agents-0.1.8.dist-info/METADATA,sha256=VGar7rLNCXaOLC2zwK0etQ1t9sedD0zTvKciUz99uDA,848
+universal_mcp_agents-0.1.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+universal_mcp_agents-0.1.8.dist-info/RECORD,,
{universal_mcp_agents-0.1.6.dist-info → universal_mcp_agents-0.1.8.dist-info}/WHEEL: file without changes