universal-mcp-agents 0.1.13__py3-none-any.whl → 0.1.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of universal-mcp-agents might be problematic.

Files changed (49)
  1. universal_mcp/agents/__init__.py +1 -1
  2. universal_mcp/agents/base.py +3 -0
  3. universal_mcp/agents/bigtool/__init__.py +1 -1
  4. universal_mcp/agents/bigtool/__main__.py +4 -3
  5. universal_mcp/agents/bigtool/agent.py +3 -2
  6. universal_mcp/agents/bigtool/graph.py +68 -31
  7. universal_mcp/agents/bigtool/prompts.py +2 -2
  8. universal_mcp/agents/bigtool/tools.py +17 -4
  9. universal_mcp/agents/builder/__main__.py +129 -28
  10. universal_mcp/agents/builder/builder.py +149 -161
  11. universal_mcp/agents/builder/helper.py +71 -0
  12. universal_mcp/agents/builder/prompts.py +94 -160
  13. universal_mcp/agents/codeact0/__init__.py +2 -1
  14. universal_mcp/agents/codeact0/agent.py +13 -5
  15. universal_mcp/agents/codeact0/langgraph_agent.py +14 -0
  16. universal_mcp/agents/codeact0/llm_tool.py +1 -2
  17. universal_mcp/agents/codeact0/playbook_agent.py +353 -0
  18. universal_mcp/agents/codeact0/prompts.py +126 -41
  19. universal_mcp/agents/codeact0/sandbox.py +43 -32
  20. universal_mcp/agents/codeact0/state.py +27 -3
  21. universal_mcp/agents/codeact0/tools.py +180 -0
  22. universal_mcp/agents/codeact0/utils.py +89 -75
  23. universal_mcp/agents/shared/__main__.py +44 -0
  24. universal_mcp/agents/shared/prompts.py +49 -98
  25. universal_mcp/agents/shared/tool_node.py +160 -176
  26. universal_mcp/agents/utils.py +71 -0
  27. universal_mcp/applications/ui/app.py +2 -2
  28. {universal_mcp_agents-0.1.13.dist-info → universal_mcp_agents-0.1.15.dist-info}/METADATA +3 -3
  29. universal_mcp_agents-0.1.15.dist-info/RECORD +50 -0
  30. universal_mcp/agents/codeact0/usecases/1-unsubscribe.yaml +0 -4
  31. universal_mcp/agents/codeact0/usecases/10-reddit2.yaml +0 -10
  32. universal_mcp/agents/codeact0/usecases/11-github.yaml +0 -13
  33. universal_mcp/agents/codeact0/usecases/2-reddit.yaml +0 -27
  34. universal_mcp/agents/codeact0/usecases/2.1-instructions.md +0 -81
  35. universal_mcp/agents/codeact0/usecases/2.2-instructions.md +0 -71
  36. universal_mcp/agents/codeact0/usecases/3-earnings.yaml +0 -4
  37. universal_mcp/agents/codeact0/usecases/4-maps.yaml +0 -41
  38. universal_mcp/agents/codeact0/usecases/5-gmailreply.yaml +0 -8
  39. universal_mcp/agents/codeact0/usecases/6-contract.yaml +0 -6
  40. universal_mcp/agents/codeact0/usecases/7-overnight.yaml +0 -14
  41. universal_mcp/agents/codeact0/usecases/8-sheets_chart.yaml +0 -25
  42. universal_mcp/agents/codeact0/usecases/9-learning.yaml +0 -9
  43. universal_mcp/agents/planner/__init__.py +0 -51
  44. universal_mcp/agents/planner/__main__.py +0 -28
  45. universal_mcp/agents/planner/graph.py +0 -85
  46. universal_mcp/agents/planner/prompts.py +0 -14
  47. universal_mcp/agents/planner/state.py +0 -11
  48. universal_mcp_agents-0.1.13.dist-info/RECORD +0 -63
  49. {universal_mcp_agents-0.1.13.dist-info → universal_mcp_agents-0.1.15.dist-info}/WHEEL +0 -0
universal_mcp/agents/builder/builder.py
@@ -1,44 +1,26 @@
+import asyncio
 import json
-from collections import defaultdict
 
-from langchain_core.language_models import BaseChatModel
-from langchain_core.messages import AIMessage, HumanMessage
+from langchain_core.messages import HumanMessage
 from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.graph import END, START, StateGraph
+from langgraph.types import Command
 from loguru import logger
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolConfig
 
 from universal_mcp.agents.base import BaseAgent
-from universal_mcp.agents.builder.prompts import (
-    AGENT_BUILDER_INSTRUCTIONS,
-    AGENT_FROM_CONVERSATION_PROMPT,
-    TASK_SYNTHESIS_PROMPT,
+from universal_mcp.agents.builder.helper import (
+    _clean_conversation_history,
+    _extract_tools_from_history,
+    _merge_tool_configs,
 )
+from universal_mcp.agents.builder.prompts import _build_prompt
 from universal_mcp.agents.builder.state import Agent, BuilderState
 from universal_mcp.agents.llm import load_chat_model
 from universal_mcp.agents.shared.tool_node import build_tool_node_graph
 
 
-async def generate_agent(llm: BaseChatModel, task: str, old_agent: Agent | None = None) -> Agent:
-    """Generates an agent from a task, optionally modifying an existing one."""
-    prompt_parts = [AGENT_BUILDER_INSTRUCTIONS]
-    if old_agent:
-        prompt_parts.append(
-            "\nThe user wants to modify the following agent design. "
-            "Incorporate their feedback into a new design.\n\n"
-            f"**User Feedback:** {task}\n\n"
-            f"{old_agent.model_dump_json(indent=2)}"
-        )
-    else:
-        prompt_parts.append(f"\n\n**Task:** {task}")
-
-    prompt = "\n".join(prompt_parts)
-    structured_llm = llm.with_structured_output(Agent)
-    agent = await structured_llm.ainvoke(prompt)
-    return agent
-
-
 class BuilderAgent(BaseAgent):
     def __init__(
         self,
@@ -57,169 +39,175 @@ class BuilderAgent(BaseAgent):
             **kwargs,
         )
         self.registry = registry
-        self.llm: BaseChatModel = load_chat_model(model, thinking=False)
+        self.llm = load_chat_model(model, thinking=False)
 
-    def _entry_point_router(self, state: BuilderState):
+    async def invoke(
+        self,
+        thread_id: str,
+        user_input: dict,
+    ):
         """
-        Determines the entry point of the graph based on the input format and conversation history.
-        - If input is a JSON with 'conversation_history', it builds from the conversation.
-        - If an agent has already been generated, it assumes a modification is requested.
-        - Otherwise, it starts a fresh build from a text prompt.
+        Overrides BaseAgent.invoke to build or modify an agent.
+        This is the primary entry point for the Builder Agent.
         """
-        last_message_content = state["messages"][-1].content
-        try:
-            # Case 1: Input is a JSON for building from a conversation
-            payload = json.loads(last_message_content)
-            if isinstance(payload, dict) and "conversation_history" in payload and "tool_config" in payload:
-                logger.info("Routing to: build from conversation history.")
-                return "synthesize_from_conversation"
-        except (json.JSONDecodeError, TypeError):
-            # Input is not a valid JSON, proceed as an interactive build
-            pass
-
-        # Case 2: It's an interactive build, check for modification vs. new
-        if state.get("generated_agent"):
-            logger.info("Routing to: modify existing agent.")
-            return "synthesize_new_task"
-        else:
-            logger.info("Routing to: new agent build.")
-            return "prepare_for_build"
-
-    async def _prepare_for_build(self, state: BuilderState):
-        """Sets the initial user task to begin the build process."""
-        last_message = state["messages"][-1]
-        task = last_message.content
-        yield {
-            "user_task": task,
-        }
+        keys = ("userInput", "agent", "tools", "messages")
+        userInput, agent_data, tools, messages = (user_input.get(k) for k in keys)
+        agent = Agent(**agent_data) if agent_data else None
+
+        await self.ainit()
+        graph = self._graph
+
+        initial_state = BuilderState(
+            user_task=userInput,
+            generated_agent=agent,
+            tool_config=tools,
+            messages=[],
+        )
 
-    async def _create_agent(self, state: BuilderState):
-        """Creates or updates the agent definition from a user_task."""
-        task = state["user_task"]
-        agent = state.get("generated_agent")
+        if messages:
+            initial_state["messages"] = [HumanMessage(content=json.dumps(messages))]
+        elif not userInput and not agent:
+            raise ValueError("Either 'user_input' or 'messages' must be provided for a new agent.")
 
-        generated_agent = await generate_agent(self.llm, task, agent)
-        yield {
-            "generated_agent": generated_agent,
-        }
+        run_metadata = {"agent_name": self.name, "is_background_run": False}
 
-    async def _get_tool_config_for_task(self, task: str) -> ToolConfig:
-        """Helper method to find and configure tools for a given task string."""
-        tool_finder_graph = build_tool_node_graph(self.llm, self.registry)
-        initial_state = {
-            "original_task": task,
-            "decomposition_attempts": 0,
+        config = {
+            "configurable": {"thread_id": thread_id},
+            "metadata": run_metadata,
+            "run_id": thread_id,
+            "run_name": self.name,
         }
-        final_state = await tool_finder_graph.ainvoke(initial_state)
-        execution_plan = final_state.get("execution_plan")
 
-        if not execution_plan:
-            return {}
+        final_state = await graph.ainvoke(initial_state, config=config)
+        return final_state
 
-        apps_with_tools = defaultdict(list)
-        for step in execution_plan:
-            app_id = step.get("app_id")
-            tool_ids = step.get("tool_ids")
-            if app_id and tool_ids:
-                apps_with_tools[app_id].extend(tool_ids)
-
-        return {app_id: list(set(tools)) for app_id, tools in apps_with_tools.items()}
-
-    async def _create_tool_config(self, state: BuilderState):
-        """Creates the tool configuration for the agent."""
-        task = state["user_task"]
-        tool_config = await self._get_tool_config_for_task(task)
-        yield {
-            "tool_config": tool_config,
-        }
+    def _entry_point_router(self, state: BuilderState):
+        """
+        Determines the entry point of the graph based on the initial state.
+        """
+        has_agent = state.get("generated_agent") is not None
+        has_messages = bool(state.get("messages"))
+        has_user_task = bool(state.get("user_task"))
+
+        if has_agent:
+            logger.info("Routing to: modify_agent.")
+            return "modify_agent"
+        elif has_messages:
+            logger.info("Routing to: create_agent_from_history.")
+            return "create_agent_from_history"
+        elif has_user_task:
+            logger.info("Routing to: create_agent_from_input.")
+            return "create_agent_from_input"
+        else:
+            raise ValueError("Invalid initial state. Cannot determine route.")
+
+    async def _create_agent_from_input(self, state: BuilderState) -> Command:
+        """SCENARIO 1: Generates a new agent from a single user_input, running agent and tool creation in parallel."""
+        user_task = state["user_task"]
+        logger.info(f"Creating new agent from input: '{user_task}'")
 
-    async def _synthesize_new_task_from_feedback(self, state: BuilderState):
-        """Synthesizes a new user_task from the original task and subsequent user feedback."""
-        original_task = next((msg.content for msg in state["messages"] if isinstance(msg, HumanMessage)), None)
-        modification_request = state["messages"][-1].content
+        structured_llm = self.llm.with_structured_output(Agent)
 
-        if not original_task:
-            raise ValueError("Could not find the original task in the conversation history.")
+        async def _task_generate_agent():
+            prompt = _build_prompt(user_task=user_task)
+            return await structured_llm.ainvoke(prompt)
 
-        synthesis_prompt = TASK_SYNTHESIS_PROMPT.format(
-            original_task=original_task,
-            modification_request=modification_request,
+        async def _task_find_tools():
+            return await self._get_tool_config_for_task(user_task)
+
+        # Run agent creation and tool finding concurrently for max efficiency
+        agent_profile, tool_config = await asyncio.gather(_task_generate_agent(), _task_find_tools())
+
+        logger.info(f"Successfully created agent '{agent_profile.name}' with tools: {tool_config}")
+
+        return Command(
+            update={"generated_agent": agent_profile, "tool_config": tool_config},
+            goto=END,
         )
 
-        response = await self.llm.ainvoke(synthesis_prompt)
-        new_synthesized_task = response.content.strip()
-        logger.info(f"The new synthesized task is: {new_synthesized_task}")
-        yield {
-            "user_task": new_synthesized_task,
-        }
+    async def _create_agent_from_history(self, state: BuilderState) -> Command:
+        """SCENARIO 2: Generates an agent by synthesizing a conversation history."""
+        user_task = state.get("user_task")
 
-    async def _synthesize_from_conversation(self, state: BuilderState):
-        """
-        Takes conversation history and used tools from input to synthesize a complete agent profile.
-        This is a one-shot generation.
-        """
         content_str = state["messages"][-1].content
-        initial_input = json.loads(content_str)
+        raw_history = json.loads(content_str)
+        conversation_history = _clean_conversation_history(raw_history)
+
+        logger.info(f"Creating new agent from conversation history (length: {len(conversation_history)}).")
 
-        conversation_history = initial_input.get("conversation_history")
-        tool_config = initial_input.get("tool_config")
+        # 1. Generate the agent profile first to get the definitive instructions
+        tools_from_history = _extract_tools_from_history(raw_history)
+        prompt = _build_prompt(
+            user_task=user_task,
+            conversation_history=conversation_history,
+            tool_config=tools_from_history,
+        )
+        structured_llm = self.llm.with_structured_output(Agent)
+        generated_agent = await structured_llm.ainvoke(prompt)
+        logger.info(f"Successfully generated agent profile for '{generated_agent.name}'.")
+
+        # 2. Synthesize tool configuration based on the new instructions and history
+        tools_from_instructions = await self._get_tool_config_for_task(generated_agent.instructions)
 
-        if not conversation_history or not tool_config:
-            raise ValueError("Input must be a dictionary containing 'conversation_history' and 'tool_config'.")
+        final_tool_config = _merge_tool_configs(tools_from_history, tools_from_instructions)
+        logger.info(f"Final synthesized tool configuration: {final_tool_config}")
 
-        prompt = AGENT_FROM_CONVERSATION_PROMPT.format(
-            conversation_history=json.dumps(conversation_history, indent=2),
-            tool_config=json.dumps(tool_config, indent=2),
+        return Command(
+            update={
+                "generated_agent": generated_agent,
+                "tool_config": final_tool_config,
+            },
+            goto=END,
         )
 
-        structured_llm = self.llm.with_structured_output(Agent)
-        generated_agent_profile = await structured_llm.ainvoke(prompt)
-
-        yield {
-            "generated_agent": generated_agent_profile,
-            "tool_config": tool_config,
-            "messages": [
-                AIMessage(
-                    content=f"Successfully generated agent '{generated_agent_profile.name}' from the conversation history."
-                )
-            ],
-        }
+    async def _modify_agent(self, state: BuilderState) -> Command:
+        """SCENARIO 3: Modifies an existing agent and re-evaluates its tool configuration."""
+        existing_agent = state["generated_agent"]
+        modification_request = state["user_task"]
+        existing_tools = state["tool_config"]
 
-    async def _build_graph(self):
-        """Builds the conversational agent graph."""
-        builder = StateGraph(BuilderState)
+        logger.info(f"Modifying existing agent '{existing_agent.name}' with request: '{modification_request}'")
 
-        # Add nodes
-        builder.add_node("prepare_for_build", self._prepare_for_build)
-        builder.add_node("create_agent", self._create_agent)
-        builder.add_node("create_tool_config", self._create_tool_config)
-        builder.add_node("synthesize_new_task", self._synthesize_new_task_from_feedback)
-        builder.add_node("synthesize_from_conversation", self._synthesize_from_conversation)
-
-        # The conditional entry point decides the workflow
-        builder.add_conditional_edges(
-            START,
-            self._entry_point_router,
-            {
-                "prepare_for_build": "prepare_for_build",
-                "synthesize_new_task": "synthesize_new_task",
-                "synthesize_from_conversation": "synthesize_from_conversation",
+        # 1. Generate the modified agent profile to get the new definitive instructions
+        prompt = _build_prompt(
+            existing_instructions=existing_agent.instructions,
+            modification_request=modification_request,
+        )
+        structured_llm = self.llm.with_structured_output(Agent)
+        modified_agent = await structured_llm.ainvoke(prompt)
+        logger.info(f"Successfully generated modified agent profile for '{modified_agent.name}'.")
+
+        # 2. Update tool configuration based on the NEW instructions, preserving existing tools
+        tools_from_new_instructions = await self._get_tool_config_for_task(modified_agent.instructions)
+        final_tool_config = _merge_tool_configs(existing_tools, tools_from_new_instructions)
+        logger.info(f"Final updated tool configuration: {final_tool_config}")
+
+        return Command(
+            update={
+                "generated_agent": modified_agent,
+                "tool_config": final_tool_config,
             },
+            goto=END,
         )
 
-        # Path for a fresh interactive build
-        builder.add_edge("prepare_for_build", "create_agent")
-        builder.add_edge("prepare_for_build", "create_tool_config")
+    async def _get_tool_config_for_task(self, task: str) -> ToolConfig:
+        """Helper method to find and configure tools for a given task string."""
+        if not task:
+            return {}
+        tool_finder_graph = build_tool_node_graph(self.llm, self.registry)
+        final_state = await tool_finder_graph.ainvoke({"original_task": task})
+        return final_state.get("execution_plan") or {}
 
-        # Path for modifying an existing build
-        builder.add_edge("synthesize_new_task", "create_agent")
-        builder.add_edge("synthesize_new_task", "create_tool_config")
+    async def _build_graph(self):
+        """Builds the conversational agent graph with the new, scenario-based structure."""
+        builder = StateGraph(BuilderState)
 
-        # Path for building from conversation ends after its single step
-        builder.add_edge("synthesize_from_conversation", END)
+        # Add the three self-contained nodes for each scenario
+        builder.add_node("create_agent_from_input", self._create_agent_from_input)
+        builder.add_node("create_agent_from_history", self._create_agent_from_history)
+        builder.add_node("modify_agent", self._modify_agent)
 
-        # Interactive creation nodes lead to the end of the run
-        builder.add_edge("create_agent", END)
-        builder.add_edge("create_tool_config", END)
+        # The entry point router directs to one of the three nodes, and they all go to END
+        builder.add_conditional_edges(START, self._entry_point_router)
 
         return builder.compile(checkpointer=self.memory)
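
Note: the rewritten builder.py replaces the old message-parsing entry router with a structured invoke(thread_id, user_input) call, and the router dispatches on which fields of the initial state are populated. Below is a minimal sketch of the three input payload shapes and where they route; the task text, agent fields, and tool names are illustrative examples, not values taken from this release.

# Scenario 1: a plain task string -> routes to create_agent_from_input
new_agent_payload = {
    "userInput": "Summarize my unread emails every morning",  # hypothetical task
    "agent": None,
    "tools": None,
    "messages": None,
}

# Scenario 2: a prior conversation -> routes to create_agent_from_history
from_history_payload = {
    "userInput": None,
    "agent": None,
    "tools": None,
    "messages": [
        {"type": "human", "content": "Find trending posts about AI"},
        {"type": "tool", "name": "reddit__get_subreddit_posts", "content": "..."},  # "app_id__tool_id" naming
        {"type": "ai", "content": "Here are the top posts..."},
    ],
}

# Scenario 3: an existing agent plus feedback -> routes to modify_agent
modify_payload = {
    "userInput": "Also post the summary to Slack",
    "agent": {"name": "Email Summarizer", "instructions": "..."},  # Agent model fields (illustrative subset)
    "tools": {"google_mail": ["list_messages"]},
    "messages": None,
}

Each payload would be passed as await builder_agent.invoke(thread_id="...", user_input=payload), and the graph returns the final state containing the generated_agent profile and its tool_config.
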
universal_mcp/agents/builder/helper.py (new file)
@@ -0,0 +1,71 @@
+import collections
+from collections import defaultdict
+
+from loguru import logger
+from universal_mcp.types import ToolConfig
+
+
+def _extract_tools_from_history(history: list[dict]) -> ToolConfig:
+    """
+    Parses a conversation history to find and extract all tool names,
+    returning them in a structured ToolConfig format.
+
+    This function identifies messages with a "type" of "tool", extracts the
+    tool's name from the "name" key, and filters out a predefined list of
+    excluded tools. The remaining tool names are expected to be in an
+    "app_id__tool_id" format. These are then organized into a dictionary
+    mapping each app_id to a sorted list of its associated tool_ids.
+    """
+    apps_with_tools = collections.defaultdict(set)
+    excluded_tools = {"search_tools", "load_tools"}
+
+    for message in history:
+        if message.get("type") == "tool":
+            full_tool_name = message.get("name")
+            if not full_tool_name or full_tool_name in excluded_tools:
+                continue
+
+            if "__" in full_tool_name:
+                app_id, tool_id = full_tool_name.split("__", 1)
+                apps_with_tools[app_id].add(tool_id)
+
+    return {app_id: sorted(list(tools)) for app_id, tools in apps_with_tools.items()}
+
+
+def _clean_conversation_history(history: list[dict]) -> list[dict]:
+    """
+    Filters a raw conversation history, keeping only messages relevant for
+    agent synthesis (human, ai, and tool messages with a name containing double underscores).
+    """
+    cleaned_history = []
+    for message in history:
+        msg_type = message.get("type")
+
+        if msg_type in ["human", "ai"]:
+            cleaned_history.append(message)
+        elif msg_type == "tool" and isinstance(message.get("name"), str) and "__" in message["name"]:
+            cleaned_history.append(message)
+
+    return cleaned_history
+
+
+def _merge_tool_configs(old_config: ToolConfig, new_config: ToolConfig) -> ToolConfig:
+    """Merges two tool configurations, taking the union of tools for each app."""
+    if not old_config:
+        return new_config
+    if not new_config:
+        return old_config
+
+    # Start with a copy of the old configuration
+    merged_config = defaultdict(set)
+    for app, tools in old_config.items():
+        merged_config[app].update(tools)
+
+    # Add the new tools, ensuring uniqueness
+    for app, tools in new_config.items():
+        merged_config[app].update(tools)
+
+    # Convert the sets back to sorted lists for consistent output
+    final_config = {app: sorted(list(tool_set)) for app, tool_set in merged_config.items()}
+    logger.info(f"Merged tool configuration: {final_config}")
+    return final_config
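
Taken together, these helpers back the conversation-history path in builder.py. A quick illustration of how they compose; the history entries and tool names below are made up for the example:

from universal_mcp.agents.builder.helper import (
    _clean_conversation_history,
    _extract_tools_from_history,
    _merge_tool_configs,
)

# Hypothetical raw history: message dicts with a "type", and for tool messages
# a "name" in the "app_id__tool_id" format the helpers expect.
raw_history = [
    {"type": "human", "content": "Send me a summary of my unread emails"},
    {"type": "tool", "name": "search_tools", "content": "..."},  # in the excluded_tools set
    {"type": "tool", "name": "google_mail__list_messages", "content": "..."},
    {"type": "ai", "content": "Here is your summary..."},
]

tools_from_history = _extract_tools_from_history(raw_history)
# -> {"google_mail": ["list_messages"]}

cleaned = _clean_conversation_history(raw_history)
# -> keeps the human/ai messages plus the google_mail__list_messages tool message (3 of 4)

merged = _merge_tool_configs(tools_from_history, {"google_mail": ["send_email"], "slack": ["send_message"]})
# -> {"google_mail": ["list_messages", "send_email"], "slack": ["send_message"]}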