universal-mcp-agents 0.1.11__py3-none-any.whl → 0.1.13__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (64)
  1. universal_mcp/agents/__init__.py +17 -19
  2. universal_mcp/agents/base.py +10 -7
  3. universal_mcp/agents/{bigtoolcache → bigtool}/__init__.py +2 -2
  4. universal_mcp/agents/{bigtoolcache → bigtool}/__main__.py +0 -1
  5. universal_mcp/agents/{bigtoolcache → bigtool}/agent.py +0 -1
  6. universal_mcp/agents/{bigtoolcache → bigtool}/graph.py +6 -5
  7. universal_mcp/agents/builder/__main__.py +125 -0
  8. universal_mcp/agents/builder/builder.py +225 -0
  9. universal_mcp/agents/builder/prompts.py +173 -0
  10. universal_mcp/agents/builder/state.py +24 -0
  11. universal_mcp/agents/cli.py +3 -2
  12. universal_mcp/agents/codeact/__main__.py +2 -4
  13. universal_mcp/agents/codeact/agent.py +166 -64
  14. universal_mcp/agents/codeact/models.py +11 -0
  15. universal_mcp/agents/codeact/prompts.py +12 -12
  16. universal_mcp/agents/codeact/sandbox.py +69 -23
  17. universal_mcp/agents/codeact/state.py +2 -0
  18. universal_mcp/agents/codeact0/__init__.py +3 -0
  19. universal_mcp/agents/codeact0/__main__.py +35 -0
  20. universal_mcp/agents/codeact0/agent.py +136 -0
  21. universal_mcp/agents/codeact0/config.py +77 -0
  22. universal_mcp/agents/codeact0/llm_tool.py +379 -0
  23. universal_mcp/agents/codeact0/prompts.py +156 -0
  24. universal_mcp/agents/codeact0/sandbox.py +90 -0
  25. universal_mcp/agents/codeact0/state.py +12 -0
  26. universal_mcp/agents/codeact0/usecases/1-unsubscribe.yaml +4 -0
  27. universal_mcp/agents/codeact0/usecases/10-reddit2.yaml +10 -0
  28. universal_mcp/agents/codeact0/usecases/11-github.yaml +13 -0
  29. universal_mcp/agents/codeact0/usecases/2-reddit.yaml +27 -0
  30. universal_mcp/agents/codeact0/usecases/2.1-instructions.md +81 -0
  31. universal_mcp/agents/codeact0/usecases/2.2-instructions.md +71 -0
  32. universal_mcp/agents/codeact0/usecases/3-earnings.yaml +4 -0
  33. universal_mcp/agents/codeact0/usecases/4-maps.yaml +41 -0
  34. universal_mcp/agents/codeact0/usecases/5-gmailreply.yaml +8 -0
  35. universal_mcp/agents/codeact0/usecases/6-contract.yaml +6 -0
  36. universal_mcp/agents/codeact0/usecases/7-overnight.yaml +14 -0
  37. universal_mcp/agents/codeact0/usecases/8-sheets_chart.yaml +25 -0
  38. universal_mcp/agents/codeact0/usecases/9-learning.yaml +9 -0
  39. universal_mcp/agents/codeact0/utils.py +374 -0
  40. universal_mcp/agents/hil.py +4 -4
  41. universal_mcp/agents/planner/__init__.py +7 -1
  42. universal_mcp/agents/react.py +11 -3
  43. universal_mcp/agents/shared/tool_node.py +1 -34
  44. universal_mcp/agents/simple.py +12 -2
  45. universal_mcp/agents/utils.py +17 -0
  46. universal_mcp/applications/llm/__init__.py +3 -0
  47. universal_mcp/applications/llm/app.py +158 -0
  48. universal_mcp/applications/ui/app.py +118 -144
  49. {universal_mcp_agents-0.1.11.dist-info → universal_mcp_agents-0.1.13.dist-info}/METADATA +1 -1
  50. universal_mcp_agents-0.1.13.dist-info/RECORD +63 -0
  51. universal_mcp/agents/bigtool2/__init__.py +0 -67
  52. universal_mcp/agents/bigtool2/__main__.py +0 -23
  53. universal_mcp/agents/bigtool2/agent.py +0 -13
  54. universal_mcp/agents/bigtool2/graph.py +0 -155
  55. universal_mcp/agents/bigtool2/meta_tools.py +0 -120
  56. universal_mcp/agents/bigtool2/prompts.py +0 -15
  57. universal_mcp/agents/bigtoolcache/state.py +0 -27
  58. universal_mcp/agents/builder.py +0 -204
  59. universal_mcp_agents-0.1.11.dist-info/RECORD +0 -42
  60. /universal_mcp/agents/{bigtoolcache → bigtool}/context.py +0 -0
  61. /universal_mcp/agents/{bigtoolcache → bigtool}/prompts.py +0 -0
  62. /universal_mcp/agents/{bigtool2 → bigtool}/state.py +0 -0
  63. /universal_mcp/agents/{bigtoolcache → bigtool}/tools.py +0 -0
  64. {universal_mcp_agents-0.1.11.dist-info → universal_mcp_agents-0.1.13.dist-info}/WHEEL +0 -0
universal_mcp/agents/bigtool2/graph.py
@@ -1,155 +0,0 @@
- import json
- from datetime import UTC, datetime
- from typing import Literal, cast
-
- from langchain_core.language_models import BaseChatModel
- from langchain_core.messages import AIMessage, ToolMessage
- from langchain_core.tools import BaseTool
- from langgraph.graph import StateGraph
- from langgraph.types import Command
- from universal_mcp.logger import logger
- from universal_mcp.tools.registry import ToolRegistry
- from universal_mcp.types import ToolFormat
-
- from universal_mcp.agents.bigtool2.meta_tools import create_meta_tools
- from universal_mcp.agents.bigtool2.state import State
-
-
- def build_graph(tool_registry: ToolRegistry, llm: BaseChatModel, system_prompt: str, default_tools: list[BaseTool]):
-     # Instantiate meta tools (search, load, web_search)
-     search_tools, load_tools, web_search = create_meta_tools(tool_registry)
-
-     async def call_model(
-         state: State,
-     ) -> Command[Literal["select_tools", "call_tools"]]:
-         logger.info("Calling model...")
-         try:
-             system_message = system_prompt.format(system_time=datetime.now(tz=UTC).isoformat())
-             messages = [
-                 {"role": "system", "content": system_message},
-                 *state["messages"],
-             ]
-
-             logger.info(f"Selected tool IDs: {state['selected_tool_ids']}")
-             if len(state["selected_tool_ids"]) > 0:
-                 selected_tools = await tool_registry.export_tools(
-                     tools=state["selected_tool_ids"], format=ToolFormat.LANGCHAIN
-                 )
-                 logger.info(f"Exported {len(selected_tools)} tools for model.")
-             else:
-                 selected_tools = []
-
-             model = llm
-
-             tools = [search_tools, load_tools, web_search, *default_tools, *selected_tools]
-             # Remove duplicates based on tool name
-             seen_names = set()
-             unique_tools = []
-             for tool in tools:
-                 if tool.name not in seen_names:
-                     seen_names.add(tool.name)
-                     unique_tools.append(tool)
-             tools = unique_tools
-             model_with_tools = model.bind_tools(
-                 tools,
-                 tool_choice="auto",
-             )
-             response = cast(AIMessage, await model_with_tools.ainvoke(messages))
-
-             if response.tool_calls:
-                 logger.info(f"Model responded with {len(response.tool_calls)} tool calls.")
-                 return Command(goto="call_tools", update={"messages": [response]})
-             else:
-                 logger.info("Model responded with a message, ending execution.")
-                 return Command(update={"messages": [response]})
-         except Exception as e:
-             logger.error(f"Error in call_model: {e}")
-             raise
-
-     async def select_tools(state: State) -> Command[Literal["call_model"]]:
-         logger.info("Selecting tools...")
-         try:
-             tool_call = state["messages"][-1].tool_calls[0]
-             searched_tools = await search_tools.ainvoke(input=tool_call["args"])
-             tool_msg = ToolMessage(
-                 f"Available tool_ids: {searched_tools}. Call load_tools to select the required tools only.",
-                 tool_call_id=tool_call["id"],
-             )
-             return Command(goto="call_model", update={"messages": [tool_msg]})
-         except Exception as e:
-             logger.error(f"Error in select_tools: {e}")
-             raise
-
-     async def call_tools(state: State) -> Command[Literal["call_model"]]:
-         logger.info("Calling tools...")
-         outputs = []
-         recent_tool_ids = []
-         for tool_call in state["messages"][-1].tool_calls:
-             try:
-                 # Handle special tools internally (no export needed)
-                 if tool_call["name"] == search_tools.name:
-                     search_result = await search_tools.ainvoke(input=tool_call["args"])
-                     outputs.append(
-                         ToolMessage(
-                             content=search_result,
-                             name=tool_call["name"],
-                             tool_call_id=tool_call["id"],
-                         )
-                     )
-                     continue
-
-                 if tool_call["name"] == load_tools.name:
-                     selected_tool_ids = await load_tools.ainvoke(tool_call["args"])
-                     outputs.append(
-                         ToolMessage(
-                             content=json.dumps(f"Loaded tools- {selected_tool_ids}"),
-                             name=tool_call["name"],
-                             tool_call_id=tool_call["id"],
-                         )
-                     )
-                     recent_tool_ids = selected_tool_ids
-                     continue
-
-                 if tool_call["name"] == web_search.name:
-                     web_search_result = await web_search.ainvoke(input=tool_call["args"])
-                     outputs.append(
-                         ToolMessage(
-                             content=json.dumps(f"Web search result: {web_search_result}"),
-                             name=tool_call["name"],
-                             tool_call_id=tool_call["id"],
-                         )
-                     )
-                     continue
-
-                 # For other tools: export and call via registry
-                 await tool_registry.export_tools([tool_call["name"]], ToolFormat.LANGCHAIN)
-                 tool_result = await tool_registry.call_tool(tool_call["name"], tool_call["args"])
-                 outputs.append(
-                     ToolMessage(
-                         content=json.dumps(tool_result),
-                         name=tool_call["name"],
-                         tool_call_id=tool_call["id"],
-                     )
-                 )
-                 recent_tool_ids.append(tool_call["name"])
-             except Exception as e:
-                 logger.error(f"Error executing tool '{tool_call['name']}': {e}")
-                 outputs.append(
-                     ToolMessage(
-                         content=json.dumps("Error: " + str(e)),
-                         name=tool_call["name"],
-                         tool_call_id=tool_call["id"],
-                     )
-                 )
-         return Command(
-             goto="call_model",
-             update={"messages": outputs, "selected_tool_ids": recent_tool_ids},
-         )
-
-     builder = StateGraph(State)
-
-     builder.add_node(call_model)
-     builder.add_node(select_tools)
-     builder.add_node(call_tools)
-     builder.set_entry_point("call_model")
-     return builder
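For orientation, here is a hedged usage sketch of the removed graph: it would only run against universal-mcp-agents 0.1.11, where the bigtool2 module still exists. The AgentrRegistry and load_chat_model calls are copied from the deleted builder.py further down; the model name, the plain system prompt string, and the initial-state keys are illustrative assumptions, not code from the package.

# Hypothetical sketch, not from the package: compile and run the removed bigtool2 graph.
import asyncio

from universal_mcp.agentr.registry import AgentrRegistry  # registry used in builder.py's main()
from universal_mcp.agents.bigtool2.graph import build_graph
from universal_mcp.agents.llm import load_chat_model


async def run_once(question: str) -> None:
    registry = AgentrRegistry()
    llm = load_chat_model("gemini/gemini-1.5-pro", thinking=False)  # model name is an assumption
    # A plain prompt string keeps build_graph's system_prompt.format(system_time=...) call happy.
    builder = build_graph(registry, llm, "You are a helpful AI assistant.", default_tools=[])
    graph = builder.compile()  # build_graph returns an uncompiled StateGraph
    result = await graph.ainvoke(
        {"messages": [{"role": "user", "content": question}], "selected_tool_ids": []}
    )
    print(result["messages"][-1].content)


if __name__ == "__main__":
    asyncio.run(run_once("Find Google Mail tools that can send an email"))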
universal_mcp/agents/bigtool2/meta_tools.py
@@ -1,120 +0,0 @@
- import asyncio
- from typing import Any
-
- from langchain_core.tools import BaseTool, tool
- from universal_mcp.logger import logger
- from universal_mcp.tools.registry import ToolRegistry
- from universal_mcp.types import ToolFormat
-
-
- def create_meta_tools(tool_registry: ToolRegistry) -> list[BaseTool]:
-     @tool
-     async def search_tools(queries: list[str]) -> str:
-         """Search tools for a given list of queries
-         Each single query should be atomic (doable with a single tool).
-         For tasks requiring multiple tools, add separate queries for each subtask"""
-         logger.info(f"Searching tools for queries: '{queries}'")
-         try:
-             all_tool_candidates = ""
-
-             async def fetch_app_and_connection_metadata():
-                 return await asyncio.gather(
-                     tool_registry.list_all_apps(),
-                     tool_registry.list_connected_apps(),
-                 )
-
-             app_ids, connections = await fetch_app_and_connection_metadata()
-             connection_ids = set([connection["app_id"] for connection in connections])
-             connected_apps = [app["id"] for app in app_ids if app["id"] in connection_ids]
-             app_tools: dict[str, list[str]] = {}
-
-             async def find_tools_for_app(task_query: str, app_id: str) -> list[dict[str, Any]]:
-                 return await tool_registry.search_tools(task_query, limit=5, app_id=app_id)
-
-             async def find_tools_for_query(task_query: str) -> list[str]:
-                 apps_list = await tool_registry.search_apps(task_query, limit=5)
-                 per_app_tool_lists = await asyncio.gather(
-                     *(find_tools_for_app(task_query, app_entry["id"]) for app_entry in apps_list)
-                 )
-                 tools_flat = [tool for sublist in per_app_tool_lists for tool in sublist]
-                 return [f"{tool['id']}: {tool['description']}" for tool in tools_flat]
-
-             # Run all queries concurrently
-             query_results = await asyncio.gather(*(find_tools_for_query(q) for q in queries))
-
-             # Aggregate per-app with cap of 5 per app across all queries
-             for tool_desc in [tool for result in query_results for tool in result]:
-                 app = tool_desc.split("__")[0]
-                 if app not in app_tools:
-                     app_tools[app] = []
-                 if len(app_tools[app]) < 5 and tool_desc not in app_tools[app]:
-                     app_tools[app].append(tool_desc)
-             for app in app_tools:
-                 app_status = "connected" if app in connected_apps else "NOT connected"
-                 all_tool_candidates += f"Tools from {app} (status: {app_status} by user):\n"
-                 for tool in app_tools[app]:
-                     all_tool_candidates += f" - {tool}\n"
-                 all_tool_candidates += "\n"
-
-             return all_tool_candidates
-         except Exception as e:
-             logger.error(f"Error retrieving tools: {e}")
-             return "Error: " + str(e)
-
-     @tool
-     async def load_tools(tool_ids: list[str]) -> list[str]:
-         """
-         Load the tools for the given tool ids. Returns the valid tool ids after loading.
-         Tool ids are of form 'appid__toolid'. Example: 'google_mail__send_email'
-         """
-         correct, incorrect = [], []
-         app_tool_list: dict[str, list[str]] = {}
-
-         # Group tool_ids by app for fewer registry calls
-         app_to_tools: dict[str, list[str]] = {}
-         for tool_id in tool_ids:
-             if "__" not in tool_id:
-                 incorrect.append(tool_id)
-                 continue
-             app, tool = tool_id.split("__", 1)
-             app_to_tools.setdefault(app, []).append((tool_id, tool))
-
-         # Fetch all apps concurrently
-         async def fetch_tools(app: str):
-             try:
-                 tools_dict = await tool_registry.list_tools(app)
-                 return app, {tool_unit["name"] for tool_unit in tools_dict}
-             except Exception:
-                 return app, None
-
-         results = await asyncio.gather(*(fetch_tools(app) for app in app_to_tools))
-
-         # Build map of available tools per app
-         for app, tools in results:
-             if tools is not None:
-                 app_tool_list[app] = tools
-
-         # Validate tool_ids
-         for app, tool_entries in app_to_tools.items():
-             available = app_tool_list.get(app)
-             if available is None:
-                 incorrect.extend(tool_id for tool_id, _ in tool_entries)
-                 continue
-             for tool_id, tool in tool_entries:
-                 if tool in available:
-                     correct.append(tool_id)
-                 else:
-                     incorrect.append(tool_id)
-
-         return correct
-
-     @tool
-     async def web_search(query: str) -> str:
-         """Search the web for the given query. Returns the search results. Do not use for app-specific searches (for example, reddit or linkedin searches should be done using the app's tools)"""
-         await tool_registry.export_tools(["exa__search_with_filters"], ToolFormat.LANGCHAIN)
-         response = await tool_registry.call_tool(
-             "exa__search_with_filters", {"query": query, "contents": {"summary": True}}
-         )
-         return response
-
-     return [search_tools, load_tools, web_search]
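To make the three removed meta tools concrete, here is a hedged sketch of exercising them directly (again only runnable against 0.1.11). The AgentrRegistry, the query strings, and the tool ids are assumptions for illustration; the dict-style ainvoke inputs mirror how graph.py above calls these tools.

# Hypothetical sketch, not from the package: call the removed meta tools directly.
import asyncio

from universal_mcp.agentr.registry import AgentrRegistry  # assumed concrete ToolRegistry
from universal_mcp.agents.bigtool2.meta_tools import create_meta_tools


async def demo() -> None:
    search_tools, load_tools, web_search = create_meta_tools(AgentrRegistry())

    # Each query should be atomic; results come back grouped per app, capped at 5 per app.
    candidates = await search_tools.ainvoke({"queries": ["send an email", "create a calendar event"]})
    print(candidates)

    # load_tools validates ids of the form 'appid__toolid' and returns only the valid ones.
    loaded = await load_tools.ainvoke({"tool_ids": ["google_mail__send_email", "not_an__actual_tool"]})
    print(loaded)


if __name__ == "__main__":
    asyncio.run(demo())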
universal_mcp/agents/bigtool2/prompts.py
@@ -1,15 +0,0 @@
- """Default prompts used by the agent."""
-
- SYSTEM_PROMPT = """You are a helpful AI assistant.
-
- **Core Directives:**
- 1. **Always Use Tools for Tasks:** For any user request that requires an action (e.g., sending an email, searching for information, creating an event, displaying a chart), you MUST use a tool. Do not refuse a task if a tool might exist for it.
-
- 2. Check if your existing tools or knowledge can handle the user's request. If they can, use them. If they cannot, you must call the `search_tools` function to find the right tools for the user's request.You must not use the same/similar query multiple times in the list. The list should have multiple queries only if the task has clearly different sub-tasks. If you do not find any specific relevant tools, use the pre-loaded generic tools.
-
- 3. **Load Tools:** After looking at the output of `search_tools`, you MUST call the `load_tools` function to load only the tools you want to use. Provide the full tool ids, not just the app names. Use your judgement to eliminate irrelevant apps that came up just because of semantic similarity. However, sometimes, multiple apps might be relevant for the same task. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must ask the user to choose the app. In case the user asks you to use an app that is not connected, call the apps tools normally. The tool will return a link for connecting that you should pass on to the user.
-
- 4. **Strictly Follow the Process:** Your only job in your first turn is to analyze the user's request and answer using existing tools/knowledge or `search_tools` with a concise query describing the core task. Do not engage in conversation, or extend the conversation beyond the user's request.
-
- {instructions}
- """
universal_mcp/agents/bigtoolcache/state.py
@@ -1,27 +0,0 @@
- from typing import Annotated
-
- from langgraph.prebuilt.chat_agent_executor import AgentState
-
-
- def _enqueue(left: list, right: list) -> list:
-     """Treat left as a FIFO queue, append new items from right (preserve order),
-     keep items unique, and cap total size to 20 (drop oldest items)."""
-     max_size = 30
-     preferred_size = 20
-     if len(right) > preferred_size:
-         preferred_size = min(max_size, len(right))
-     queue = list(left or [])
-
-     for item in right[:preferred_size] or []:
-         if item in queue:
-             queue.remove(item)
-         queue.append(item)
-
-     if len(queue) > preferred_size:
-         queue = queue[-preferred_size:]
-
-     return queue
-
-
- class State(AgentState):
-     selected_tool_ids: Annotated[list[str], _enqueue]
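A small worked example makes the reducer's behavior concrete: new ids are appended FIFO, duplicates are moved to the most-recent end, and the queue is trimmed to the newest entries once it exceeds the cap. The tool ids below are made up; the import targets 0.1.11, where this module still exists (per the identical RECORD hashes and the rename entry above, the same reducer continues in universal_mcp/agents/bigtool/state.py in 0.1.13).

# Worked example of the _enqueue reducer above; tool ids are made up for illustration.
from universal_mcp.agents.bigtoolcache.state import _enqueue  # import works against 0.1.11

left = ["google_mail__send_email", "slack__post_message"]
right = ["slack__post_message", "github__create_issue"]

print(_enqueue(left, right))
# ['google_mail__send_email', 'slack__post_message', 'github__create_issue']
# The duplicate 'slack__post_message' is de-duplicated and moved to the most-recent end;
# once the queue exceeds the cap, the oldest entries are dropped.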
universal_mcp/agents/builder.py
@@ -1,204 +0,0 @@
- import asyncio
- from collections import defaultdict
- from collections.abc import Sequence
- from typing import Annotated, TypedDict
-
- from langchain_core.language_models import BaseChatModel
- from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
- from langgraph.checkpoint.base import BaseCheckpointSaver
- from langgraph.graph import END, START, StateGraph
- from langgraph.graph.message import add_messages
- from pydantic import BaseModel, Field
- from universal_mcp.tools.registry import ToolRegistry
- from universal_mcp.types import ToolConfig
-
- from universal_mcp.agents.base import BaseAgent
- from universal_mcp.agents.llm import load_chat_model
- from universal_mcp.agents.shared.tool_node import build_tool_node_graph
- from universal_mcp.agents.utils import messages_to_list
-
-
- class Agent(BaseModel):
-     """Agent that can be created by the builder."""
-
-     name: str = Field(description="Name of the agent.")
-     description: str = Field(description="A small description of the agent.")
-     expertise: str = Field(description="The expertise of the agent.")
-     instructions: str = Field(description="The instructions for the agent to follow.")
-     schedule: str | None = Field(description="The cron expression for the agent to run on.", default=None)
-
-
- class BuilderState(TypedDict):
-     user_task: str
-     generated_agent: Agent | None
-     tool_config: ToolConfig | None
-     messages: Annotated[Sequence[BaseMessage], add_messages]
-
-
- AGENT_BUILDER_INSTRUCTIONS = r"""
- You are a specialized Agent Generation AI, tasked with creating intelligent, effective, and context-aware AI agents based on user requests.
-
- When given a user's request, immediately follow this structured process:
-
- # 1. Intent Breakdown
- - Clearly identify the primary goal the user wants the agent to achieve.
- - Recognize any special requirements, constraints, formatting requests, or interaction rules.
- - Summarize your understanding briefly to ensure alignment with user intent.
-
- # 2. Agent Profile Definition
- - **Name (2-4 words)**: Concise, clear, and memorable name reflecting core functionality.
- - **Description (1-2 sentences)**: Captures the unique value and primary benefit to users.
- - **Expertise**: Precise domain-specific expertise area. Avoid vague or overly general titles.
- - **Instructions**: Compose detailed, highly actionable system instructions that directly command the agent's behavior. Respond in markdown as this text will be rendered in a rich text editor. Write instructions as clear imperatives, without preamble, assuming the agent identity is already established externally.
- - **Schedule**: If the user specifies a schedule, you should also provide a cron expression for the agent to run on. The schedule should be in a proper cron expression and nothing more. Do not respond with any other information or explain your reasoning for the schedule, otherwise this will cause a parsing error that is undesirable.
-
- ## ROLE & RESPONSIBILITY
- - Clearly state the agent's primary mission, e.g., "Your primary mission is...", "Your core responsibility is...".
- - Outline the exact tasks it handles, specifying expected input/output clearly.
-
- ## INTERACTION STYLE
- - Define exactly how to communicate with users: tone, format, response structure.
- - Include explicit commands, e.g., "Always wrap responses in \`\`\`text\`\`\` blocks.", "Never add greetings or meta-information.", "Always provide outputs in user's requested languages."
-
- ## OUTPUT FORMATTING RULES
- - Clearly specify formatting standards required by the user (e.g., JSON, plain text, markdown).
- - Include explicit examples to illustrate correct formatting.
-
- ## LIMITATIONS & CONSTRAINTS
- - Explicitly define boundaries of the agent's capabilities.
- - Clearly state what the agent must never do or say.
- - Include exact phrases for declining requests outside scope.
-
- ## REAL-WORLD EXAMPLES
- Provide two explicit interaction examples showing:
- - User's typical request.
- - Final agent response demonstrating perfect compliance.
-
- Create an agent that feels thoughtfully designed, intelligent, and professionally reliable, perfectly matched to the user's original intent.
- """
-
-
- async def generate_agent(llm: BaseChatModel, task: str, old_agent: Agent | None = None) -> Agent:
-     """Generates an agent from a task, optionally modifying an existing one."""
-     prompt_parts = [AGENT_BUILDER_INSTRUCTIONS]
-     if old_agent:
-         prompt_parts.append(
-             "\nThe user wants to modify the following agent design. "
-             "Incorporate their feedback into a new design.\n\n"
-             f"{old_agent.model_dump_json(indent=2)}"
-         )
-     else:
-         prompt_parts.append(f"\n\n**Task:** {task}")
-
-     prompt = "\n".join(prompt_parts)
-     structured_llm = llm.with_structured_output(Agent)
-     agent = await structured_llm.ainvoke(prompt)
-     return agent
-
-
- class BuilderAgent(BaseAgent):
-     def __init__(
-         self,
-         name: str,
-         instructions: str,
-         model: str,
-         registry: ToolRegistry,
-         memory: BaseCheckpointSaver | None = None,
-         **kwargs,
-     ):
-         super().__init__(name, instructions, model, memory, **kwargs)
-         self.registry = registry
-         self.llm: BaseChatModel = load_chat_model(model, thinking=False)
-
-     async def _create_agent(self, state: BuilderState):
-         last_message = state["messages"][-1]
-         task = last_message.content
-         agent = state.get("generated_agent")
-
-         yield {
-             "messages": [
-                 AIMessage(
-                     content="Thinking... I will now design an agent to handle your request.",
-                 )
-             ],
-         }
-         generated_agent = await generate_agent(self.llm, task, agent)
-         yield {
-             "user_task": task,
-             "generated_agent": generated_agent,
-             "messages": [AIMessage(content=("I've designed an agent to help you with your task."))],
-         }
-
-     async def _create_tool_config(self, state: BuilderState):
-         task = state["user_task"]
-         yield {
-             "messages": [
-                 AIMessage(
-                     content="Great! Now, I will select the appropriate tools for this agent. This may take a moment.",
-                 )
-             ]
-         }
-         tool_finder_graph = build_tool_node_graph(self.llm, self.registry)
-
-         initial_state = {
-             "original_task": task,
-             "messages": [HumanMessage(content=task)],
-             "decomposition_attempts": 0,
-         }
-         final_state = await tool_finder_graph.ainvoke(initial_state)
-         execution_plan = final_state.get("execution_plan")
-         tool_config = {}
-         if execution_plan:
-             # Use defaultdict to easily group tools by app_id
-             apps_with_tools = defaultdict(list)
-             for step in execution_plan:
-                 app_id = step.get("app_id")
-                 tool_ids = step.get("tool_ids")
-                 if app_id and tool_ids:
-                     apps_with_tools[app_id].extend(tool_ids)
-
-             # Convert to a regular dict and remove any duplicate tool_ids for the same app
-             tool_config = {app_id: list(set(tools)) for app_id, tools in apps_with_tools.items()}
-             final_message = "I have selected the necessary tools for the agent. The agent is ready!"
-         else:
-             # Handle the case where the graph failed to create a plan
-             final_message = "I was unable to find the right tools for this task. Please try rephrasing your request."
-
-         yield {
-             "tool_config": tool_config,
-             "messages": [AIMessage(content=final_message)],
-         }
-
-     async def _build_graph(self):
-         builder = StateGraph(BuilderState)
-         builder.add_node("create_agent", self._create_agent)
-         builder.add_node("create_tool_config", self._create_tool_config)
-
-         builder.add_edge(START, "create_agent")
-         builder.add_edge("create_agent", "create_tool_config")
-         builder.add_edge("create_tool_config", END)
-         return builder.compile(checkpointer=self.memory)
-
-
- async def main():
-     from universal_mcp.agentr.registry import AgentrRegistry
-
-     registry = AgentrRegistry()
-     agent = BuilderAgent(
-         name="Builder Agent",
-         instructions="You are a builder agent that creates other agents.",
-         model="gemini/gemini-1.5-pro",
-         registry=registry,
-     )
-     result = await agent.invoke(
-         "Send a daily email to manoj@agentr.dev with daily agenda of the day",
-     )
-     from rich import print
-
-     print(messages_to_list(result["messages"]))
-     print(result["generated_agent"])
-     print(result["tool_config"])
-
-
- if __name__ == "__main__":
-     asyncio.run(main())
universal_mcp_agents-0.1.11.dist-info/RECORD
@@ -1,42 +0,0 @@
- universal_mcp/agents/__init__.py,sha256=oPoHMITGbHN4Ey68ZrVbmy7sNzVYhoXQgVII9fZwqL8,1245
- universal_mcp/agents/base.py,sha256=KXBxf3TXrVHi-wBVD-cs6PSKfMtUnm73l-hC83FjOog,6753
- universal_mcp/agents/builder.py,sha256=Xl_dGmzbtop3lICH2njnN6yxFF0SnEGY8u1tOIJy2Pk,8677
- universal_mcp/agents/cli.py,sha256=-luC55FHCTwnpcRgqnV95yQa-mGlLYDlseUjLqFrAfs,1014
- universal_mcp/agents/hil.py,sha256=_xLlBte4v5ex-RxXy5H3LqwFhtd3KE1QUHX1QDGIl2w,3760
- universal_mcp/agents/llm.py,sha256=hVRwjZs3MHl5_3BWedmurs2Jt1oZDfFX0Zj9F8KH7fk,1787
- universal_mcp/agents/react.py,sha256=0OZvdBTnQdFF3Wliv2l4wiF2BMd0VG1cVflOaWDC8r0,3166
- universal_mcp/agents/simple.py,sha256=W5_zb2DAGtAx_p5weSQGIC3gheZwxhmwZBuKKBTjd04,1938
- universal_mcp/agents/utils.py,sha256=lnRmP6HBIme4W-iKy1BOb9gTyb9-ooJxXwCyWAQFP9k,4712
- universal_mcp/agents/bigtool2/__init__.py,sha256=i4virR9r1_1FcS_-iuSHZWgEzYZwOroT6J44qPb0ZgM,2462
- universal_mcp/agents/bigtool2/__main__.py,sha256=t6fWhLh3SnpN_05cww3LA_r_5Rb0gaF_U4FH1Mpsv1Y,655
- universal_mcp/agents/bigtool2/agent.py,sha256=4GIQIy2VQgdXOezmET8G7tvP_37Vv8C027bGdGXJbTI,437
- universal_mcp/agents/bigtool2/graph.py,sha256=FuW1XyTIr1aUbAC4ea8JWRR0JENvLfZGUSgQFmk0h3A,6544
- universal_mcp/agents/bigtool2/meta_tools.py,sha256=02xOsGdxZpXBirn2KWk63UqYPQjI41nQ2KGj2zKBf7Y,5306
- universal_mcp/agents/bigtool2/prompts.py,sha256=rQFtZDkwU9z8d4PWdt6jpohGhyab658Xvk8hvNVBFBA,1843
- universal_mcp/agents/bigtool2/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
- universal_mcp/agents/bigtoolcache/__init__.py,sha256=qrUwYqhZNE0PdmUCGc5jRQt45Tr1xPDSfBxCPJA1DDM,2260
- universal_mcp/agents/bigtoolcache/__main__.py,sha256=noqT7Nqr17q7Eeiib-Dk2qG6AH4LkNyveeU_ceNDfMA,473
- universal_mcp/agents/bigtoolcache/agent.py,sha256=NCF6fdBPwVcaOyhv8xbAy5DBqbGxJiv8_fLqKib7QJc,267
- universal_mcp/agents/bigtoolcache/context.py,sha256=ny7gd-vvVpUOYAeQbAEUT0A6Vm6Nn2qGywxTzPBzYFg,929
- universal_mcp/agents/bigtoolcache/graph.py,sha256=9KUjsVu5dtPdnyNkqJ5LsMnfH-IF_4H2me_L5aHgsug,4315
- universal_mcp/agents/bigtoolcache/prompts.py,sha256=Rz30qNGdscDG65vMj9d0Vfe7X1pQjBDQBBNc3BuyC94,1886
- universal_mcp/agents/bigtoolcache/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
- universal_mcp/agents/bigtoolcache/tools.py,sha256=ynyEj9mVwKKDhxm76sjspyH51SFi63g2Vydi39pY0qY,5562
- universal_mcp/agents/codeact/__init__.py,sha256=rLE8gvOo5H4YSr71DRq76b3RV3uuotxuAy_VnBVaVwk,60
- universal_mcp/agents/codeact/__main__.py,sha256=FRfIkgcZfawP-M66v4ePijA6J2fs7nQv92G_8cj5qYA,1142
- universal_mcp/agents/codeact/agent.py,sha256=yB99aTRHWOdl4b67UxRstuV7WplrTjUZuqP1odwSS5o,5586
- universal_mcp/agents/codeact/prompts.py,sha256=6NZkWQCaS7X7CLOTV-hMZgeWa1jf6iKbAT00INuluM0,4668
- universal_mcp/agents/codeact/sandbox.py,sha256=pG6M1elzWw4KS46ewl-2d_U9Ap8HnHJDY9Rna_76sRI,1489
- universal_mcp/agents/codeact/state.py,sha256=K25HcEljNNJDaeUgG5P68Kj752XzWTh8BwILY8w_EtE,357
- universal_mcp/agents/codeact/utils.py,sha256=JUbT_HYGS_D1BzmzoVpORIe7SGur1KgJguTZ_1tZ4JY,1918
- universal_mcp/agents/planner/__init__.py,sha256=9P1UL-ABvrTIWTJ8wcvZmkqT8uyROZxsmUFhpjTK-Q4,1313
- universal_mcp/agents/planner/__main__.py,sha256=OfhTfYDZK_ZUfc8sX-Sa6TWk-dNqD2rl13Ln64mNAtw,771
- universal_mcp/agents/planner/graph.py,sha256=70hhIoEZOcYojpiyVSCedgYpnmxVP7aqdn8s6VBu-D4,3228
- universal_mcp/agents/planner/prompts.py,sha256=_JoHqiAvswtqCDu90AGUHmfsu8eWE1-_yI4LLn3pqMU,657
- universal_mcp/agents/planner/state.py,sha256=qqyp-jSGsCxe1US-PRLT4-y1sITAcVE6nCMlQLnvop0,278
- universal_mcp/agents/shared/prompts.py,sha256=VOsXSUEwBXPaAuxJTUF6bgDGr41u6uctUNQSMRt_OJc,6414
- universal_mcp/agents/shared/tool_node.py,sha256=Ua_wzMt4YgIx4zLp3_ZCow-28qORwrZ2FvKqLPt3RlI,10415
- universal_mcp/applications/ui/app.py,sha256=uaS1KrwrGxw9oexdLj2Jok77DrZQAmby3uVxCONQyV8,11276
- universal_mcp_agents-0.1.11.dist-info/METADATA,sha256=MosDAsvQTSvUr41527xNHF8QpEeatOh7g0bpBDr7lW0,878
- universal_mcp_agents-0.1.11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- universal_mcp_agents-0.1.11.dist-info/RECORD,,