universal-mcp-agents 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. universal_mcp/agents/__init__.py +19 -0
  2. universal_mcp/agents/autoagent/__init__.py +1 -1
  3. universal_mcp/agents/autoagent/__main__.py +1 -1
  4. universal_mcp/agents/autoagent/graph.py +32 -13
  5. universal_mcp/agents/autoagent/studio.py +3 -8
  6. universal_mcp/agents/base.py +80 -22
  7. universal_mcp/agents/bigtool/__init__.py +13 -9
  8. universal_mcp/agents/bigtool/__main__.py +6 -7
  9. universal_mcp/agents/bigtool/graph.py +84 -40
  10. universal_mcp/agents/bigtool/prompts.py +3 -3
  11. universal_mcp/agents/bigtool2/__init__.py +16 -6
  12. universal_mcp/agents/bigtool2/__main__.py +7 -6
  13. universal_mcp/agents/bigtool2/agent.py +4 -2
  14. universal_mcp/agents/bigtool2/graph.py +78 -36
  15. universal_mcp/agents/bigtool2/prompts.py +1 -1
  16. universal_mcp/agents/bigtoolcache/__init__.py +8 -4
  17. universal_mcp/agents/bigtoolcache/__main__.py +1 -1
  18. universal_mcp/agents/bigtoolcache/agent.py +5 -3
  19. universal_mcp/agents/bigtoolcache/context.py +0 -1
  20. universal_mcp/agents/bigtoolcache/graph.py +99 -69
  21. universal_mcp/agents/bigtoolcache/prompts.py +28 -0
  22. universal_mcp/agents/bigtoolcache/tools_all.txt +956 -0
  23. universal_mcp/agents/bigtoolcache/tools_important.txt +474 -0
  24. universal_mcp/agents/builder.py +62 -20
  25. universal_mcp/agents/cli.py +19 -5
  26. universal_mcp/agents/codeact/__init__.py +16 -4
  27. universal_mcp/agents/codeact/test.py +2 -1
  28. universal_mcp/agents/hil.py +16 -4
  29. universal_mcp/agents/llm.py +12 -4
  30. universal_mcp/agents/planner/__init__.py +14 -4
  31. universal_mcp/agents/planner/__main__.py +10 -6
  32. universal_mcp/agents/planner/graph.py +9 -3
  33. universal_mcp/agents/planner/prompts.py +14 -1
  34. universal_mcp/agents/planner/state.py +0 -1
  35. universal_mcp/agents/react.py +36 -22
  36. universal_mcp/agents/shared/tool_node.py +26 -11
  37. universal_mcp/agents/simple.py +27 -4
  38. universal_mcp/agents/tools.py +9 -4
  39. universal_mcp/agents/ui_tools.py +305 -0
  40. universal_mcp/agents/utils.py +55 -17
  41. {universal_mcp_agents-0.1.3.dist-info → universal_mcp_agents-0.1.5.dist-info}/METADATA +3 -2
  42. universal_mcp_agents-0.1.5.dist-info/RECORD +52 -0
  43. universal_mcp/agents/bigtool/context.py +0 -24
  44. universal_mcp/agents/bigtool2/context.py +0 -33
  45. universal_mcp_agents-0.1.3.dist-info/RECORD +0 -51
  46. {universal_mcp_agents-0.1.3.dist-info → universal_mcp_agents-0.1.5.dist-info}/WHEEL +0 -0
universal_mcp/agents/cli.py

@@ -1,8 +1,11 @@
+from langgraph.checkpoint.memory import MemorySaver
 from typer import Typer
-
-from universal_mcp.agents import ReactAgent
+from universal_mcp.agentr.client import AgentrClient
+from universal_mcp.agentr.registry import AgentrRegistry
 from universal_mcp.logger import setup_logger
 
+from universal_mcp.agents import get_agent
+
 app = Typer()
 
 
@@ -13,13 +16,24 @@ app = Typer()
     mcp client run --config client_config.json
     """,
 )
-def run():
+def run(name: str = "react"):
     """Run the agent CLI"""
     import asyncio
 
-    setup_logger(log_file=None, level="WARNING")
 
-    agent = ReactAgent("React Agent", "You are a helpful assistant", "openrouter/auto")
+    setup_logger(log_file=None, level="ERROR")
+    client = AgentrClient()
+    params = {
+        "instructions": "You are a helpful assistant",
+        "model": "anthropic/claude-sonnet-4-20250514",
+        "registry": AgentrRegistry(client=client),
+        "memory": MemorySaver(),
+        "tools": {
+            "google_mail": ["send_email"],
+        }
+    }
+    agent_cls = get_agent(name)
+    agent = agent_cls(name, **params)
    asyncio.run(agent.run_interactive())
 
 
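For orientation, a minimal sketch of what the reworked command now does, assuming `get_agent` (exported from `universal_mcp/agents/__init__.py` in this release) maps a name such as "react" to its agent class; the constructor kwargs mirror the CLI defaults above:

from langgraph.checkpoint.memory import MemorySaver

from universal_mcp.agents import get_agent

# Resolve the agent class from its registry name, then construct it with
# the same keyword arguments the CLI assembles in `params`.
agent_cls = get_agent("react")
agent = agent_cls(
    "react",
    instructions="You are a helpful assistant",
    model="anthropic/claude-sonnet-4-20250514",
    memory=MemorySaver(),
)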
universal_mcp/agents/codeact/__init__.py

@@ -54,7 +54,11 @@ In addition to the Python Standard Library, you can use the following functions:
     for tool in tools:
         # Use coroutine if it exists, otherwise use func
-        tool_callable = tool.coroutine if hasattr(tool, "coroutine") and tool.coroutine is not None else tool.func
+        tool_callable = (
+            tool.coroutine
+            if hasattr(tool, "coroutine") and tool.coroutine is not None
+            else tool.func
+        )
         # Create a safe function name
         safe_name = make_safe_function_name(tool.name)
         # Determine if it's an async function
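The expression being reformatted here relies on LangChain's tool convention: a `StructuredTool` exposes its async implementation as `coroutine` and its sync one as `func`. An equivalent compact helper, shown for reference only:

from collections.abc import Callable

def pick_callable(tool) -> Callable:
    """Prefer the async entry point when the tool defines one."""
    # getattr also covers tools that lack a `coroutine` attribute entirely.
    return getattr(tool, "coroutine", None) or tool.func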
@@ -119,7 +123,11 @@ def create_codeact(
     for tool in tools:
         safe_name = make_safe_function_name(tool.name)
         # Use coroutine if it exists, otherwise use func (same as in create_default_prompt)
-        tool_callable = tool.coroutine if hasattr(tool, "coroutine") and tool.coroutine is not None else tool.func
+        tool_callable = (
+            tool.coroutine
+            if hasattr(tool, "coroutine") and tool.coroutine is not None
+            else tool.func
+        )
         # Only use the safe name for consistency with the prompt
         tools_context[safe_name] = tool_callable
 
@@ -145,7 +153,9 @@
    )
 
    # Add the current response
-    conversation_history += f'\n<message role="assistant">\n{response.content}\n</message>'
+    conversation_history += (
+        f'\n<message role="assistant">\n{response.content}\n</message>'
+    )
 
    # Create the reflection prompt with the tagged conversation history
    formatted_prompt = f"""
@@ -204,7 +214,9 @@ DO NOT reference the feedback directly. Instead, provide a completely new respon
 
    # Return appropriate command with only the latest response
    if code:
-        return Command(goto="sandbox", update={"messages": [response], "script": code})
+        return Command(
+            goto="sandbox", update={"messages": [response], "script": code}
+        )
    else:
        # no code block, end the loop and respond to the user
        return Command(update={"messages": [response], "script": None})
universal_mcp/agents/codeact/test.py

@@ -1,8 +1,9 @@
 from universal_mcp.agentr import Agentr
+from universal_mcp.tools.adapters import ToolFormat
+
 from universal_mcp.agents.codeact import create_codeact
 from universal_mcp.agents.codeact.sandbox import eval_unsafe
 from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.tools.adapters import ToolFormat
 
 model = load_chat_model("gpt-4.1")
 
universal_mcp/agents/hil.py

@@ -41,10 +41,15 @@ def ask_favourite_color_node(state: State) -> State:
     if state.get("favourite_color") is not None:
         return state
     favourite_color = interrupt(
-        {"question": "What is your favourite color?", "type": "choice", "choices": ["red", "green", "blue"]}
+        {
+            "question": "What is your favourite color?",
+            "type": "choice",
+            "choices": ["red", "green", "blue"],
+        }
     )
     state.update(
-        favourite_color=favourite_color, messages=[HumanMessage(content=f"My favourite color is {favourite_color}")]
+        favourite_color=favourite_color,
+        messages=[HumanMessage(content=f"My favourite color is {favourite_color}")],
     )
     return state
 
@@ -58,7 +63,12 @@ def handle_interrupt(interrupt: Interrupt) -> str | bool:
        value = input("Do you accept this? (y/n): " + interrupt.value["question"])
        return value.lower() in ["y", "yes"]
    elif interrupt_type == "choice":
-        value = input("Enter your choice: " + interrupt.value["question"] + " " + ", ".join(interrupt.value["choices"]))
+        value = input(
+            "Enter your choice: "
+            + interrupt.value["question"]
+            + " "
+            + ", ".join(interrupt.value["choices"])
+        )
        if value in interrupt.value["choices"]:
            return value
        else:
@@ -98,7 +108,9 @@ if __name__ == "__main__":
     import asyncio
 
     agent = HilAgent(
-        "Hil Agent", "You are a friendly agent that asks for the user's name and greets them.", "openrouter/auto"
+        "Hil Agent",
+        "You are a friendly agent that asks for the user's name and greets them.",
+        "openrouter/auto",
     )
 
     asyncio.run(agent.run_interactive())
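`handle_interrupt` only computes an answer; the caller still has to feed it back into the paused graph. A self-contained sketch of that round trip using LangGraph's standard `interrupt`/`Command(resume=...)` mechanism (the graph shape and thread id here are illustrative, not part of this package):

import uuid
from typing import TypedDict

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, StateGraph
from langgraph.types import Command, interrupt


class State(TypedDict):
    favourite_color: str


def ask(state: State) -> State:
    # Pauses the graph and surfaces this payload to the caller.
    color = interrupt(
        {
            "question": "What is your favourite color?",
            "type": "choice",
            "choices": ["red", "green", "blue"],
        }
    )
    return {"favourite_color": color}


builder = StateGraph(State)
builder.add_node("ask", ask)
builder.set_entry_point("ask")
builder.add_edge("ask", END)
graph = builder.compile(checkpointer=MemorySaver())

config = {"configurable": {"thread_id": str(uuid.uuid4())}}
state = graph.invoke({"favourite_color": ""}, config=config)  # stops at interrupt()
print(state["__interrupt__"][0].value["question"])
state = graph.invoke(Command(resume="blue"), config=config)   # the handler's answer
print(state["favourite_color"])  # -> "blue"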
universal_mcp/agents/llm.py

@@ -1,22 +1,26 @@
+from functools import lru_cache
+
 from langchain_anthropic import ChatAnthropic
 from langchain_core.language_models import BaseChatModel
 from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain_openai import AzureChatOpenAI
 
 
+@lru_cache(maxsize=8)
 def load_chat_model(
-    fully_specified_name: str, temperature: float = 1.0, tags: list[str] | None = None
+    fully_specified_name: str, temperature: float = 1.0, tags: list[str] | None = None, thinking: bool = True
 ) -> BaseChatModel:
     """Load a chat model from a fully specified name.
     Args:
         fully_specified_name (str): String in the format 'provider/model'.
     """
-    provider, model = fully_specified_name.split("/", maxsplit=1)
+    fully_specified_name = fully_specified_name.replace("/", ":")
+    provider, model = fully_specified_name.split(":", maxsplit=1)
     if provider == "anthropic":
         return ChatAnthropic(
             model=model,
             temperature=temperature,
-            # thinking={"type": "enabled", "budget_tokens": 2048},
+            thinking={"type": "enabled", "budget_tokens": 2048} if thinking else None,
             max_tokens=4096,
             tags=tags,
             stream_usage=True,
@@ -39,7 +43,11 @@ def load_chat_model(
 if __name__ == "__main__":
     from loguru import logger
 
-    models_to_test = ["azure/gpt-5-chat", "anthropic/claude-4-sonnet-20250514", "gemini/gemini-2.5-pro"]
+    models_to_test = [
+        "azure/gpt-5-chat",
+        "anthropic/claude-4-sonnet-20250514",
+        "gemini/gemini-2.5-pro",
+    ]
     for model in models_to_test:
         llm = load_chat_model(model)
         logger.info(llm.invoke("Hi!"))
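Two notes on the new signature: the `replace`/`split` pair makes the `/` and `:` separators equivalent, and `@lru_cache` keys on the exact argument tuple, so every argument must be hashable (passing a list for `tags` would raise `TypeError` at call time) and each distinct spelling occupies its own cache slot. A quick check of the normalization:

# Both spellings resolve to the same (provider, model) pair:
for name in ("azure/gpt-5-chat", "azure:gpt-5-chat"):
    provider, model = name.replace("/", ":").split(":", maxsplit=1)
    assert (provider, model) == ("azure", "gpt-5-chat")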
universal_mcp/agents/planner/__init__.py

@@ -1,11 +1,12 @@
 from langgraph.checkpoint.base import BaseCheckpointSaver
+from universal_mcp.tools.registry import ToolRegistry
 
 from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.llm import load_chat_model
 from universal_mcp.agents.react import ReactAgent
-from universal_mcp.tools.registry import ToolRegistry
 
 from .graph import build_graph
+from .prompts import DEVELOPER_PROMPT
 
 
 class PlannerAgent(BaseAgent):
@@ -24,11 +25,20 @@ class PlannerAgent(BaseAgent):
         self.llm = load_chat_model(model)
         self.executor_agent_cls = executor_agent_cls
 
-    async def _build_graph(self):
-        return build_graph(self.llm, self.app_registry, self.instructions, self.model, self.executor_agent_cls).compile(
-            checkpointer=self.memory
+    def _build_system_message(self):
+        return DEVELOPER_PROMPT.format(
+            name=self.name, instructions=self.instructions
         )
 
+    async def _build_graph(self):
+        return build_graph(
+            self.llm,
+            self.app_registry,
+            self._build_system_message(),
+            self.model,
+            self.executor_agent_cls,
+        ).compile(checkpointer=self.memory)
+
     @property
     def graph(self):
         return self._graph
universal_mcp/agents/planner/__main__.py

@@ -1,7 +1,9 @@
 import asyncio
 
 from universal_mcp.agentr.registry import AgentrRegistry
+
 from universal_mcp.agents.planner import PlannerAgent
+from universal_mcp.agents.utils import messages_to_list
 
 
 async def main():
@@ -9,15 +11,17 @@ async def main():
     agent = PlannerAgent(
         name="planner-agent",
         instructions="You are a helpful assistant.",
-        model="gemini/gemini-2.5-flash",
+        model="azure/gpt-4o",
         registry=registry,
     )
-    from rich.console import Console
+    from rich import print
 
-    console = Console()
-    console.print("Starting agent...", style="yellow")
-    async for event in agent.stream(user_input="Send an email to manoj@agentr.dev'", thread_id="xyz"):
-        console.print(event.content, style="red")
+    print("Starting agent...")
+    result = await agent.invoke(
+        user_input="Send an email to manoj@agentr.dev with the subject 'testing planner' and body 'This is a test of the planner agent.'",
+        thread_id="xyz",
+    )
+    print(messages_to_list(result["messages"]))
 
 
 if __name__ == "__main__":
universal_mcp/agents/planner/graph.py

@@ -1,6 +1,6 @@
 from typing import Any
 
-from langchain_core.messages import AIMessage
+from langchain_core.messages import AIMessage, SystemMessage
 from langgraph.graph import END, START, StateGraph
 from loguru import logger
 
@@ -18,7 +18,9 @@ def build_graph(llm, registry, instructions, model, executor_agent_cls):
         task = state["messages"][-1].content
         logger.info(f"Running tool finder for task: {task}")
         tool_finder_graph = build_tool_node_graph(llm, registry)
-        tool_finder_state = await tool_finder_graph.ainvoke({"task": task, "messages": state["messages"]})
+        tool_finder_state = await tool_finder_graph.ainvoke(
+            {"task": task, "messages": state["messages"]}
+        )
 
         if not tool_finder_state.get("apps_required"):
             logger.info("Tool finder determined no apps are required.")
@@ -59,7 +61,11 @@
     async def _no_tools_node(state: State) -> dict[str, Any]:
         """Handles tasks that don't require tools by invoking the LLM directly."""
         logger.info("No tools required. Invoking LLM directly.")
-        response = await llm.ainvoke(state["messages"])
+        messages = [
+            SystemMessage(content=instructions),
+            *state["messages"],
+        ]
+        response = await llm.ainvoke(messages)
         return {"messages": [response]}
 
     graph_builder.add_node("tool_finder", _tool_finder_node)
universal_mcp/agents/planner/prompts.py

@@ -1 +1,14 @@
-# Prompts for the planner agent
+# prompts.py
+
+DEVELOPER_PROMPT = """
+You are a planner agent that orchestrates tasks by selecting the right tools.
+
+Your primary goal is to analyze a user's request and determine the most effective sequence of tools to accomplish it. You have access to a registry of applications and their corresponding tools.
+
+Here's your process:
+1. **Assess the Task**: Understand the user's intent and what they want to achieve.
+2. **Identify Necessary Tools**: Based on the task, identify which applications and tools are required.
+3. **Orchestrate Execution**: Pass the selected tools and instructions to an executor agent to perform the task.
+
+{instructions}
+"""
universal_mcp/agents/planner/state.py

@@ -2,7 +2,6 @@ from typing import Annotated
 
 from langgraph.graph.message import add_messages
 from typing_extensions import TypedDict
-
 from universal_mcp.types import ToolConfig
 
 
universal_mcp/agents/react.py

@@ -1,13 +1,28 @@
 from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.prebuilt import create_react_agent
 from loguru import logger
-
 from universal_mcp.agentr.registry import AgentrRegistry
-from universal_mcp.agents.base import BaseAgent
-from universal_mcp.agents.llm import load_chat_model
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolConfig, ToolFormat
 
+from universal_mcp.agents.base import BaseAgent
+from universal_mcp.agents.llm import load_chat_model
+from universal_mcp.agents.utils import initialize_ui_tools, messages_to_list
+
+DEVELOPER_PROMPT = """You are {name}.
+
+You have access to various tools that can help you answer questions and complete tasks. When you need to use a tool:
+
+1. Think about what information you need
+2. Call the appropriate tool with the right parameters
+3. Use the tool results to provide a comprehensive answer
+
+Always explain your reasoning and be thorough in your responses. If you need to use multiple tools to answer a question completely, do so.
+
+Adhere to the following instructions strictly:
+{instructions}
+"""
+
 
 class ReactAgent(BaseAgent):
     def __init__(
@@ -24,6 +39,7 @@ class ReactAgent(BaseAgent):
         super().__init__(name, instructions, model, memory, **kwargs)
         self.llm = load_chat_model(model)
         self.tools = tools
+        self.ui_tools = initialize_ui_tools()
         self.max_iterations = max_iterations
         self.registry = registry
 
@@ -38,6 +54,8 @@
         else:
             tools = []
 
+        tools.extend(self.ui_tools)
+
         logger.debug(f"Initialized ReactAgent: name={self.name}, model={self.model}")
         return create_react_agent(
             self.llm,
@@ -47,30 +65,26 @@
         )
 
     def _build_system_message(self):
-        system_message = f"""You are {self.name}.
-
-You have access to various tools that can help you answer questions and complete tasks. When you need to use a tool:
-
-1. Think about what information you need
-2. Call the appropriate tool with the right parameters
-3. Use the tool results to provide a comprehensive answer
+        return DEVELOPER_PROMPT.format(name=self.name, instructions=self.instructions)
 
-Always explain your reasoning and be thorough in your responses. If you need to use multiple tools to answer a question completely, do so.
-
-{self.instructions}
-"""
-        return system_message
-
-
-if __name__ == "__main__":
-    import asyncio
 
+async def main():
     agent = ReactAgent(
         "Universal React Agent",
-        instructions="",
+        instructions="Be very concise in your answers.",
         model="azure/gpt-4o",
         tools={"google-mail": ["send_email"]},
         registry=AgentrRegistry(),
     )
-    result = asyncio.run(agent.invoke("Send an email with the subject 'testing react agent' to manoj@agentr.dev"))
-    logger.info(result["messages"][-1].content)
+    result = await agent.invoke(
+        "Send an email with the subject 'testing react agent' to manoj@agentr.dev"
+    )
+    from rich import print
+
+    print(messages_to_list(result["messages"]))
+
+
+if __name__ == "__main__":
+    import asyncio
+
+    asyncio.run(main())
universal_mcp/agents/shared/tool_node.py

@@ -9,7 +9,6 @@ from langgraph.graph import END, StateGraph
 from langgraph.graph.message import add_messages
 from loguru import logger
 from pydantic import BaseModel, Field
-
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolConfig
 
@@ -97,7 +96,9 @@ INSTRUCTIONS:
         reasoning: str
 
     response = await llm.with_structured_output(AppList).ainvoke(
-        input=prompt.format(task=task, all_apps=all_apps, connected_apps=connected_apps)
+        input=prompt.format(
+            task=task, all_apps=all_apps, connected_apps=connected_apps
+        )
     )
     app_list = response.app_list
     reasoning = f"Found relevant apps: {app_list}. Reasoning: {response.reasoning}"
@@ -105,7 +106,9 @@ INSTRUCTIONS:
 
     return {
         **state,
-        "messages": [AIMessage(content=f"Identified relevant apps: {', '.join(app_list)}")],
+        "messages": [
+            AIMessage(content=f"Identified relevant apps: {', '.join(app_list)}")
+        ],
         "relevant_apps": app_list,
         "reasoning": state.get("reasoning", "") + "\n" + reasoning,
     }
@@ -129,7 +132,9 @@ Your goal is to select the most appropriate tool for the given task.
Only return tool ids.
"""
 
-    response = await llm.with_structured_output(schema=ToolSelectionOutput).ainvoke(input=SELECT_TOOL_PROMPT)
+    response = await llm.with_structured_output(schema=ToolSelectionOutput).ainvoke(
+        input=SELECT_TOOL_PROMPT
+    )
 
     selected_tool_ids = response.tool_ids
     return selected_tool_ids
@@ -159,7 +164,9 @@ Task: "{task}"
     class SearchQuery(BaseModel):
         query: str
 
-    response = await llm.with_structured_output(SearchQuery).ainvoke(input=prompt.format(task=task))
+    response = await llm.with_structured_output(SearchQuery).ainvoke(
+        input=prompt.format(task=task)
+    )
     query = response.query
     logger.info(f"Generated search query '{query}' for task '{task}'")
     return query
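All three call sites above share the same structured-output idiom: wrap a Pydantic schema with `with_structured_output` so the model's reply comes back as a parsed object rather than free text. A self-contained sketch of the pattern (the model name is illustrative):

import asyncio

from langchain.chat_models import init_chat_model
from pydantic import BaseModel


class SearchQuery(BaseModel):
    query: str


async def main() -> None:
    llm = init_chat_model("openai:gpt-4o-mini")
    # The reply is coerced into SearchQuery, so .query is a parsed field.
    response = await llm.with_structured_output(SearchQuery).ainvoke(
        input="Write one web search query for: send an email to manoj@agentr.dev"
    )
    print(response.query)


asyncio.run(main())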
@@ -172,11 +179,17 @@ Task: "{task}"
     apps_with_tools_dict = {}
     reasoning_steps = []
     for app_name in state["relevant_apps"]:
-        logger.info(f"Searching for tools in {app_name} for task: {task} with query '{search_query}'")
-        found_tools = await registry.search_tools(query=search_query, app_id=app_name)
+        logger.info(
+            f"Searching for tools in {app_name} for task: {task} with query '{search_query}'"
+        )
+        found_tools = await registry.search_tools(
+            query=search_query, app_id=app_name
+        )
         selected_tools = await _select_tools(task, found_tools)
         apps_with_tools_dict[app_name] = selected_tools
-        reasoning_steps.append(f"For '{app_name}', selected tool(s): {', '.join(selected_tools)}.")
+        reasoning_steps.append(
+            f"For '{app_name}', selected tool(s): {', '.join(selected_tools)}."
+        )
 
     return {
         **state,
@@ -208,7 +221,9 @@ Task: "{task}"
     )
     workflow.add_conditional_edges(
         "find_relevant_apps",
-        lambda state: "search_tools" if state["relevant_apps"] else "handle_no_apps_found",
+        lambda state: "search_tools"
+        if state["relevant_apps"]
+        else "handle_no_apps_found",
     )
 
     workflow.add_edge("search_tools", END)
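The reflowed lambda above is a LangGraph router: `add_conditional_edges` calls it with the current state and jumps to whichever node name it returns. A runnable miniature of the same routing (node bodies are stubs):

from typing import TypedDict

from langgraph.graph import END, StateGraph


class State(TypedDict):
    relevant_apps: list[str]


workflow = StateGraph(State)
workflow.add_node("find_relevant_apps", lambda state: state)
workflow.add_node("search_tools", lambda state: state)
workflow.add_node("handle_no_apps_found", lambda state: state)
workflow.set_entry_point("find_relevant_apps")
workflow.add_conditional_edges(
    "find_relevant_apps",
    lambda state: "search_tools" if state["relevant_apps"] else "handle_no_apps_found",
)
workflow.add_edge("search_tools", END)
workflow.add_edge("handle_no_apps_found", END)
graph = workflow.compile()

print(graph.invoke({"relevant_apps": ["google_mail"]}))  # routes through search_tools
print(graph.invoke({"relevant_apps": []}))  # routes through handle_no_apps_found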
@@ -219,6 +234,7 @@
 
 async def main():
     from universal_mcp.agentr.registry import AgentrRegistry
+
     from universal_mcp.agents.llm import load_chat_model
 
     registry = AgentrRegistry()
@@ -228,8 +244,7 @@ async def main():
         "task": "Send an email to manoj@agentr.dev",
         "messages": [HumanMessage(content="Send an email to manoj@agentr.dev")],
     }
-    result = await graph.ainvoke(initial_state)
-    print(result)
+    await graph.ainvoke(initial_state)
 
 
 if __name__ == "__main__":
universal_mcp/agents/simple.py

@@ -8,6 +8,14 @@ from typing_extensions import TypedDict
 
 from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.llm import load_chat_model
+from universal_mcp.agents.utils import messages_to_list
+
+DEVELOPER_PROMPT = """
+You are {name}, an helpful assistant who can answer simple questions.
+
+Adhere to the following instructions strictly:
+{instructions}
+"""
 
 
 class State(TypedDict):
@@ -15,16 +23,26 @@ class State(TypedDict):
 
 
 class SimpleAgent(BaseAgent):
-    def __init__(self, name: str, instructions: str, model: str, memory: BaseCheckpointSaver = None, **kwargs):
+    def __init__(
+        self,
+        name: str,
+        instructions: str,
+        model: str,
+        memory: BaseCheckpointSaver = None,
+        **kwargs,
+    ):
         super().__init__(name, instructions, model, memory, **kwargs)
         self.llm = load_chat_model(model)
 
+    def _build_system_message(self):
+        return DEVELOPER_PROMPT.format(name=self.name, instructions=self.instructions)
+
     async def _build_graph(self):
         graph_builder = StateGraph(State)
 
         async def chatbot(state: State):
             messages = [
-                {"role": "system", "content": self.instructions},
+                {"role": "system", "content": self._build_system_message()},
                 *state["messages"],
             ]
             return {"messages": [await self.llm.ainvoke(messages)]}
@@ -34,7 +52,12 @@ class SimpleAgent(BaseAgent):
         graph_builder.add_edge("chatbot", END)
         return graph_builder.compile(checkpointer=self.memory)
 
+async def main():
+    agent = SimpleAgent("Simple Agent", "Act as a 14 year old kid, reply in Gen-Z lingo", "azure/gpt-5-mini")
+    output = await agent.invoke("What is the capital of France?")
+    from rich import print
+    print(messages_to_list(output["messages"]))
+
 
 if __name__ == "__main__":
-    agent = SimpleAgent("Simple Agent", "You are a helpful assistant", "azure/gpt-4o")
-    asyncio.run(agent.run_interactive())
+    asyncio.run(main())
universal_mcp/agents/tools.py

@@ -1,7 +1,6 @@
 import json
 
 from langchain_mcp_adapters.client import MultiServerMCPClient
-
 from universal_mcp.agentr.integration import AgentrIntegration
 from universal_mcp.applications.utils import app_from_slug
 from universal_mcp.tools.adapters import ToolFormat
@@ -15,7 +14,9 @@ async def load_agentr_tools(agentr_servers: dict):
         app = app_from_slug(app_name)
         integration = AgentrIntegration(name=app_name)
         app_instance = app(integration=integration)
-        tool_manager.register_tools_from_app(app_instance, tool_names=tool_names["tools"])
+        tool_manager.register_tools_from_app(
+            app_instance, tool_names=tool_names["tools"]
+        )
     tools = tool_manager.list_tools(format=ToolFormat.LANGCHAIN)
     return tools
 
@@ -30,6 +31,10 @@ async def load_tools(path: str) -> ToolConfig:
     with open(path) as f:
         data = json.load(f)
     config = ToolConfig.model_validate(data)
-    agentr_tools = await load_agentr_tools(config.model_dump(exclude_none=True)["agentrServers"])
-    mcp_tools = await load_mcp_tools(config.model_dump(exclude_none=True)["mcpServers"])
+    agentr_tools = await load_agentr_tools(
+        config.model_dump(exclude_none=True)["agentrServers"]
+    )
+    mcp_tools = await load_mcp_tools(
+        config.model_dump(exclude_none=True)["mcpServers"]
+    )
     return agentr_tools + mcp_tools
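For reference, a hypothetical config file shape that `load_tools` would accept, inferred from the keys read above (`agentrServers` entries each carrying a `tools` list, and `mcpServers` handed to `MultiServerMCPClient`); any structure beyond those two top-level keys is an assumption:

import json

# Hypothetical example config; app and tool names are illustrative.
example = {
    "agentrServers": {
        "google_mail": {"tools": ["send_email"]},
    },
    "mcpServers": {},
}
with open("client_config.json", "w") as f:
    json.dump(example, f, indent=2)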