universal-mcp 0.1.24rc14__py3-none-any.whl → 0.1.24rc19__py3-none-any.whl

This diff compares the contents of two package versions as published to a supported public registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
Files changed (62)
  1. universal_mcp/agentr/client.py +11 -0
  2. universal_mcp/agentr/registry.py +40 -5
  3. universal_mcp/applications/application.py +0 -2
  4. universal_mcp/applications/sample/app.py +79 -20
  5. universal_mcp/applications/utils.py +52 -0
  6. universal_mcp/servers/server.py +4 -3
  7. universal_mcp/tools/manager.py +0 -3
  8. universal_mcp/types.py +1 -21
  9. universal_mcp/utils/prompts.py +0 -2
  10. universal_mcp/utils/testing.py +1 -1
  11. {universal_mcp-0.1.24rc14.dist-info → universal_mcp-0.1.24rc19.dist-info}/METADATA +1 -1
  12. universal_mcp-0.1.24rc19.dist-info/RECORD +54 -0
  13. universal_mcp/__init__.py +0 -0
  14. universal_mcp/agents/__init__.py +0 -10
  15. universal_mcp/agents/autoagent/__init__.py +0 -30
  16. universal_mcp/agents/autoagent/__main__.py +0 -25
  17. universal_mcp/agents/autoagent/context.py +0 -26
  18. universal_mcp/agents/autoagent/graph.py +0 -151
  19. universal_mcp/agents/autoagent/prompts.py +0 -9
  20. universal_mcp/agents/autoagent/state.py +0 -27
  21. universal_mcp/agents/autoagent/studio.py +0 -25
  22. universal_mcp/agents/autoagent/utils.py +0 -13
  23. universal_mcp/agents/base.py +0 -129
  24. universal_mcp/agents/bigtool/__init__.py +0 -54
  25. universal_mcp/agents/bigtool/__main__.py +0 -24
  26. universal_mcp/agents/bigtool/context.py +0 -24
  27. universal_mcp/agents/bigtool/graph.py +0 -166
  28. universal_mcp/agents/bigtool/prompts.py +0 -31
  29. universal_mcp/agents/bigtool/state.py +0 -27
  30. universal_mcp/agents/bigtool2/__init__.py +0 -53
  31. universal_mcp/agents/bigtool2/__main__.py +0 -24
  32. universal_mcp/agents/bigtool2/agent.py +0 -11
  33. universal_mcp/agents/bigtool2/context.py +0 -33
  34. universal_mcp/agents/bigtool2/graph.py +0 -169
  35. universal_mcp/agents/bigtool2/prompts.py +0 -12
  36. universal_mcp/agents/bigtool2/state.py +0 -27
  37. universal_mcp/agents/builder.py +0 -80
  38. universal_mcp/agents/cli.py +0 -27
  39. universal_mcp/agents/codeact/__init__.py +0 -243
  40. universal_mcp/agents/codeact/sandbox.py +0 -27
  41. universal_mcp/agents/codeact/test.py +0 -15
  42. universal_mcp/agents/codeact/utils.py +0 -61
  43. universal_mcp/agents/hil.py +0 -104
  44. universal_mcp/agents/llm.py +0 -45
  45. universal_mcp/agents/planner/__init__.py +0 -37
  46. universal_mcp/agents/planner/__main__.py +0 -24
  47. universal_mcp/agents/planner/graph.py +0 -82
  48. universal_mcp/agents/planner/prompts.py +0 -1
  49. universal_mcp/agents/planner/state.py +0 -12
  50. universal_mcp/agents/react.py +0 -84
  51. universal_mcp/agents/shared/agent_node.py +0 -34
  52. universal_mcp/agents/shared/tool_node.py +0 -235
  53. universal_mcp/agents/simple.py +0 -40
  54. universal_mcp/agents/tools.py +0 -35
  55. universal_mcp/agents/utils.py +0 -111
  56. universal_mcp/analytics.py +0 -111
  57. universal_mcp/applications/__init__.py +0 -70
  58. universal_mcp/utils/common.py +0 -278
  59. universal_mcp-0.1.24rc14.dist-info/RECORD +0 -99
  60. {universal_mcp-0.1.24rc14.dist-info → universal_mcp-0.1.24rc19.dist-info}/WHEEL +0 -0
  61. {universal_mcp-0.1.24rc14.dist-info → universal_mcp-0.1.24rc19.dist-info}/entry_points.txt +0 -0
  62. {universal_mcp-0.1.24rc14.dist-info → universal_mcp-0.1.24rc19.dist-info}/licenses/LICENSE +0 -0
universal_mcp/agents/codeact/__init__.py
@@ -1,243 +0,0 @@
-import inspect
-import re
-from collections.abc import Awaitable, Callable, Sequence
-from typing import Any, TypeVar
-
-from langchain_core.language_models import BaseChatModel
-from langchain_core.tools import StructuredTool
-from langchain_core.tools import tool as create_tool
-from langgraph.graph import END, START, MessagesState, StateGraph
-from langgraph.types import Command
-
-from .utils import extract_and_combine_codeblocks
-
-EvalFunction = Callable[[str, dict[str, Any]], tuple[str, dict[str, Any]]]
-EvalCoroutine = Callable[[str, dict[str, Any]], Awaitable[tuple[str, dict[str, Any]]]]
-
-
-class CodeActState(MessagesState):
-    """State for CodeAct agent."""
-
-    script: str | None
-    """The Python code script to be executed."""
-    context: dict[str, Any]
-    """Dictionary containing the execution context with available tools and variables."""
-
-
-StateSchema = TypeVar("StateSchema", bound=CodeActState)
-StateSchemaType = type[StateSchema]
-
-
-def make_safe_function_name(name: str) -> str:
-    """Convert a tool name to a valid Python function name."""
-    # Replace non-alphanumeric characters with underscores
-    safe_name = re.sub(r"[^a-zA-Z0-9_]", "_", name)
-    # Ensure the name doesn't start with a digit
-    if safe_name and safe_name[0].isdigit():
-        safe_name = f"tool_{safe_name}"
-    # Handle empty name edge case
-    if not safe_name:
-        safe_name = "unnamed_tool"
-    return safe_name
-
-
-def create_default_prompt(tools: list[StructuredTool], base_prompt: str | None = None):
-    """Create default prompt for the CodeAct agent."""
-    tools = [t if isinstance(t, StructuredTool) else create_tool(t) for t in tools]
-    prompt = f"{base_prompt}\n\n" if base_prompt else ""
-    prompt += """You will be given a task to perform. You should output either
-- a Python code snippet that provides the solution to the task, or a step towards the solution. Any output you want to extract from the code should be printed to the console. Code should be output in a fenced code block.
-- text to be shown directly to the user, if you want to ask for more information or provide the final answer.
-
-In addition to the Python Standard Library, you can use the following functions:
-"""
-
-    for tool in tools:
-        # Use coroutine if it exists, otherwise use func
-        tool_callable = tool.coroutine if hasattr(tool, "coroutine") and tool.coroutine is not None else tool.func
-        # Create a safe function name
-        safe_name = make_safe_function_name(tool.name)
-        # Determine if it's an async function
-        is_async = inspect.iscoroutinefunction(tool_callable)
-        # Add appropriate function definition
-        prompt += f'''
-{"async " if is_async else ""}def {safe_name}{str(inspect.signature(tool_callable))}:
-    """{tool.description}"""
-    ...
-'''
-
-    prompt += """
-
-Variables defined at the top level of previous code snippets can be referenced in your code.
-
-Reminder: use Python code snippets to call tools"""
-    return prompt
-
-
-def create_codeact(
-    model: BaseChatModel,
-    tools: Sequence[StructuredTool | Callable],
-    eval_fn: EvalFunction | EvalCoroutine,
-    *,
-    prompt: str | None = None,
-    reflection_prompt: str | None = None,
-    reflection_model: BaseChatModel | None = None,
-    max_reflections: int = 3,
-    state_schema: StateSchemaType = CodeActState,
-) -> StateGraph:
-    """Create a CodeAct agent.
-
-    Args:
-        model: The language model to use for generating code
-        tools: List of tools available to the agent. Can be passed as python functions or StructuredTool instances.
-        eval_fn: Function or coroutine that executes code in a sandbox. Takes code string and locals dict,
-            returns a tuple of (stdout output, new variables dict)
-        prompt: Optional custom system prompt. If None, uses default prompt.
-            To customize default prompt you can use `create_default_prompt` helper:
-            `create_default_prompt(tools, "You are a helpful assistant.")`
-        reflection_prompt: Optional prompt for reflection. If provided, will be used to evaluate responses.
-            If the reflection output contains "NONE", the response is considered valid, otherwise the
-            reflection output is passed back to the model for regeneration.
-        reflection_model: Optional model to use for reflection. If None, uses the same model as for generation.
-        max_reflections: Maximum number of reflection iterations (default: 3).
-        state_schema: The state schema to use for the agent.
-
-    Returns:
-        A StateGraph implementing the CodeAct architecture
-    """
-    tools = [t if isinstance(t, StructuredTool) else create_tool(t) for t in tools]
-
-    if prompt is None:
-        prompt = create_default_prompt(tools)
-
-    # If no reflection model is provided, use the main model
-    if reflection_model is None:
-        reflection_model = model
-
-    # Make tools available to the code sandbox - use safe names for keys
-    tools_context = {}
-    for tool in tools:
-        safe_name = make_safe_function_name(tool.name)
-        # Use coroutine if it exists, otherwise use func (same as in create_default_prompt)
-        tool_callable = tool.coroutine if hasattr(tool, "coroutine") and tool.coroutine is not None else tool.func
-        # Only use the safe name for consistency with the prompt
-        tools_context[safe_name] = tool_callable
-
-    def call_model(state: StateSchema) -> Command:
-        messages = [{"role": "system", "content": prompt}] + state["messages"]
-
-        # Run the model and potentially loop for reflection
-        response = model.invoke(messages)
-
-        # Extract and combine all code blocks
-        code = extract_and_combine_codeblocks(response.content)
-
-        # Loop for reflection if needed and if code is present
-        if reflection_prompt and code:
-            reflection_count = 0
-            while reflection_count < max_reflections:
-                # Format conversation history with XML-style tags
-                conversation_history = "\n".join(
-                    [
-                        f'<message role="{("user" if m.type == "human" else "assistant")}">\n{m.content}\n</message>'
-                        for m in state["messages"]
-                    ]
-                )
-
-                # Add the current response
-                conversation_history += f'\n<message role="assistant">\n{response.content}\n</message>'
-
-                # Create the reflection prompt with the tagged conversation history
-                formatted_prompt = f"""
-Review the assistant's latest code for as per the quality rules:
-
-<conversation_history>
-{conversation_history}
-</conversation_history>
-
-If you find ANY of these issues, describe the problem briefly and clearly.
-If NO issues are found, respond with EXACTLY: "NONE"
-"""
-
-                # Create messages for reflection with correct ordering
-                reflection_messages = [
-                    {"role": "system", "content": reflection_prompt},
-                    # Include the formatted reflection prompt as the final user message
-                    {"role": "user", "content": formatted_prompt},
-                ]
-                reflection_result = reflection_model.invoke(reflection_messages)
-
-                # Check if reflection passed
-                if "NONE" in reflection_result.content:
-                    # Reflection passed, exit loop
-                    break
-
-                # Reflection didn't pass, regenerate response
-                reflection_messages = [
-                    {"role": "system", "content": prompt},
-                    *state["messages"],
-                    {"role": "assistant", "content": response.content},
-                    {
-                        "role": "user",
-                        "content": f"""
-I need you to completely regenerate your previous response based on this feedback:
-
-'''
-{reflection_result.content}
-'''
-
-DO NOT reference the feedback directly. Instead, provide a completely new response that addresses the issues.
-""",
-                    },
-                ]
-                response = model.invoke(reflection_messages)
-
-                # Extract code from the new response
-                code = extract_and_combine_codeblocks(response.content)
-
-                # If no code in the new response, exit the reflection loop
-                if not code:
-                    break
-
-                # Increment reflection count
-                reflection_count += 1
-
-        # Return appropriate command with only the latest response
-        if code:
-            return Command(goto="sandbox", update={"messages": [response], "script": code})
-        else:
-            # no code block, end the loop and respond to the user
-            return Command(update={"messages": [response], "script": None})
-
-    # If eval_fn is a async, we define async node function.
-    if inspect.iscoroutinefunction(eval_fn):
-
-        async def sandbox(state: StateSchema):
-            existing_context = state.get("context", {})
-            context = {**existing_context, **tools_context}
-            # Execute the script in the sandbox
-            output, new_vars = await eval_fn(state["script"], context)
-            new_context = {**existing_context, **new_vars}
-            return {
-                "messages": [{"role": "user", "content": output}],
-                "context": new_context,
-            }
-    else:
-
-        def sandbox(state: StateSchema):
-            existing_context = state.get("context", {})
-            context = {**existing_context, **tools_context}
-            # Execute the script in the sandbox
-            output, new_vars = eval_fn(state["script"], context)
-            new_context = {**existing_context, **new_vars}
-            return {
-                "messages": [{"role": "user", "content": output}],
-                "context": new_context,
-            }
-
-    agent = StateGraph(state_schema)
-    agent.add_node(call_model, destinations=(END, "sandbox"))
-    agent.add_node(sandbox)
-    agent.add_edge(START, "call_model")
-    agent.add_edge("sandbox", "call_model")
-    return agent
universal_mcp/agents/codeact/sandbox.py
@@ -1,27 +0,0 @@
-import builtins
-import contextlib
-import io
-from typing import Any
-
-
-def eval_unsafe(code: str, _locals: dict[str, Any]) -> tuple[str, dict[str, Any]]:
-    # Store original keys before execution
-    original_keys = set(_locals.keys())
-    result = f"Executing code...\n{code}\n\nOutput:\n"
-    result += "=" * 50 + "\n"
-    try:
-        with contextlib.redirect_stdout(io.StringIO()) as f:
-            # Execute the code in the provided locals context
-            # Using exec to allow dynamic code execution
-            # This is a simplified version; in production, consider security implications
-            exec(code, builtins.__dict__, _locals)
-        result += f.getvalue()
-        if not result:
-            result = "<code ran, no output printed to stdout>"
-    except Exception as e:
-        result += f"Error during execution: {repr(e)}"
-
-    # Determine new variables created during execution
-    new_keys = set(_locals.keys()) - original_keys
-    new_vars = {key: _locals[key] for key in new_keys}
-    return result, new_vars
universal_mcp/agents/codeact/test.py
@@ -1,15 +0,0 @@
-from universal_mcp.agentr import Agentr
-from universal_mcp.agents.codeact import create_codeact
-from universal_mcp.agents.codeact.sandbox import eval_unsafe
-from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.tools.adapters import ToolFormat
-
-model = load_chat_model("gpt-4.1")
-
-agentr = Agentr()
-agentr.load_tools(["google-mail_send_email"])
-
-tools = agentr.list_tools(format=ToolFormat.NATIVE)
-
-code_act = create_codeact(model, tools, eval_unsafe)
-agent = code_act.compile()
universal_mcp/agents/codeact/utils.py
@@ -1,61 +0,0 @@
-import re
-
-BACKTICK_PATTERN = r"(?:^|\n)```(.*?)(?:```(?:\n|$))"
-
-
-def extract_and_combine_codeblocks(text: str) -> str:
-    """
-    Extracts all codeblocks from a text string and combines them into a single code string.
-
-    Args:
-        text: A string containing zero or more codeblocks, where each codeblock is
-        surrounded by triple backticks (```).
-
-    Returns:
-        A string containing the combined code from all codeblocks, with each codeblock
-        separated by a newline.
-
-    Example:
-        text = '''Here's some code:
-
-        ```python
-        print('hello')
-        ```
-        And more:
-
-        ```
-        print('world')
-        ```'''
-
-        result = extract_and_combine_codeblocks(text)
-
-        Result:
-
-        print('hello')
-
-        print('world')
-    """
-    # Find all code blocks in the text using regex
-    # Pattern matches anything between triple backticks, with or without a language identifier
-    code_blocks = re.findall(BACKTICK_PATTERN, text, re.DOTALL)
-
-    if not code_blocks:
-        return ""
-
-    # Process each codeblock
-    processed_blocks = []
-    for block in code_blocks:
-        # Strip leading and trailing whitespace
-        block = block.strip()
-
-        # If the first line looks like a language identifier, remove it
-        lines = block.split("\n")
-        if lines and (not lines[0].strip() or " " not in lines[0].strip()):
-            # First line is empty or likely a language identifier (no spaces)
-            block = "\n".join(lines[1:])
-
-        processed_blocks.append(block)
-
-    # Combine all codeblocks with newlines between them
-    combined_code = "\n\n".join(processed_blocks)
-    return combined_code
universal_mcp/agents/hil.py
@@ -1,104 +0,0 @@
-from typing import Annotated, TypedDict
-
-from langchain_core.messages import HumanMessage
-from langgraph.constants import END, START
-from langgraph.graph import StateGraph
-from langgraph.graph.message import add_messages
-from langgraph.types import Interrupt, interrupt
-
-from .base import BaseAgent
-from .llm import load_chat_model
-
-
-class State(TypedDict):
-    messages: Annotated[list, add_messages]
-    name: str | None = None
-    favourite_color: str | None = None
-    human: bool | None = None
-
-
-def ask_name_node(state: State) -> State:
-    if state.get("name") is not None:
-        return state
-    name = interrupt({"question": "What is your name?", "type": "text"})
-    state.update(name=name, messages=[HumanMessage(content=f"My name is {name}")])
-    return state
-
-
-def ask_bool_node(state: State) -> State:
-    if state.get("human") is not None:
-        return state
-    bool = interrupt({"question": "Are you a human?", "type": "bool"})
-
-    if bool:
-        state.update(human=True, messages=[HumanMessage(content="I am human")])
-    else:
-        state.update(human=False, messages=[HumanMessage(content="I am AI agent")])
-    return state
-
-
-def ask_favourite_color_node(state: State) -> State:
-    if state.get("favourite_color") is not None:
-        return state
-    favourite_color = interrupt(
-        {"question": "What is your favourite color?", "type": "choice", "choices": ["red", "green", "blue"]}
-    )
-    state.update(
-        favourite_color=favourite_color, messages=[HumanMessage(content=f"My favourite color is {favourite_color}")]
-    )
-    return state
-
-
-def handle_interrupt(interrupt: Interrupt) -> str | bool:
-    interrupt_type = interrupt.value["type"]
-    if interrupt_type == "text":
-        value = input(interrupt.value["question"])
-        return value
-    elif interrupt_type == "bool":
-        value = input("Do you accept this? (y/n): " + interrupt.value["question"])
-        return value.lower() in ["y", "yes"]
-    elif interrupt_type == "choice":
-        value = input("Enter your choice: " + interrupt.value["question"] + " " + ", ".join(interrupt.value["choices"]))
-        if value in interrupt.value["choices"]:
-            return value
-        else:
-            return interrupt.value["choices"][0]
-    else:
-        raise ValueError(f"Invalid interrupt type: {interrupt.value['type']}")
-
-
-class HilAgent(BaseAgent):
-    def __init__(self, name: str, instructions: str, model: str):
-        super().__init__(name, instructions, model)
-        self.llm = load_chat_model(model)
-        self._graph = self._build_graph()
-
-    def chatbot(self, state: State):
-        return {"messages": [self.llm.invoke(state["messages"])]}
-
-    def _build_graph(self):
-        graph_builder = StateGraph(State)
-        graph_builder.add_node("ask_name_node", ask_name_node)
-        graph_builder.add_node("ask_bool_node", ask_bool_node)
-        graph_builder.add_node("ask_favourite_color_node", ask_favourite_color_node)
-        graph_builder.add_node("chatbot", self.chatbot)
-        graph_builder.add_edge(START, "ask_name_node")
-        graph_builder.add_edge("ask_name_node", "ask_bool_node")
-        graph_builder.add_edge("ask_bool_node", "ask_favourite_color_node")
-        graph_builder.add_edge("ask_favourite_color_node", "chatbot")
-        graph_builder.add_edge("chatbot", END)
-        return graph_builder.compile(checkpointer=self.memory)
-
-    @property
-    def graph(self):
-        return self._graph
-
-
-if __name__ == "__main__":
-    import asyncio
-
-    agent = HilAgent(
-        "Hil Agent", "You are a friendly agent that asks for the user's name and greets them.", "openrouter/auto"
-    )
-
-    asyncio.run(agent.run_interactive())
universal_mcp/agents/llm.py
@@ -1,45 +0,0 @@
-from langchain_anthropic import ChatAnthropic
-from langchain_core.language_models import BaseChatModel
-from langchain_google_genai import ChatGoogleGenerativeAI
-from langchain_openai import AzureChatOpenAI
-
-
-def load_chat_model(
-    fully_specified_name: str, temperature: float = 1.0, tags: list[str] | None = None
-) -> BaseChatModel:
-    """Load a chat model from a fully specified name.
-    Args:
-        fully_specified_name (str): String in the format 'provider/model'.
-    """
-    provider, model = fully_specified_name.split("/", maxsplit=1)
-    if provider == "anthropic":
-        return ChatAnthropic(
-            model=model,
-            temperature=temperature,
-            thinking={"type": "enabled", "budget_tokens": 2048},
-            max_tokens=4096,
-            tags=tags,
-            stream_usage=True,
-        )  # pyright: ignore[reportCallIssue]
-    elif provider == "azure":
-        return AzureChatOpenAI(
-            model=model,
-            api_version="2024-12-01-preview",
-            azure_deployment=model,
-            temperature=temperature,
-            tags=tags,
-            stream_usage=True,
-        )
-    elif provider == "gemini":
-        return ChatGoogleGenerativeAI(model=model, temperature=temperature)
-    else:
-        raise ValueError(f"Unsupported provider: {provider}")
-
-
-if __name__ == "__main__":
-    from loguru import logger
-
-    models_to_test = ["azure/gpt-5-chat", "anthropic/claude-4-sonnet-20250514", "gemini/gemini-2.5-pro"]
-    for model in models_to_test:
-        llm = load_chat_model(model)
-        logger.info(llm.invoke("Hi!"))
universal_mcp/agents/planner/__init__.py
@@ -1,37 +0,0 @@
-from langgraph.checkpoint.base import BaseCheckpointSaver
-
-from universal_mcp.agents.base import BaseAgent
-from universal_mcp.agents.llm import load_chat_model
-from universal_mcp.agents.react import ReactAgent
-from universal_mcp.tools.registry import ToolRegistry
-
-from .graph import build_graph
-
-
-class PlannerAgent(BaseAgent):
-    def __init__(
-        self,
-        name: str,
-        instructions: str,
-        model: str,
-        registry: ToolRegistry,
-        memory: BaseCheckpointSaver | None = None,
-        executor_agent_cls: type[BaseAgent] = ReactAgent,
-        **kwargs,
-    ):
-        super().__init__(name, instructions, model, memory, **kwargs)
-        self.app_registry = registry
-        self.llm = load_chat_model(model)
-        self.executor_agent_cls = executor_agent_cls
-
-    async def _build_graph(self):
-        return build_graph(self.llm, self.app_registry, self.instructions, self.model, self.executor_agent_cls).compile(
-            checkpointer=self.memory
-        )
-
-    @property
-    def graph(self):
-        return self._graph
-
-
-__all__ = ["PlannerAgent"]
universal_mcp/agents/planner/__main__.py
@@ -1,24 +0,0 @@
-import asyncio
-
-from universal_mcp.agentr.registry import AgentrRegistry
-from universal_mcp.agents.planner import PlannerAgent
-
-
-async def main():
-    registry = AgentrRegistry()
-    agent = PlannerAgent(
-        name="planner-agent",
-        instructions="You are a helpful assistant.",
-        model="gemini/gemini-2.5-flash",
-        registry=registry,
-    )
-    from rich.console import Console
-
-    console = Console()
-    console.print("Starting agent...", style="yellow")
-    async for event in agent.stream(user_input="Send an email to manoj@agentr.dev'", thread_id="xyz"):
-        console.print(event.content, style="red")
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
universal_mcp/agents/planner/graph.py
@@ -1,82 +0,0 @@
-from typing import Any
-
-from langchain_core.messages import AIMessage
-from langgraph.graph import END, START, StateGraph
-from loguru import logger
-
-from universal_mcp.agents.shared.tool_node import build_tool_node_graph
-from universal_mcp.types import AgentrToolConfig, ToolConfig
-
-from .state import State
-
-
-def build_graph(llm, registry, instructions, model, executor_agent_cls):
-    """Build the graph for the planner agent."""
-    graph_builder = StateGraph(State)
-
-    async def _tool_finder_node(state: State) -> dict[str, Any]:
-        """Runs the tool finder subgraph to identify necessary tools."""
-        task = state["messages"][-1].content
-        logger.info(f"Running tool finder for task: {task}")
-        tool_finder_graph = build_tool_node_graph(llm, registry)
-        tool_finder_state = await tool_finder_graph.ainvoke({"task": task, "messages": state["messages"]})
-
-        if not tool_finder_state.get("apps_required"):
-            logger.info("Tool finder determined no apps are required.")
-            return {"apps_with_tools": AgentrToolConfig(agentrServers={})}
-
-        apps_with_tools = tool_finder_state.get("apps_with_tools", AgentrToolConfig(agentrServers={}))
-        logger.info(f"Tool finder identified apps and tools: {apps_with_tools}")
-        return {"apps_with_tools": apps_with_tools, "task": task}
-
-    def _should_continue(state: State) -> str:
-        """Determines whether to continue to the executor or end."""
-        if state.get("apps_with_tools") and state["apps_with_tools"].agentrServers:
-            return "continue"
-        return "end"
-
-    async def _executor_node(state: State) -> dict[str, Any]:
-        """Executes the task with the identified tools."""
-        tool_config = state["apps_with_tools"]
-
-        logger.info(f"Preparing executor with tools: {tool_config}")
-        agent = executor_agent_cls(
-            name="executor-agent",
-            instructions=instructions,
-            model=model,
-            registry=registry,
-            tools=ToolConfig(agentrServers=tool_config.agentrServers),
-        )
-
-        await agent.ainit()
-        react_graph = agent._graph
-        logger.info("Invoking executor agent with tools.")
-        # We invoke the agent to make it run the tool
-        response = await react_graph.ainvoke({"messages": state["messages"]})
-
-        final_message = AIMessage(content=response["messages"][-1].content)
-        return {"messages": [final_message]}
-
-    async def _no_tools_node(state: State) -> dict[str, Any]:
-        """Handles tasks that don't require tools by invoking the LLM directly."""
-        logger.info("No tools required. Invoking LLM directly.")
-        response = await llm.ainvoke(state["messages"])
-        return {"messages": [response]}
-
-    graph_builder.add_node("tool_finder", _tool_finder_node)
-    graph_builder.add_node("executor", _executor_node)
-    graph_builder.add_node("no_tools_executor", _no_tools_node)
-
-    graph_builder.add_edge(START, "tool_finder")
-    graph_builder.add_conditional_edges(
-        "tool_finder",
-        _should_continue,
-        {
-            "continue": "executor",
-            "end": "no_tools_executor",
-        },
-    )
-    graph_builder.add_edge("executor", END)
-    graph_builder.add_edge("no_tools_executor", END)
-
-    return graph_builder
universal_mcp/agents/planner/prompts.py
@@ -1 +0,0 @@
-# Prompts for the planner agent
universal_mcp/agents/planner/state.py
@@ -1,12 +0,0 @@
-from typing import Annotated
-
-from langgraph.graph.message import add_messages
-from typing_extensions import TypedDict
-
-from universal_mcp.types import AgentrToolConfig
-
-
-class State(TypedDict):
-    messages: Annotated[list, add_messages]
-    task: str
-    apps_with_tools: AgentrToolConfig