universal-mcp-agents 0.1.2__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Potentially problematic release.
This version of universal-mcp-agents might be problematic.
- universal_mcp/agents/__init__.py +19 -0
- universal_mcp/agents/autoagent/__init__.py +30 -0
- universal_mcp/agents/autoagent/__main__.py +25 -0
- universal_mcp/agents/autoagent/context.py +26 -0
- universal_mcp/agents/autoagent/graph.py +151 -0
- universal_mcp/agents/autoagent/prompts.py +9 -0
- universal_mcp/agents/autoagent/state.py +27 -0
- universal_mcp/agents/autoagent/studio.py +25 -0
- universal_mcp/agents/autoagent/utils.py +13 -0
- universal_mcp/agents/base.py +129 -0
- universal_mcp/agents/bigtool/__init__.py +54 -0
- universal_mcp/agents/bigtool/__main__.py +24 -0
- universal_mcp/agents/bigtool/context.py +24 -0
- universal_mcp/agents/bigtool/graph.py +166 -0
- universal_mcp/agents/bigtool/prompts.py +31 -0
- universal_mcp/agents/bigtool/state.py +27 -0
- universal_mcp/agents/bigtool2/__init__.py +53 -0
- universal_mcp/agents/bigtool2/__main__.py +24 -0
- universal_mcp/agents/bigtool2/agent.py +11 -0
- universal_mcp/agents/bigtool2/context.py +33 -0
- universal_mcp/agents/bigtool2/graph.py +169 -0
- universal_mcp/agents/bigtool2/prompts.py +12 -0
- universal_mcp/agents/bigtool2/state.py +27 -0
- universal_mcp/agents/bigtoolcache/__init__.py +53 -0
- universal_mcp/agents/bigtoolcache/__main__.py +24 -0
- universal_mcp/agents/bigtoolcache/agent.py +11 -0
- universal_mcp/agents/bigtoolcache/context.py +33 -0
- universal_mcp/agents/bigtoolcache/graph.py +176 -0
- universal_mcp/agents/bigtoolcache/prompts.py +13 -0
- universal_mcp/agents/bigtoolcache/state.py +27 -0
- universal_mcp/agents/builder.py +146 -0
- universal_mcp/agents/cli.py +27 -0
- universal_mcp/agents/codeact/__init__.py +243 -0
- universal_mcp/agents/codeact/sandbox.py +27 -0
- universal_mcp/agents/codeact/test.py +15 -0
- universal_mcp/agents/codeact/utils.py +61 -0
- universal_mcp/agents/hil.py +104 -0
- universal_mcp/agents/llm.py +45 -0
- universal_mcp/agents/planner/__init__.py +37 -0
- universal_mcp/agents/planner/__main__.py +24 -0
- universal_mcp/agents/planner/graph.py +81 -0
- universal_mcp/agents/planner/prompts.py +1 -0
- universal_mcp/agents/planner/state.py +12 -0
- universal_mcp/agents/react.py +76 -0
- universal_mcp/agents/shared/tool_node.py +236 -0
- universal_mcp/agents/simple.py +40 -0
- universal_mcp/agents/tools.py +35 -0
- universal_mcp/agents/utils.py +111 -0
- universal_mcp_agents-0.1.2.dist-info/METADATA +21 -0
- universal_mcp_agents-0.1.2.dist-info/RECORD +51 -0
- universal_mcp_agents-0.1.2.dist-info/WHEEL +4 -0
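The distribution targets Python >=3.13 (see the METADATA hunk below). Assuming the release is published to PyPI, it should install with pip:

pip install universal-mcp-agents==0.1.2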
universal_mcp/agents/shared/tool_node.py
@@ -0,0 +1,236 @@
# tool_node.py

import asyncio
from typing import Annotated, TypedDict

from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, AnyMessage, HumanMessage
from langgraph.graph import END, StateGraph
from langgraph.graph.message import add_messages
from loguru import logger
from pydantic import BaseModel, Field

from universal_mcp.tools.registry import ToolRegistry
from universal_mcp.types import ToolConfig

# --- LangGraph Agent ---


class AgentState(TypedDict):
    task: str
    apps_required: bool
    relevant_apps: list[str]
    apps_with_tools: ToolConfig
    messages: Annotated[list[AnyMessage], add_messages]
    reasoning: str


class ToolSelectionOutput(BaseModel):
    tool_ids: list[str] = Field(description="The ids of the tools to use")


def build_tool_node_graph(llm: BaseChatModel, registry: ToolRegistry) -> StateGraph:
    """Builds the LangGraph workflow."""

    async def _check_if_app_needed(state: AgentState) -> AgentState:
        """Checks if an external application is needed for the given task."""
        task = state["task"]
        prompt = f"""
        Given the user's task: "{task}"
        Does this task require an external application to be completed?
        Your answer should be a simple "Yes" or "No", followed by a brief explanation.
        For example:
        Yes, an external application is needed to send emails.
        No, this is a general question that can be answered directly.
        """
        response = await llm.ainvoke(prompt)
        # Handle both string and list content types
        if isinstance(response.content, list):
            content = " ".join([str(item) for item in response.content]).strip()
        else:
            content = response.content.strip()
        reasoning = f"Initial check for app requirement. LLM response: {content}"

        if content.lower().startswith("yes"):
            return {
                **state,
                "messages": [AIMessage(content=content)],
                "apps_required": True,
                "reasoning": reasoning,
            }
        else:
            return {
                **state,
                "messages": [AIMessage(content=content)],
                "apps_required": False,
                "reasoning": reasoning,
            }

    async def _find_relevant_apps(state: AgentState) -> AgentState:
        """Identifies relevant apps for the given task, preferring connected apps."""
        task = state["task"]
        all_apps = await registry.list_all_apps()
        connected_apps = await registry.list_connected_apps()
        prompt = """
        You are an expert at identifying which applications are needed to complete specific tasks.

        TASK: "{task}"

        AVAILABLE APPS:
        {all_apps}

        CONNECTED APPS (user has already authenticated these):
        {connected_apps}

        INSTRUCTIONS:
        1. Analyze the task carefully to understand what functionality is required.
        2. Review the available apps and their descriptions to identify which ones could help.
        3. If multiple apps can perform the task, prefer connected apps, but you MUST include all relevant apps.
        4. Consider apps that provide complementary functionality for complex tasks.
        5. Only suggest apps that are directly relevant to the core task requirements.
        6. Your output should be a list of app IDs.

        """

        class AppList(BaseModel):
            app_list: list[str]
            reasoning: str

        response = await llm.with_structured_output(AppList).ainvoke(
            input=prompt.format(task=task, all_apps=all_apps, connected_apps=connected_apps)
        )
        app_list = response.app_list
        reasoning = f"Found relevant apps: {app_list}. Reasoning: {response.reasoning}"
        logger.info(f"Found relevant apps: {app_list}.")

        return {
            **state,
            "messages": [AIMessage(content=f"Identified relevant apps: {', '.join(app_list)}")],
            "relevant_apps": app_list,
            "reasoning": state.get("reasoning", "") + "\n" + reasoning,
        }

    async def _select_tools(task: str, tools: list[dict]) -> list[str]:
        """Selects the most appropriate tools from a list for a given task."""
        tool_candidates = [f"{tool['name']}: {tool['description']}" for tool in tools]

        SELECT_TOOL_PROMPT = f"""You are an AI assistant that helps the user perform tasks using various apps (each app has multiple tools).
        You will be provided with a task and a list of tools which might be relevant for this task.

        Your goal is to select the most appropriate tool for the given task.
        <task>
        {task}
        </task>

        <tool_candidates>
        - {tool_candidates}
        </tool_candidates>

        Only return tool ids.
        """

        response = await llm.with_structured_output(schema=ToolSelectionOutput).ainvoke(input=SELECT_TOOL_PROMPT)

        selected_tool_ids = response.tool_ids
        return selected_tool_ids

    async def _generate_search_query(task: str) -> str:
        """Generates a concise search query from the user's task."""
        prompt = f"""
        You are an expert at summarizing a user's task into a concise search query for finding relevant tools.
        The query should capture all the main actions or intents of the task.

        For example:
        Task: "Send an email to abc@the-read-example.com with the subject 'Hello'"
        Query: "send email"

        Task: "Create a new contact in my CRM for John Doe"
        Query: "create contact"

        Task: "Find the latest news about artificial intelligence"
        Query: "search news"

        Task: "Post a message to the #general channel in Slack and create a new issue in Jira"
        Query: "send message, create issue"

        Task: "{task}"
        """

        class SearchQuery(BaseModel):
            query: str

        response = await llm.with_structured_output(SearchQuery).ainvoke(input=prompt.format(task=task))
        query = response.query
        logger.info(f"Generated search query '{query}' for task '{task}'")
        return query

    async def _search_tools(state: AgentState) -> AgentState:
        """Searches for and filters tools in the relevant apps."""
        task = state["task"]
        logger.info(f"Searching for tools in relevant apps for task: {task}")
        search_query = await _generate_search_query(task)
        apps_with_tools_dict = {}
        reasoning_steps = []
        for app_name in state["relevant_apps"]:
            logger.info(f"Searching for tools in {app_name} for task: {task} with query '{search_query}'")
            found_tools = await registry.search_tools(query=search_query, app_id=app_name)
            selected_tools = await _select_tools(task, found_tools)
            apps_with_tools_dict[app_name] = selected_tools
            reasoning_steps.append(f"For '{app_name}', selected tool(s): {', '.join(selected_tools)}.")

        return {
            **state,
            "apps_with_tools": apps_with_tools_dict,
            "reasoning": state.get("reasoning", "") + "\n" + "\n".join(reasoning_steps),
        }

    def _handle_no_apps_found(state: AgentState) -> AgentState:
        """Handles the case where no relevant apps are found."""
        reasoning = "No suitable application was found among the available apps."
        return {
            **state,
            "apps_with_tools": {},
            "reasoning": state.get("reasoning", "") + "\n" + reasoning,
        }

    workflow = StateGraph(AgentState)

    workflow.add_node("check_if_app_needed", _check_if_app_needed)
    workflow.add_node("find_relevant_apps", _find_relevant_apps)
    workflow.add_node("search_tools", _search_tools)
    workflow.add_node("handle_no_apps_found", _handle_no_apps_found)

    workflow.set_entry_point("check_if_app_needed")

    workflow.add_conditional_edges(
        "check_if_app_needed",
        lambda state: "find_relevant_apps" if state["apps_required"] else END,
    )
    workflow.add_conditional_edges(
        "find_relevant_apps",
        lambda state: "search_tools" if state["relevant_apps"] else "handle_no_apps_found",
    )

    workflow.add_edge("search_tools", END)
    workflow.add_edge("handle_no_apps_found", END)

    return workflow.compile()


async def main():
    from universal_mcp.agentr.registry import AgentrRegistry
    from universal_mcp.agents.llm import load_chat_model

    registry = AgentrRegistry()
    llm = load_chat_model("gemini/gemini-2.5-flash")
    graph = build_tool_node_graph(llm, registry)
    initial_state = {
        "task": "Send an email to manoj@agentr.dev",
        "messages": [HumanMessage(content="Send an email to manoj@agentr.dev")],
    }
    result = await graph.ainvoke(initial_state)
    print(result)


if __name__ == "__main__":
    asyncio.run(main())
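For a run like the one in main(), the final state's "apps_with_tools" maps each relevant app id to the tool ids selected for it, and "reasoning" accumulates one line per node that ran. A hypothetical sketch of the result shape (the app and tool ids are illustrative, not from a real run):

result = await graph.ainvoke(initial_state)
# result["apps_with_tools"] might look like: {"google_mail": ["send_email"]}  (illustrative ids)
# result["reasoning"] holds the concatenated per-node reasoning lines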
universal_mcp/agents/simple.py
@@ -0,0 +1,40 @@
import asyncio
from typing import Annotated

from langgraph.checkpoint.base import BaseCheckpointSaver
from langgraph.graph import END, START, StateGraph
from langgraph.graph.message import add_messages
from typing_extensions import TypedDict

from universal_mcp.agents.base import BaseAgent
from universal_mcp.agents.llm import load_chat_model


class State(TypedDict):
    messages: Annotated[list, add_messages]


class SimpleAgent(BaseAgent):
    def __init__(self, name: str, instructions: str, model: str, memory: BaseCheckpointSaver = None, **kwargs):
        super().__init__(name, instructions, model, memory, **kwargs)
        self.llm = load_chat_model(model)

    async def _build_graph(self):
        graph_builder = StateGraph(State)

        async def chatbot(state: State):
            messages = [
                {"role": "system", "content": self.instructions},
                *state["messages"],
            ]
            return {"messages": [await self.llm.ainvoke(messages)]}

        graph_builder.add_node("chatbot", chatbot)
        graph_builder.add_edge(START, "chatbot")
        graph_builder.add_edge("chatbot", END)
        return graph_builder.compile(checkpointer=self.memory)


if __name__ == "__main__":
    agent = SimpleAgent("Simple Agent", "You are a helpful assistant", "azure/gpt-4o")
    asyncio.run(agent.run_interactive())
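Beyond run_interactive(), the compiled graph can also be driven directly. A minimal sketch, assuming no checkpointer is configured (with one, a thread_id config would be required); note it calls the private _build_graph purely for illustration:

import asyncio

async def demo():
    agent = SimpleAgent("Simple Agent", "You are a helpful assistant", "azure/gpt-4o")
    graph = await agent._build_graph()  # compiled LangGraph
    result = await graph.ainvoke({"messages": [{"role": "user", "content": "Hello"}]})
    print(result["messages"][-1].content)

asyncio.run(demo())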
universal_mcp/agents/tools.py
@@ -0,0 +1,35 @@
import json

from langchain_mcp_adapters.client import MultiServerMCPClient

from universal_mcp.agentr.integration import AgentrIntegration
from universal_mcp.applications.utils import app_from_slug
from universal_mcp.tools.adapters import ToolFormat
from universal_mcp.tools.manager import ToolManager
from universal_mcp.types import ToolConfig


async def load_agentr_tools(agentr_servers: dict):
    tool_manager = ToolManager()
    for app_name, tool_names in agentr_servers.items():
        app = app_from_slug(app_name)
        integration = AgentrIntegration(name=app_name)
        app_instance = app(integration=integration)
        tool_manager.register_tools_from_app(app_instance, tool_names=tool_names["tools"])
    tools = tool_manager.list_tools(format=ToolFormat.LANGCHAIN)
    return tools


async def load_mcp_tools(mcp_servers: dict):
    client = MultiServerMCPClient(mcp_servers)
    tools = await client.get_tools()
    return tools


async def load_tools(path: str) -> ToolConfig:
    with open(path) as f:
        data = json.load(f)
    config = ToolConfig.model_validate(data)
    agentr_tools = await load_agentr_tools(config.model_dump(exclude_none=True)["agentrServers"])
    mcp_tools = await load_mcp_tools(config.model_dump(exclude_none=True)["mcpServers"])
    return agentr_tools + mcp_tools
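load_tools reads a JSON file that must validate as a ToolConfig. Judging from the accessors above (an "agentrServers" mapping with per-app "tools" lists, and an "mcpServers" mapping handed to MultiServerMCPClient), a file of roughly this shape should work; every name here is an illustrative assumption, not taken from the package:

{
  "agentrServers": {
    "google_mail": {"tools": ["send_email"]}
  },
  "mcpServers": {
    "filesystem": {
      "command": "npx",
      "args": ["-y", "@modelcontextprotocol/server-filesystem", "/tmp"],
      "transport": "stdio"
    }
  }
}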
universal_mcp/agents/utils.py
@@ -0,0 +1,111 @@
import json
from contextlib import contextmanager

from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown
from rich.panel import Panel
from rich.prompt import Prompt
from rich.table import Table


class RichCLI:
    def __init__(self):
        self.console = Console()

    def display_welcome(self, agent_name: str):
        """Display welcome message"""
        welcome_text = f"""
# Welcome to {agent_name}!

Available commands:
- Type your questions naturally
- `/help` - Show help
- `/tools` - List available tools
- `/exit` - Exit the application
"""
        self.console.print(Panel(Markdown(welcome_text), title="🤖 AI Agent CLI", border_style="blue"))

    def display_agent_response(self, response: str, agent_name: str):
        """Display agent response with formatting"""
        self.console.print(Panel(Markdown(response), title=f"🤖 {agent_name}", border_style="green", padding=(1, 2)))

    @contextmanager
    def display_agent_response_streaming(self, agent_name: str):
        """Context manager for streaming agent response updates."""

        with Live(refresh_per_second=10, console=self.console) as live:

            class StreamUpdater:
                content = []

                def update(self, chunk: str):
                    self.content.append(chunk)
                    panel = Panel(
                        Markdown("".join(self.content)),
                        title=f"🤖 {agent_name}",
                        border_style="green",
                        padding=(1, 2),
                    )
                    live.update(panel)

            yield StreamUpdater()

    def display_thinking(self, thought: str):
        """Display agent's thinking process"""
        if thought:
            self.console.print(Panel(thought, title="🔍 Thinking", border_style="yellow", padding=(1, 2)))

    def display_tools(self, tools: list):
        """Display available tools in a table"""
        table = Table(title="🛠️ Available Tools")
        table.add_column("Tool Name", style="cyan")
        table.add_column("Description", style="white")

        for tool in tools:
            func_info = tool["function"]
            table.add_row(func_info["name"], func_info["description"])

        self.console.print(table)

    def display_tool_call(self, tool_call: dict):
        """Display tool call"""
        tool_call_str = json.dumps(tool_call, indent=2)
        self.console.print(Panel(tool_call_str, title="🛠️ Tool Call", border_style="green", padding=(1, 2)))

    def display_tool_result(self, tool_result: dict):
        """Display tool result"""
        tool_result_str = json.dumps(tool_result, indent=2)
        self.console.print(Panel(tool_result_str, title="🛠️ Tool Result", border_style="green", padding=(1, 2)))

    def display_error(self, error: str):
        """Display error message"""
        self.console.print(Panel(error, title="❌ Error", border_style="red"))

    def get_user_input(self) -> str:
        """Get user input with rich prompt"""
        return Prompt.ask("[bold blue]You[/bold blue]", console=self.console)

    def display_info(self, message: str):
        """Display info message"""
        self.console.print(f"[bold cyan]ℹ️ {message}[/bold cyan]")

    def clear_screen(self):
        """Clear the screen"""
        self.console.clear()

    def handle_interrupt(self, interrupt) -> str | bool:
        interrupt_type = interrupt.value["type"]
        if interrupt_type == "text":
            value = Prompt.ask(interrupt.value["question"])
            return value
        elif interrupt_type == "bool":
            value = Prompt.ask(interrupt.value["question"], choices=["y", "n"], default="y")
            return value
        elif interrupt_type == "choice":
            value = Prompt.ask(
                interrupt.value["question"], choices=interrupt.value["choices"], default=interrupt.value["choices"][0]
            )
            return value
        else:
            raise ValueError(f"Invalid interrupt type: {interrupt.value['type']}")
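A short usage sketch of the streaming helper above; each update() call re-renders the live panel with the accumulated text (the chunks are illustrative):

from universal_mcp.agents.utils import RichCLI

cli = RichCLI()
with cli.display_agent_response_streaming("Demo Agent") as updater:
    for chunk in ("Streaming", " a", " response..."):
        updater.update(chunk)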
universal_mcp_agents-0.1.2.dist-info/METADATA
@@ -0,0 +1,21 @@
Metadata-Version: 2.4
Name: universal-mcp-agents
Version: 0.1.2
Summary: Add your description here
Project-URL: Homepage, https://github.com/universal-mcp/applications
Project-URL: Repository, https://github.com/universal-mcp/applications
Author-email: Manoj Bajaj <manojbajaj95@gmail.com>
License: MIT
Requires-Python: >=3.13
Requires-Dist: langchain-anthropic>=0.3.19
Requires-Dist: langchain-google-genai>=2.1.10
Requires-Dist: langchain-openai>=0.3.32
Requires-Dist: langgraph>=0.6.6
Requires-Dist: universal-mcp
Requires-Dist: universal-mcp-applications>=0.1.2
Provides-Extra: dev
Requires-Dist: pre-commit; extra == 'dev'
Requires-Dist: ruff; extra == 'dev'
Provides-Extra: test
Requires-Dist: pytest-cov; extra == 'test'
Requires-Dist: pytest<9.0.0,>=7.0.0; extra == 'test'
universal_mcp_agents-0.1.2.dist-info/RECORD
@@ -0,0 +1,51 @@
universal_mcp/agents/__init__.py,sha256=s50fHbQ3ufC_7mwQydHoua4KJOjKjKcUrAgMN0A_34M,588
universal_mcp/agents/base.py,sha256=zNYbzV1KY0OM-lDvLBMpdDpueKc6Wy54kbuzjFmBa5w,5170
universal_mcp/agents/builder.py,sha256=duPULxwOEt6IFMK9n0yvaeOFW3Js50WucFi224BMwG8,5956
universal_mcp/agents/cli.py,sha256=7GdRBpu9rhZPiC2vaNQXWI7K-0yCnvdlmE0IFpvy2Gk,539
universal_mcp/agents/hil.py,sha256=6xi0hhK5g-rhCrAMcGbjcKMReLWPC8rnFZMBOF3N_cY,3687
universal_mcp/agents/llm.py,sha256=P03zoUxBfivMa3djt2kmWANxGLg77Tapx1aQJEPVTCo,1592
universal_mcp/agents/react.py,sha256=EVk59XVelBFV13us3CG-RolzRmu8v7FrhlR7m2QIYUY,2526
universal_mcp/agents/simple.py,sha256=JL8TFyXlA1F4zcArgKhlqVIbLWXetwM05z4MPDJgFeI,1367
universal_mcp/agents/tools.py,sha256=J8VdS1xnSEFeEVSmp5Hb47J2-4WJWtsIidUP1lFXhds,1341
universal_mcp/agents/utils.py,sha256=7kwFpD0Rv6JqHG-LlNCVwSu_xRX-N119mUmiBroHJL4,4109
universal_mcp/agents/autoagent/__init__.py,sha256=RruAbcjyMTB-dIRkzFZYtQxrTpZetynBRYd1xD9noj8,836
universal_mcp/agents/autoagent/__main__.py,sha256=HH5D5gSw6xirrSoj_0CCmQlVq_wfp--b6hZdiHGfXD8,654
universal_mcp/agents/autoagent/context.py,sha256=RgjW1uCslucxYJpdmi4govd-0V1_9e6Y_kjWl3FpLrE,847
universal_mcp/agents/autoagent/graph.py,sha256=zQ8XDPELK5MbdMy5hy9rkJtgd71I1RdPlpbNkqvXtuM,6645
universal_mcp/agents/autoagent/prompts.py,sha256=v-EwzZ_0XPuBNd_r8aWxmKMSQlZLTVBr0o-dmTQMN1w,892
universal_mcp/agents/autoagent/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
universal_mcp/agents/autoagent/studio.py,sha256=nfVRzPXwBjDORHA0wln2k3Nz-zQXNKgZMvgeqBvkdtM,644
universal_mcp/agents/autoagent/utils.py,sha256=AFq-8scw_WlSZxDnTzxSNrOSiGYsIlqkqtQLDWf_rMU,431
universal_mcp/agents/bigtool/__init__.py,sha256=gKSEOmOE5ZsIypxu1sUibzJ8acbk83DjApxE0Adawro,1853
universal_mcp/agents/bigtool/__main__.py,sha256=_4HBqnlmdJwXOgeMITjBgaDHihED-aEgQmSXL9xcj0Y,602
universal_mcp/agents/bigtool/context.py,sha256=KM_B-rvEulrvXSBrXAJpwxGHVMW0HgiYKMnmrL2pUEQ,688
universal_mcp/agents/bigtool/graph.py,sha256=fOr0p547kjpM_CkRyyEcDxmTZ5lEKaTAR98nRCkgsks,8284
universal_mcp/agents/bigtool/prompts.py,sha256=A6El6Qw9r_D8OD4IZKuYqvrJFJZZmUhrTKlyqFPf6c0,1666
universal_mcp/agents/bigtool/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
universal_mcp/agents/bigtool2/__init__.py,sha256=uP52BOl0z1n_ECbybf6lxVvC0PAjNMrBUcXUOibXjvA,1779
universal_mcp/agents/bigtool2/__main__.py,sha256=C4Mi8vM9kuGa_CryzIc9nL4-u73ZvSK5tOTbMDMN54I,605
universal_mcp/agents/bigtool2/agent.py,sha256=iwn2kyC-Wft40f_QHlLtg7fPpFUpwkjk7I5LJrrS4i8,434
universal_mcp/agents/bigtool2/context.py,sha256=1DMp8g4Gb6UUxVh8bcqafV2WpTGKo6GlaDN6Ey7cAbo,930
universal_mcp/agents/bigtool2/graph.py,sha256=l6LBWmBCsjO0r1TZUXAQHXvz_iqkW_9tGJSlySkcG7A,8373
universal_mcp/agents/bigtool2/prompts.py,sha256=Kn1sDrjH2xb3js_MPPu5PJHMP45unl93CdOC97Q_hzw,1652
universal_mcp/agents/bigtool2/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
universal_mcp/agents/bigtoolcache/__init__.py,sha256=dTPxrFIJGaJKb67a935UEkgqnBGdN9q1ba2HnpNAuq4,1792
universal_mcp/agents/bigtoolcache/__main__.py,sha256=HkPEQqsdnWtDzWSbYdVBBc_JhpRi82TYuaubxNMtt4w,622
universal_mcp/agents/bigtoolcache/agent.py,sha256=xLTymzsmpNpJf8-y1Mi8BbuWl09kXsk1sdhbJyVwASU,446
universal_mcp/agents/bigtoolcache/context.py,sha256=1DMp8g4Gb6UUxVh8bcqafV2WpTGKo6GlaDN6Ey7cAbo,930
universal_mcp/agents/bigtoolcache/graph.py,sha256=--CuUEc2IKGdKeMueEvsQ-GxCYZVlDclQdKhrf0ctvU,8418
universal_mcp/agents/bigtoolcache/prompts.py,sha256=bIbuktsi0EkOXxYGkyB-e_rmolllrHo4kDsvWO5p_1c,1874
universal_mcp/agents/bigtoolcache/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
universal_mcp/agents/codeact/__init__.py,sha256=5D_I3lI_3tWjZERRoFav_bPe9UDaJ53pDzZYtyixg3E,10097
universal_mcp/agents/codeact/sandbox.py,sha256=lGRzhuXTHCB1qauuOI3bH1-fPTsyL6Lf9EmMIz4C2xQ,1039
universal_mcp/agents/codeact/test.py,sha256=AI3qWszpM46hF4wzuQm6A8g_UkhGmcg9KhHtk9u14ro,497
universal_mcp/agents/codeact/utils.py,sha256=VuMvLTxBBh3pgaJk8RWj5AK8XZFF-1gnZJ6jFLeM_CI,1690
universal_mcp/agents/planner/__init__.py,sha256=VTLVqIWkVh5SAuFoFupxByoqyNS1vCuc14mdUSr-vKE,1090
universal_mcp/agents/planner/__main__.py,sha256=nAFabo6SVZh4_4GV-SWCpnGg5GsVXgiHYpm9mhCQ6zw,685
universal_mcp/agents/planner/graph.py,sha256=Cj5Y1BI9uJvrYsr4JrQSPRSszznjdVD2dJHWHHJhxp0,3101
universal_mcp/agents/planner/prompts.py,sha256=vLViZ4BeinqUe8gXACLl04UUnH-Hie5L2qDyhCmSNe0,32
universal_mcp/agents/planner/state.py,sha256=EdrIELvxzBZtdC1FpmErYnCC7OSJ3Irx9QGiCBCeomA,279
universal_mcp/agents/shared/tool_node.py,sha256=IkjEcgzRAgjQTqcoa-i1dDY2LJfgOGj9HF8vihYuk_s,8678
universal_mcp_agents-0.1.2.dist-info/METADATA,sha256=yLpsTnGK6izBne_twuT9ED4M_CYhD4XBzNR3_E8WBjQ,781
universal_mcp_agents-0.1.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
universal_mcp_agents-0.1.2.dist-info/RECORD,,