universal-mcp-agents 0.1.8__py3-none-any.whl → 0.1.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of universal-mcp-agents has been flagged as potentially problematic.
- universal_mcp/agents/__init__.py +11 -8
- universal_mcp/agents/base.py +13 -18
- universal_mcp/agents/bigtool2/__init__.py +6 -7
- universal_mcp/agents/bigtool2/__main__.py +2 -4
- universal_mcp/agents/bigtool2/agent.py +1 -0
- universal_mcp/agents/bigtool2/graph.py +48 -184
- universal_mcp/agents/bigtool2/meta_tools.py +120 -0
- universal_mcp/agents/bigtoolcache/__init__.py +31 -22
- universal_mcp/agents/bigtoolcache/__main__.py +1 -4
- universal_mcp/agents/bigtoolcache/agent.py +1 -3
- universal_mcp/agents/bigtoolcache/graph.py +101 -191
- universal_mcp/agents/bigtoolcache/prompts.py +7 -31
- universal_mcp/agents/bigtoolcache/tools.py +141 -0
- universal_mcp/agents/builder.py +10 -20
- universal_mcp/agents/cli.py +1 -2
- universal_mcp/agents/codeact/__init__.py +2 -254
- universal_mcp/agents/codeact/__main__.py +35 -0
- universal_mcp/agents/codeact/agent.py +160 -0
- universal_mcp/agents/codeact/prompts.py +91 -0
- universal_mcp/agents/codeact/sandbox.py +42 -18
- universal_mcp/agents/codeact/state.py +10 -0
- universal_mcp/agents/codeact/utils.py +12 -5
- universal_mcp/agents/hil.py +1 -6
- universal_mcp/agents/planner/__init__.py +1 -3
- universal_mcp/agents/planner/graph.py +1 -3
- universal_mcp/agents/react.py +14 -6
- universal_mcp/agents/shared/prompts.py +31 -17
- universal_mcp/agents/shared/tool_node.py +68 -53
- universal_mcp/agents/simple.py +2 -1
- universal_mcp/agents/utils.py +4 -15
- universal_mcp/applications/ui/app.py +5 -15
- {universal_mcp_agents-0.1.8.dist-info → universal_mcp_agents-0.1.10.dist-info}/METADATA +2 -1
- universal_mcp_agents-0.1.10.dist-info/RECORD +42 -0
- universal_mcp/agents/autoagent/__init__.py +0 -30
- universal_mcp/agents/autoagent/__main__.py +0 -25
- universal_mcp/agents/autoagent/context.py +0 -26
- universal_mcp/agents/autoagent/graph.py +0 -170
- universal_mcp/agents/autoagent/prompts.py +0 -9
- universal_mcp/agents/autoagent/state.py +0 -27
- universal_mcp/agents/autoagent/utils.py +0 -13
- universal_mcp/agents/bigtool/__init__.py +0 -58
- universal_mcp/agents/bigtool/__main__.py +0 -23
- universal_mcp/agents/bigtool/graph.py +0 -210
- universal_mcp/agents/bigtool/prompts.py +0 -31
- universal_mcp/agents/bigtool/state.py +0 -27
- universal_mcp/agents/bigtoolcache/tools_all.txt +0 -956
- universal_mcp/agents/bigtoolcache/tools_important.txt +0 -474
- universal_mcp/agents/codeact/test.py +0 -16
- universal_mcp_agents-0.1.8.dist-info/RECORD +0 -51
- {universal_mcp_agents-0.1.8.dist-info → universal_mcp_agents-0.1.10.dist-info}/WHEEL +0 -0
--- a/universal_mcp/applications/ui/app.py
+++ b/universal_mcp/applications/ui/app.py
@@ -134,9 +134,7 @@ class UiApp(BaseApplication):
             "headers": dict(response.headers),
         }
 
-    def http_get(
-        self, url: str, headers: dict | None = None, query_params: dict | None = None
-    ):
+    def http_get(self, url: str, headers: dict | None = None, query_params: dict | None = None):
         """
         Perform a GET request to the specified URL with optional parameters.
 
@@ -150,16 +148,12 @@ class UiApp(BaseApplication):
         Tags:
             get, important
         """
-        logger.debug(
-            f"GET request to {url} with headers {headers} and query params {query_params}"
-        )
+        logger.debug(f"GET request to {url} with headers {headers} and query params {query_params}")
         response = httpx.get(url, params=query_params, headers=headers)
         response.raise_for_status()
         return self._handle_response(response)
 
-    def http_post(
-        self, url: str, headers: dict | None = None, body: dict | None = None
-    ):
+    def http_post(self, url: str, headers: dict | None = None, body: dict | None = None):
         """
         Perform a POST request to the specified URL with optional parameters.
 
@@ -197,9 +191,7 @@ class UiApp(BaseApplication):
         response.raise_for_status()
         return self._handle_response(response)
 
-    def http_delete(
-        self, url: str, headers: dict | None = None, body: dict | None = None
-    ):
+    def http_delete(self, url: str, headers: dict | None = None, body: dict | None = None):
         """
         Perform a DELETE request to the specified URL with optional parameters.
 
@@ -218,9 +210,7 @@ class UiApp(BaseApplication):
         response.raise_for_status()
         return self._handle_response(response)
 
-    def http_patch(
-        self, url: str, headers: dict | None = None, body: dict | None = None
-    ):
+    def http_patch(self, url: str, headers: dict | None = None, body: dict | None = None):
         """
         Perform a PATCH request to the specified URL with optional parameters.
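
The four hunks above are formatting-only: multi-line signatures and a wrapped logger.debug call are collapsed onto single lines, with no change in behavior. For orientation, a rough sketch of how a caller might use these helpers (the constructor arguments are an assumption; only the method signatures and the "headers" key of _handle_response appear in this diff):

    # Hypothetical usage; UiApp's constructor is not shown in this diff.
    app = UiApp()

    # GET with optional headers and query parameters; non-2xx responses
    # raise via response.raise_for_status() before _handle_response runs.
    result = app.http_get(
        "https://httpbin.org/get",
        headers={"Accept": "application/json"},
        query_params={"q": "status"},
    )
    print(result["headers"])  # _handle_response includes the response headers

    # http_post / http_delete / http_patch take a JSON body instead.
    created = app.http_post("https://httpbin.org/post", body={"name": "demo"})
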
--- a/universal_mcp_agents-0.1.8.dist-info/METADATA
+++ b/universal_mcp_agents-0.1.10.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: universal-mcp-agents
-Version: 0.1.8
+Version: 0.1.10
 Summary: Add your description here
 Project-URL: Homepage, https://github.com/universal-mcp/applications
 Project-URL: Repository, https://github.com/universal-mcp/applications
@@ -11,6 +11,7 @@ Requires-Dist: langchain-anthropic>=0.3.19
 Requires-Dist: langchain-google-genai>=2.1.10
 Requires-Dist: langchain-openai>=0.3.32
 Requires-Dist: langgraph>=0.6.6
+Requires-Dist: typer>=0.17.4
 Requires-Dist: universal-mcp-applications>=0.1.14
 Requires-Dist: universal-mcp>=0.1.24rc21
 Provides-Extra: dev
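
Beyond the version bump, the only dependency change is the new typer requirement, which backs the rewritten universal_mcp/agents/cli.py listed above. As a generic illustration (this is not the package's actual cli.py), a minimal Typer entry point looks like:

    import typer

    app = typer.Typer()

    @app.command()
    def run(name: str = "react", model: str = "azure/gpt-4.1") -> None:
        """Run an agent by name (hypothetical command, not the real CLI)."""
        typer.echo(f"Running agent {name!r} with model {model!r}")

    if __name__ == "__main__":
        app()
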
--- /dev/null
+++ b/universal_mcp_agents-0.1.10.dist-info/RECORD
@@ -0,0 +1,42 @@
+universal_mcp/agents/__init__.py,sha256=oPoHMITGbHN4Ey68ZrVbmy7sNzVYhoXQgVII9fZwqL8,1245
+universal_mcp/agents/base.py,sha256=KXBxf3TXrVHi-wBVD-cs6PSKfMtUnm73l-hC83FjOog,6753
+universal_mcp/agents/builder.py,sha256=Xl_dGmzbtop3lICH2njnN6yxFF0SnEGY8u1tOIJy2Pk,8677
+universal_mcp/agents/cli.py,sha256=-luC55FHCTwnpcRgqnV95yQa-mGlLYDlseUjLqFrAfs,1014
+universal_mcp/agents/hil.py,sha256=_xLlBte4v5ex-RxXy5H3LqwFhtd3KE1QUHX1QDGIl2w,3760
+universal_mcp/agents/llm.py,sha256=hVRwjZs3MHl5_3BWedmurs2Jt1oZDfFX0Zj9F8KH7fk,1787
+universal_mcp/agents/react.py,sha256=0OZvdBTnQdFF3Wliv2l4wiF2BMd0VG1cVflOaWDC8r0,3166
+universal_mcp/agents/simple.py,sha256=W5_zb2DAGtAx_p5weSQGIC3gheZwxhmwZBuKKBTjd04,1938
+universal_mcp/agents/utils.py,sha256=lnRmP6HBIme4W-iKy1BOb9gTyb9-ooJxXwCyWAQFP9k,4712
+universal_mcp/agents/bigtool2/__init__.py,sha256=i4virR9r1_1FcS_-iuSHZWgEzYZwOroT6J44qPb0ZgM,2462
+universal_mcp/agents/bigtool2/__main__.py,sha256=t6fWhLh3SnpN_05cww3LA_r_5Rb0gaF_U4FH1Mpsv1Y,655
+universal_mcp/agents/bigtool2/agent.py,sha256=4GIQIy2VQgdXOezmET8G7tvP_37Vv8C027bGdGXJbTI,437
+universal_mcp/agents/bigtool2/graph.py,sha256=FuW1XyTIr1aUbAC4ea8JWRR0JENvLfZGUSgQFmk0h3A,6544
+universal_mcp/agents/bigtool2/meta_tools.py,sha256=02xOsGdxZpXBirn2KWk63UqYPQjI41nQ2KGj2zKBf7Y,5306
+universal_mcp/agents/bigtool2/prompts.py,sha256=rQFtZDkwU9z8d4PWdt6jpohGhyab658Xvk8hvNVBFBA,1843
+universal_mcp/agents/bigtool2/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
+universal_mcp/agents/bigtoolcache/__init__.py,sha256=qrUwYqhZNE0PdmUCGc5jRQt45Tr1xPDSfBxCPJA1DDM,2260
+universal_mcp/agents/bigtoolcache/__main__.py,sha256=noqT7Nqr17q7Eeiib-Dk2qG6AH4LkNyveeU_ceNDfMA,473
+universal_mcp/agents/bigtoolcache/agent.py,sha256=NCF6fdBPwVcaOyhv8xbAy5DBqbGxJiv8_fLqKib7QJc,267
+universal_mcp/agents/bigtoolcache/context.py,sha256=ny7gd-vvVpUOYAeQbAEUT0A6Vm6Nn2qGywxTzPBzYFg,929
+universal_mcp/agents/bigtoolcache/graph.py,sha256=9KUjsVu5dtPdnyNkqJ5LsMnfH-IF_4H2me_L5aHgsug,4315
+universal_mcp/agents/bigtoolcache/prompts.py,sha256=Rz30qNGdscDG65vMj9d0Vfe7X1pQjBDQBBNc3BuyC94,1886
+universal_mcp/agents/bigtoolcache/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
+universal_mcp/agents/bigtoolcache/tools.py,sha256=ynyEj9mVwKKDhxm76sjspyH51SFi63g2Vydi39pY0qY,5562
+universal_mcp/agents/codeact/__init__.py,sha256=rLE8gvOo5H4YSr71DRq76b3RV3uuotxuAy_VnBVaVwk,60
+universal_mcp/agents/codeact/__main__.py,sha256=FRfIkgcZfawP-M66v4ePijA6J2fs7nQv92G_8cj5qYA,1142
+universal_mcp/agents/codeact/agent.py,sha256=L5UlVc13AMLRDgx5l1dANoAHSCqAf5A_7wpU4qPXTsc,6326
+universal_mcp/agents/codeact/prompts.py,sha256=Atv6pd5Y7fuBZiQuXS_FKdRJk7oSETEGIob5gDGOK6E,3854
+universal_mcp/agents/codeact/sandbox.py,sha256=qeydhM1vBhgbTWpjEWVdrSRCAcw2P-qVmRXW42YXEbA,1875
+universal_mcp/agents/codeact/state.py,sha256=xu_iPOBus3xqah_5ERhkLa5ZaBZQpHqpia9O-7_3pGw,211
+universal_mcp/agents/codeact/utils.py,sha256=JUbT_HYGS_D1BzmzoVpORIe7SGur1KgJguTZ_1tZ4JY,1918
+universal_mcp/agents/planner/__init__.py,sha256=9P1UL-ABvrTIWTJ8wcvZmkqT8uyROZxsmUFhpjTK-Q4,1313
+universal_mcp/agents/planner/__main__.py,sha256=OfhTfYDZK_ZUfc8sX-Sa6TWk-dNqD2rl13Ln64mNAtw,771
+universal_mcp/agents/planner/graph.py,sha256=70hhIoEZOcYojpiyVSCedgYpnmxVP7aqdn8s6VBu-D4,3228
+universal_mcp/agents/planner/prompts.py,sha256=_JoHqiAvswtqCDu90AGUHmfsu8eWE1-_yI4LLn3pqMU,657
+universal_mcp/agents/planner/state.py,sha256=qqyp-jSGsCxe1US-PRLT4-y1sITAcVE6nCMlQLnvop0,278
+universal_mcp/agents/shared/prompts.py,sha256=VOsXSUEwBXPaAuxJTUF6bgDGr41u6uctUNQSMRt_OJc,6414
+universal_mcp/agents/shared/tool_node.py,sha256=Ua_wzMt4YgIx4zLp3_ZCow-28qORwrZ2FvKqLPt3RlI,10415
+universal_mcp/applications/ui/app.py,sha256=uaS1KrwrGxw9oexdLj2Jok77DrZQAmby3uVxCONQyV8,11276
+universal_mcp_agents-0.1.10.dist-info/METADATA,sha256=x-whXAce-crCYn1O1JVaV-ur0sEIlbsls_1qFbeFa0Q,878
+universal_mcp_agents-0.1.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+universal_mcp_agents-0.1.10.dist-info/RECORD,,
--- a/universal_mcp/agents/autoagent/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from langgraph.checkpoint.base import BaseCheckpointSaver
-from universal_mcp.tools.registry import ToolRegistry
-
-from universal_mcp.agents.autoagent.graph import build_graph
-from universal_mcp.agents.base import BaseAgent
-
-
-class AutoAgent(BaseAgent):
-    def __init__(
-        self,
-        name: str,
-        instructions: str,
-        model: str,
-        memory: BaseCheckpointSaver | None = None,
-        registry: ToolRegistry | None = None,
-        **kwargs,
-    ):
-        super().__init__(name, instructions, model, memory, **kwargs)
-        self.tool_registry = registry
-
-    async def _build_graph(self):
-        builder = await build_graph(self.tool_registry, self.instructions)
-        return builder.compile(checkpointer=self.memory)
-
-    @property
-    def graph(self):
-        return self._graph
-
-
-__all__ = ["AutoAgent"]
--- a/universal_mcp/agents/autoagent/__main__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import asyncio
-
-from loguru import logger
-from universal_mcp.agentr.registry import AgentrRegistry
-
-from universal_mcp.agents.autoagent import AutoAgent
-
-
-async def main():
-    agent = AutoAgent(
-        name="autoagent",
-        instructions="You are a helpful assistant that can use tools to help the user.",
-        model="azure/gpt-4.1",
-        registry=AgentrRegistry(),
-    )
-    async for event in agent.stream(
-        user_input="Send an email to manoj@agentr.dev",
-        thread_id="test123",
-    ):
-        logger.info(event.content)
-    # from loguru import logger; logger.debug(result)
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
--- a/universal_mcp/agents/autoagent/context.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from dataclasses import dataclass, field
-from typing import Annotated
-
-from universal_mcp.agents.autoagent.prompts import SYSTEM_PROMPT
-
-
-@dataclass(kw_only=True)
-class Context:
-    """The context for the agent."""
-
-    system_prompt: str = field(
-        default=SYSTEM_PROMPT,
-        metadata={
-            "description": "The system prompt to use for the agent's interactions. "
-            "This prompt sets the context and behavior for the agent."
-        },
-    )
-
-    model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
-        default="anthropic/claude-4-sonnet-20250514",
-        # default="vertex/gemini-2.5-flash",
-        metadata={
-            "description": "The name of the language model to use for the agent's main interactions. "
-            "Should be in the form: provider/model-name."
-        },
-    )
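
Context was a kw_only dataclass, so callers overrode fields by keyword; a minimal sketch, assuming the module was imported while it still existed:

    from universal_mcp.agents.autoagent.context import Context

    ctx = Context()  # defaults: SYSTEM_PROMPT, "anthropic/claude-4-sonnet-20250514"
    fast = Context(model="vertex/gemini-2.5-flash")  # kw_only: fields passed by name
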
--- a/universal_mcp/agents/autoagent/graph.py
+++ /dev/null
@@ -1,170 +0,0 @@
-import json
-from datetime import UTC, datetime
-from typing import cast
-
-from langchain_core.messages import AIMessage, ToolMessage
-from langchain_core.tools import tool
-from langgraph.graph import END, START, StateGraph
-from langgraph.runtime import Runtime
-from universal_mcp.tools.registry import ToolRegistry
-from universal_mcp.types import ToolFormat
-
-from universal_mcp.agents.autoagent.context import Context
-from universal_mcp.agents.autoagent.prompts import SYSTEM_PROMPT
-from universal_mcp.agents.autoagent.state import State
-from universal_mcp.agents.llm import load_chat_model
-
-
-async def build_graph(tool_registry: ToolRegistry, instructions: str = ""):
-    @tool()
-    async def search_tools(query: str, app_ids: list[str] | None = None) -> list[str]:
-        """Retrieve tools using a search query and a list of app ids. Use multiple times if you require tools for different queries."""
-        tools_list = []
-        if app_ids is not None:
-            for app_id in app_ids:
-                tools_list.extend(
-                    await tool_registry.search_tools(query, limit=10, app_id=app_id)
-                )
-        else:
-            tools_list = await tool_registry.search_tools(query, limit=10)
-        tools_list = [f"{tool['id']}: {tool['description']}" for tool in tools_list]
-        return tools_list
-
-    @tool()
-    async def ask_user(question: str) -> str:
-        """Ask the user a question. Use this tool to ask the user for any missing information for performing a task, or when you have multiple apps to choose from for performing a task."""
-        full_question = question
-        return f"ASKING_USER: {full_question}"
-
-    @tool()
-    async def load_tools(tools: list[str]) -> list[str]:
-        """Choose the tools you want to use by passing their tool ids. Loads the tools for the chosen tools and returns the tool ids."""
-        return tools
-
-    async def call_model(
-        state: State,
-        runtime: Runtime[Context],
-    ):
-        system_prompt = SYSTEM_PROMPT
-        app_ids = await tool_registry.list_all_apps()
-        connections = await tool_registry.list_connected_apps()
-        connection_ids = set([connection["app_id"] for connection in connections])
-        connected_apps = [app["id"] for app in app_ids if app["id"] in connection_ids]
-        unconnected_apps = [
-            app["id"] for app in app_ids if app["id"] not in connection_ids
-        ]
-        app_id_descriptions = (
-            "These are the apps connected to the user's account:\n"
-            + "\n".join([f"{app}" for app in connected_apps])
-        )
-        if unconnected_apps:
-            app_id_descriptions += "\n\nOther (not connected) apps: " + "\n".join(
-                [f"{app}" for app in unconnected_apps]
-            )
-
-        system_prompt = system_prompt.format(
-            system_time=datetime.now(tz=UTC).isoformat(), app_ids=app_id_descriptions
-        )
-
-        messages = [
-            {"role": "system", "content": system_prompt + "\n" + instructions},
-            *state["messages"],
-        ]
-        model = load_chat_model(runtime.context.model)
-        loaded_tools = await tool_registry.export_tools(
-            tools=state["selected_tool_ids"], format=ToolFormat.LANGCHAIN
-        )
-        model_with_tools = model.bind_tools(
-            [search_tools, ask_user, load_tools, *loaded_tools], tool_choice="auto"
-        )
-        response_raw = model_with_tools.invoke(messages)
-        response = cast(AIMessage, response_raw)
-        return {"messages": [response]}
-
-    # Define the conditional edge that determines whether to continue or not
-    def should_continue(state: State):
-        messages = state["messages"]
-        last_message = messages[-1]
-        # If there is no function call, then we finish
-        if not last_message.tool_calls:
-            return END
-        else:
-            return "tools"
-
-    def tool_router(state: State):
-        last_message = state["messages"][-1]
-        if isinstance(last_message, ToolMessage) and last_message.name == ask_user.name:
-            return END
-        else:
-            return "agent"
-
-    async def tool_node(state: State):
-        outputs = []
-        tool_ids = state["selected_tool_ids"]
-        for tool_call in state["messages"][-1].tool_calls:
-            if tool_call["name"] == ask_user.name:
-                outputs.append(
-                    ToolMessage(
-                        content=json.dumps(
-                            "The user has been asked the question, and the run will wait for the user's response."
-                        ),
-                        name=tool_call["name"],
-                        tool_call_id=tool_call["id"],
-                    )
-                )
-            elif tool_call["name"] == search_tools.name:
-                tools = await search_tools.ainvoke(tool_call["args"])
-                outputs.append(
-                    ToolMessage(
-                        content=json.dumps(tools)
-                        + "\n\nUse the load_tools tool to load the tools you want to use.",
-                        name=tool_call["name"],
-                        tool_call_id=tool_call["id"],
-                    )
-                )
-
-            elif tool_call["name"] == load_tools.name:
-                tool_ids = await load_tools.ainvoke(tool_call["args"])
-
-                outputs.append(
-                    ToolMessage(
-                        content=json.dumps(tool_ids),
-                        name=tool_call["name"],
-                        tool_call_id=tool_call["id"],
-                    )
-                )
-            else:
-                await tool_registry.export_tools(
-                    [tool_call["name"]], ToolFormat.LANGCHAIN
-                )
-                try:
-                    tool_result = await tool_registry.call_tool(
-                        tool_call["name"], tool_call["args"]
-                    )
-                    outputs.append(
-                        ToolMessage(
-                            content=json.dumps(tool_result),
-                            name=tool_call["name"],
-                            tool_call_id=tool_call["id"],
-                        )
-                    )
-                except Exception as e:
-                    outputs.append(
-                        ToolMessage(
-                            content=json.dumps("Error: " + str(e)),
-                            name=tool_call["name"],
-                            tool_call_id=tool_call["id"],
-                        )
-                    )
-        return {"messages": outputs, "selected_tool_ids": tool_ids}
-
-    builder = StateGraph(State, context_schema=Context)
-
-    builder.add_node("agent", call_model)
-    builder.add_node("tools", tool_node)
-
-    builder.add_edge(START, "agent")
-    builder.add_conditional_edges("agent", should_continue)
-    builder.add_conditional_edges("tools", tool_router)
-
-    return builder
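
build_graph returned an uncompiled StateGraph; the deleted AutoAgent wrapper compiled it with its checkpointer. Driving it directly would have looked roughly like this (the initial state shape follows State below, and passing the runtime context via the context keyword is an assumption about the LangGraph 0.6 invoke API):

    import asyncio

    from universal_mcp.agentr.registry import AgentrRegistry

    async def demo():
        builder = await build_graph(AgentrRegistry(), instructions="Be concise.")
        graph = builder.compile()  # AutoAgent passed checkpointer=self.memory here
        result = await graph.ainvoke(
            {"messages": [{"role": "user", "content": "hi"}], "selected_tool_ids": []},
            context=Context(),  # supplies runtime.context.model to call_model
        )
        print(result["messages"][-1].content)

    asyncio.run(demo())
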
--- a/universal_mcp/agents/autoagent/prompts.py
+++ /dev/null
@@ -1,9 +0,0 @@
-"""Default prompts used by the agent."""
-
-SYSTEM_PROMPT = """You are a helpful AI assistant. When you lack tools for any task you should use the `search_tools` function to unlock relevant tools. Whenever you need to ask the user for any information, or choose between multiple different applications, you can ask the user using the `ask_user` function.
-
-System time: {system_time}
-These are the list of apps available to you:
-{app_ids}
-Note that when multiple apps seem relevant for a task, you MUST ask the user to choose the app. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must ask the user to choose the app. In case the user asks you to use an app that is not connected, call the apps tools normally. You will be provided a link for connection that you should pass on to the user.
-"""
--- a/universal_mcp/agents/autoagent/state.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from typing import Annotated
-
-from langgraph.prebuilt.chat_agent_executor import AgentState
-
-
-def _enqueue(left: list, right: list) -> list:
-    """Treat left as a FIFO queue, append new items from right (preserve order),
-    keep items unique, and cap total size to 20 (drop oldest items)."""
-    max_size = 30
-    preferred_size = 20
-    if len(right) > preferred_size:
-        preferred_size = min(max_size, len(right))
-    queue = list(left or [])
-
-    for item in right[:preferred_size] or []:
-        if item in queue:
-            queue.remove(item)
-        queue.append(item)
-
-    if len(queue) > preferred_size:
-        queue = queue[-preferred_size:]
-
-    return queue
-
-
-class State(AgentState):
-    selected_tool_ids: Annotated[list[str], _enqueue]
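
The _enqueue reducer is a pure function, so its move-to-back, dedupe, and cap semantics are easy to check in isolation:

    left = ["a", "b", "c"]
    right = ["b", "d"]
    # "b" is deduped and re-appended at the back; the rest keep their order.
    assert _enqueue(left, right) == ["a", "c", "b", "d"]

    # With more than 20 incoming items the cap grows toward max_size=30,
    # and the oldest entries ("a", "b", "c") fall off the front.
    big = [str(i) for i in range(25)]
    assert _enqueue(left, big) == big
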
--- a/universal_mcp/agents/autoagent/utils.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from langchain_core.messages import BaseMessage
-
-
-def get_message_text(msg: BaseMessage) -> str:
-    """Get the text content of a message."""
-    content = msg.content
-    if isinstance(content, str):
-        return content
-    elif isinstance(content, dict):
-        return content.get("text", "")
-    else:
-        txts = [c if isinstance(c, str) else (c.get("text") or "") for c in content]
-        return "".join(txts).strip()
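
get_message_text normalized the content shapes a LangChain message can carry (plain string, single dict, or a list of content blocks); for example:

    from langchain_core.messages import AIMessage

    assert get_message_text(AIMessage(content="hi")) == "hi"

    # Providers such as Anthropic return a list of content blocks:
    blocks = AIMessage(content=[{"type": "text", "text": "hi "}, {"type": "text", "text": "there"}])
    assert get_message_text(blocks) == "hi there"
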
--- a/universal_mcp/agents/bigtool/__init__.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from langgraph.checkpoint.base import BaseCheckpointSaver
-from universal_mcp.logger import logger
-from universal_mcp.tools.registry import ToolRegistry
-
-from universal_mcp.agents.base import BaseAgent
-from universal_mcp.agents.llm import load_chat_model
-
-from .graph import build_graph
-from .prompts import SYSTEM_PROMPT
-
-
-class BigToolAgent(BaseAgent):
-    def __init__(
-        self,
-        name: str,
-        instructions: str,
-        model: str,
-        registry: ToolRegistry,
-        memory: BaseCheckpointSaver | None = None,
-        **kwargs,
-    ):
-        super().__init__(name, instructions, model, memory, **kwargs)
-        self.registry = registry
-        self.llm = load_chat_model(self.model)
-
-        logger.info(
-            f"BigToolAgent '{self.name}' initialized with model '{self.model}'."
-        )
-
-    def _build_system_message(self):
-        return SYSTEM_PROMPT.format(
-            name=self.name,
-            instructions=self.instructions,
-        )
-
-    async def _build_graph(self):
-        """Build the bigtool agent graph using the existing create_agent function."""
-        logger.info(f"Building graph for BigToolAgent '{self.name}'...")
-        try:
-            graph_builder = build_graph(
-                tool_registry=self.registry,
-                llm=self.llm,
-                system_prompt=self._build_system_message(),
-            )
-
-            compiled_graph = graph_builder.compile(checkpointer=self.memory)
-            logger.info("Graph built and compiled successfully.")
-            return compiled_graph
-        except Exception as e:
-            logger.error(f"Error building graph for BigToolAgent '{self.name}': {e}")
-            raise
-
-    @property
-    def graph(self):
-        return self._graph
-
-
-__all__ = ["BigToolAgent"]
--- a/universal_mcp/agents/bigtool/__main__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import asyncio
-
-from loguru import logger
-from universal_mcp.agentr.registry import AgentrRegistry
-
-from universal_mcp.agents.bigtool import BigToolAgent
-from universal_mcp.agents.utils import messages_to_list
-
-
-async def main():
-    agent = BigToolAgent(
-        name="bigtool",
-        instructions="You are a helpful assistant that can use tools to help the user.",
-        model="azure/gpt-4.1",
-        registry=AgentrRegistry(),
-    )
-    await agent.ainit()
-    output = await agent.invoke(
-        user_input="Send an email to manoj@agentr.dev")
-    logger.info(messages_to_list(output["messages"]))
-
-if __name__ == "__main__":
-    asyncio.run(main())