universal-mcp 0.1.24rc7__py3-none-any.whl → 0.1.24rc9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,5 @@
- from .agentr import Agentr
  from .client import AgentrClient
  from .integration import AgentrIntegration
  from .registry import AgentrRegistry

- __all__ = ["Agentr", "AgentrClient", "AgentrRegistry", "AgentrIntegration"]
+ __all__ = ["AgentrClient", "AgentrRegistry", "AgentrIntegration"]
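The `Agentr` facade is dropped from the public exports in rc9 (its module is deleted at the end of this diff); downstream code now imports the client and registry directly. A minimal sketch of the updated imports, assuming the package is installed and `AGENTR_API_KEY` is set:

```python
# Sketch only: the rc9 public surface of universal_mcp.agentr.
from universal_mcp.agentr import AgentrClient, AgentrIntegration, AgentrRegistry

client = AgentrClient()                  # falls back to the AGENTR_API_KEY env var
registry = AgentrRegistry(client=client)
```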
@@ -16,21 +16,45 @@ class AgentrClient:
  Args:
  api_key (str, optional): AgentR API key. If not provided, will look for AGENTR_API_KEY env var.
  base_url (str, optional): Base URL for AgentR API. Defaults to https://api.agentr.dev.
+ auth_token (str, optional): Auth token for AgentR API. If not provided, will look for AGENTR_AUTH_TOKEN env var.
  """

- def __init__(self, api_key: str | None = None, base_url: str | None = None):
+ def __init__(
+ self, api_key: str | None = None, base_url: str | None = None, auth_token: str | None = None, **kwargs
+ ):
  base_url = base_url or os.getenv("AGENTR_BASE_URL", "https://api.agentr.dev")
  self.base_url = f"{base_url.rstrip('/')}/v1"
- self.api_key = api_key or os.getenv("AGENTR_API_KEY")
- if not self.api_key:
- raise ValueError("No API key provided and AGENTR_API_KEY not found in environment variables")
- self.client = httpx.Client(
- base_url=self.base_url,
- headers={"X-API-KEY": self.api_key},
- timeout=30,
- follow_redirects=True,
- verify=False,
- )
+ api_key = api_key or os.getenv("AGENTR_API_KEY")
+ if api_key:
+ self.client = httpx.Client(
+ base_url=self.base_url,
+ headers={"X-API-KEY": api_key, "accept": "application/json"},
+ timeout=30,
+ follow_redirects=True,
+ verify=False,
+ )
+ me_data = self.me()
+ logger.debug(f"Client initialized with user: {me_data['email']}")
+ elif auth_token:
+ logger.debug("Initializing client with auth token")
+ self.client = httpx.Client(
+ base_url=self.base_url,
+ headers={"Authorization": f"Bearer {auth_token}", "accept": "application/json"},
+ timeout=30,
+ follow_redirects=True,
+ verify=False,
+ )
+ me_data = self.me()
+ logger.debug(f"Client initialized with user: {me_data['email']}")
+ else:
+ raise ValueError("No API key or auth token provided")
+
+ def me(self):
+ response = self.client.get("/users/me/")
+ logger.debug(f"Me response: {response.status_code}")
+ response.raise_for_status()
+ data = response.json()
+ return data

  def get_credentials(self, app_id: str) -> dict[str, Any]:
  """Get credentials for an integration from the AgentR API.
@@ -49,6 +73,7 @@ class AgentrClient:
  "/credentials/",
  params={"app_id": app_id},
  )
+ logger.debug(f"Credentials response: {response.status_code}")
  if response.status_code == 404:
  logger.warning(f"No credentials found for app '{app_id}'. Requesting authorization...")
  action_url = self.get_authorization_url(app_id)
@@ -68,12 +93,12 @@ class AgentrClient:
  Raises:
  HTTPError: If the API request fails.
  """
- response = self.client.post("/connections/authorize", json={"app_id": app_id})
+ response = self.client.post("/connections/authorize/", json={"app_id": app_id})
  response.raise_for_status()
  url = response.json().get("authorize_url")
  return f"Please ask the user to visit the following url to authorize the application: {url}. Render the url in proper markdown format with a clickable link."

- def list_all_apps(self) -> list[dict[str, Any]]:
+ def list_all_apps(self):
  """Fetch available apps from AgentR API.

  Returns:
@@ -86,7 +111,7 @@ class AgentrClient:
  response.raise_for_status()
  return response.json().get("items", [])

- def list_my_apps(self) -> list[dict[str, Any]]:
+ def list_my_apps(self):
  """Fetch user apps from AgentR API.

  Returns:
@@ -96,7 +121,7 @@ class AgentrClient:
  response.raise_for_status()
  return response.json().get("items", [])

- def list_my_connections(self) -> list[dict[str, Any]]:
+ def list_my_connections(self):
  """Fetch user connections from AgentR API.

  Returns:
@@ -106,7 +131,7 @@ class AgentrClient:
  response.raise_for_status()
  return response.json().get("items", [])

- def get_app_details(self, app_id: str) -> dict[str, Any]:
+ def get_app_details(self, app_id: str):
  """Fetch a specific app from AgentR API.

  Args:
@@ -118,11 +143,11 @@ class AgentrClient:
  Raises:
  httpx.HTTPError: If the API request fails.
  """
- response = self.client.get(f"/apps/{app_id}")
+ response = self.client.get(f"/apps/{app_id}/")
  response.raise_for_status()
  return response.json()

- def list_all_tools(self) -> list[dict[str, Any]]:
+ def list_all_tools(self, app_id: str | None = None):
  """List all available tools from the AgentR API.

  Note: In the backend, tools are globally listed and not tied to a
@@ -131,11 +156,14 @@ class AgentrClient:
  Returns:
  List[Dict[str, Any]]: A list of tool configurations.
  """
- response = self.client.get("/tools/")
+ params = {}
+ if app_id:
+ params["app_id"] = app_id
+ response = self.client.get("/tools/", params=params)
  response.raise_for_status()
  return response.json().get("items", [])

- def get_tool_details(self, tool_id: str) -> dict[str, Any]:
+ def get_tool_details(self, tool_id: str):
  """Fetch a specific tool configuration from the AgentR API.

  Args:
@@ -147,11 +175,11 @@ class AgentrClient:
  Raises:
  httpx.HTTPError: If the API request fails.
  """
- response = self.client.get(f"/tools/{tool_id}")
+ response = self.client.get(f"/tools/{tool_id}/")
  response.raise_for_status()
  return response.json()

- def search_all_apps(self, query: str, limit: int = 2) -> list[dict[str, Any]]:
+ def search_all_apps(self, query: str, limit: int = 2):
  """Search for apps from the AgentR API.

  Args:
@@ -165,13 +193,17 @@ class AgentrClient:
  response.raise_for_status()
  return response.json().get("items", [])

- def search_all_tools(self, query: str, limit: int = 2) -> list[dict[str, Any]]:
+ def search_all_tools(self, query: str, limit: int = 2, app_id: str | None = None):
  """Search for tools from the AgentR API.

  Args:
  query (str): The query to search for.
  limit (int, optional): The number of tools to return. Defaults to 2.
+ app_id (str, optional): The ID of the app to search tools for.
  """
- response = self.client.get("/tools/", params={"search": query, "limit": limit})
+ params = {"search": query, "limit": limit}
+ if app_id:
+ params["app_id"] = app_id
+ response = self.client.get("/tools/", params=params)
  response.raise_for_status()
  return response.json().get("items", [])
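For orientation, a hedged sketch of the two construction paths the reworked `__init__` now supports; the key and token values are placeholders, and running this requires a reachable AgentR backend.

```python
from universal_mcp.agentr.client import AgentrClient

# API-key path: X-API-KEY header, eager validation via the new me() helper.
client = AgentrClient(api_key="example-api-key")          # placeholder value

# Auth-token path (new in rc9): Authorization: Bearer <token> header instead.
client = AgentrClient(auth_token="example-oauth-token")   # placeholder value

print(client.me()["email"])  # both paths log the authenticated user's email

# Several endpoints gained trailing slashes (/connections/authorize/, /apps/{id}/,
# /tools/{id}/), and tool listing/search can now be scoped to an app:
tools = client.search_all_tools("send email", limit=5, app_id="google-mail")
```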
@@ -1,9 +1,12 @@
+ from typing import Any
+
  from loguru import logger

  from universal_mcp.agentr.client import AgentrClient
  from universal_mcp.applications import app_from_slug
  from universal_mcp.tools.manager import ToolManager, _get_app_and_tool_name
  from universal_mcp.tools.registry import ToolRegistry
+ from universal_mcp.types import ToolConfig, ToolFormat

  from .integration import AgentrIntegration

@@ -15,9 +18,10 @@ class AgentrRegistry(ToolRegistry):
  """Initialize the AgentR platform manager."""

  self.client = client or AgentrClient(**kwargs)
+ self.tool_manager = ToolManager()
  logger.debug("AgentrRegistry initialized successfully")

- def list_apps(self) -> list[dict[str, str]]:
+ async def list_all_apps(self) -> list[dict[str, Any]]:
  """Get list of available apps from AgentR.

  Returns:
@@ -32,7 +36,7 @@ class AgentrRegistry(ToolRegistry):
  logger.error(f"Error fetching apps from AgentR: {e}")
  return []

- def get_app_details(self, app_id: str) -> dict[str, str]:
+ async def get_app_details(self, app_id: str) -> dict[str, Any]:
  """Get detailed information about a specific app from AgentR.

  Args:
@@ -48,40 +52,131 @@ class AgentrRegistry(ToolRegistry):
  logger.error(f"Error getting details for app {app_id}: {e}")
  return {}

- def load_tools(self, tools: list[str] | None, tool_manager: ToolManager) -> None:
+ async def search_apps(
+ self,
+ query: str,
+ limit: int = 10,
+ ) -> list[dict[str, Any]]:
+ """Search for apps by a query.
+
+ Args:
+ query: The query to search for
+ limit: The number of apps to return
+
+ Returns:
+ List of app dictionaries matching the query
+ """
+ try:
+ apps = self.client.search_all_apps(query, limit)
+ return apps
+ except Exception as e:
+ logger.error(f"Error searching apps from AgentR: {e}")
+ return []
+
+ async def list_tools(
+ self,
+ app_id: str,
+ ) -> list[dict[str, Any]]:
+ """List all tools available on the platform, filter by app_id.
+
+ Args:
+ app_id: The ID of the app to list tools for
+
+ Returns:
+ List of tool dictionaries for the specified app
+ """
+ try:
+ all_tools = self.client.list_all_tools(app_id=app_id)
+ return all_tools
+ except Exception as e:
+ logger.error(f"Error listing tools for app {app_id}: {e}")
+ return []
+
+ async def search_tools(
+ self,
+ query: str,
+ limit: int = 2,
+ app_id: str | None = None,
+ ) -> list[dict[str, Any]]:
+ """Search for tools by a query.
+
+ Args:
+ query: The query to search for
+ limit: The number of tools to return
+ app_id: The ID of the app to list tools for
+ Returns:
+ List of tool dictionaries matching the query
+ """
+ try:
+ tools = self.client.search_all_tools(query, limit, app_id)
+ return tools
+ except Exception as e:
+ logger.error(f"Error searching tools from AgentR: {e}")
+ return []
+
+ async def export_tools(
+ self,
+ tools: list[str] | ToolConfig,
+ format: ToolFormat,
+ ) -> str:
+ """Export given tools to required format.
+
+ Args:
+ tools: List of tool identifiers to export
+ format: The format to export tools to (native, mcp, langchain, openai)
+
+ Returns:
+ String representation of tools in the specified format
+ """
+ try:
+ # Clear tools from tool manager before loading new tools
+ self.tool_manager.clear_tools()
+ if isinstance(tools, ToolConfig):
+ print("Loading tools from tool config")
+ self._load_tools_from_tool_config(tools, self.tool_manager)
+ else:
+ print("Loading tools from list")
+ self._load_agentr_tools_from_list(tools, self.tool_manager)
+ loaded_tools = self.tool_manager.list_tools(format=format)
+ logger.info(f"Exporting {len(loaded_tools)} tools to {format} format")
+ return loaded_tools
+ except Exception as e:
+ logger.error(f"Error exporting tools: {e}")
+ return ""
+
+ def _load_tools(self, app_name: str, tool_names: list[str], tool_manager: ToolManager) -> None:
+ """Helper method to load and register tools for an app."""
+ app = app_from_slug(app_name)
+ integration = AgentrIntegration(name=app_name, client=self.client)
+ app_instance = app(integration=integration)
+ tool_manager.register_tools_from_app(app_instance, tool_names=tool_names)
+
+ def _load_agentr_tools_from_list(self, tools: list[str], tool_manager: ToolManager) -> None:
  """Load tools from AgentR and register them as tools.

  Args:
- tools: The list of tools to load ( prefixed with app name )
+ tools: The list of tools to load (prefixed with app name)
  tool_manager: The tool manager to register tools with
  """
- if not tools:
- return
- logger.info(f"Loading all actions for app: {tools}")
- # Group all tools by app_name, tools
+ logger.info(f"Loading all tools: {tools}")
  tools_by_app = {}
  for tool_name in tools:
  app_name, _ = _get_app_and_tool_name(tool_name)
- if app_name not in tools_by_app:
- tools_by_app[app_name] = []
- tools_by_app[app_name].append(tool_name)
+ tools_by_app.setdefault(app_name, []).append(tool_name)

  for app_name, tool_names in tools_by_app.items():
- app = app_from_slug(app_name)
- integration = AgentrIntegration(name=app_name)
- # TODO: Import with name param, some apps are written incorrectly and hence passing name fails
- app_instance = app(integration=integration)
- tool_manager.register_tools_from_app(app_instance, tool_names=tool_names)
- return
+ self._load_tools(app_name, tool_names, tool_manager)

- def search_tools(self, query: str, limit: int = 20) -> list[str]:
- """Search for tools in AgentR.
+ def _load_tools_from_tool_config(self, tool_config: ToolConfig, tool_manager: ToolManager) -> None:
+ """Load tools from ToolConfig and register them as tools.

  Args:
- query: The query to search for
-
- Returns:
- List of tool names
+ tool_config: The tool configuration containing app names and tools
+ tool_manager: The tool manager to register tools with
  """
- return self.client.search_all_tools(query, limit)
-
+ for app_name, tool_data in tool_config.agentrServers.items():
+ self._load_tools(app_name, tool_data.tools, tool_manager)
+
+ async def call_tool(self, tool_name: str, tool_args: dict[str, Any]) -> dict[str, Any]:
+ """Call a tool with the given name and arguments."""
+ return await self.tool_manager.call_tool(tool_name, tool_args)
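A hedged usage sketch of the expanded registry API: discovery methods are now async, `export_tools` replaces `load_tools`, and calls route through the registry's internal `ToolManager`. The tool arguments below are illustrative, not taken from the package.

```python
import asyncio

from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.types import ToolFormat


async def main() -> None:
    registry = AgentrRegistry()  # assumes AGENTR_API_KEY is configured

    hits = await registry.search_tools("send email", limit=2, app_id="google-mail")
    tool_ids = [hit["id"] for hit in hits]

    # Export for a target framework, then invoke through the same registry.
    langchain_tools = await registry.export_tools(tool_ids, ToolFormat.LANGCHAIN)
    result = await registry.call_tool(tool_ids[0], {"to": "user@example.com"})  # illustrative args
    print(len(langchain_tools), result)


asyncio.run(main())
```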
@@ -1,4 +1,4 @@
- from universal_mcp.agents.auto import AutoAgent
+ from universal_mcp.agents.autoagent import AutoAgent
  from universal_mcp.agents.base import BaseAgent
  from universal_mcp.agents.react import ReactAgent
  from universal_mcp.agents.simple import SimpleAgent
@@ -12,13 +12,12 @@ from pydantic import BaseModel
  from typing_extensions import TypedDict

  from universal_mcp.agentr.registry import AgentrRegistry
+ from universal_mcp.agents.base import BaseAgent
+ from universal_mcp.agents.llm import load_chat_model
  from universal_mcp.tools import ToolManager
  from universal_mcp.tools.adapters import ToolFormat
  from universal_mcp.tools.registry import ToolRegistry

- from universal_mcp.agents.base import BaseAgent
- from universal_mcp.agents.llm import load_chat_model
-
  # Auto Agent
  # Working
  # 1. For every message, and given list of tools, figure out if external tools are needed
@@ -1,10 +1,10 @@
+ from langgraph.checkpoint.base import BaseCheckpointSaver
+
  from universal_mcp.agentr.registry import AgentrRegistry
+ from universal_mcp.agents.autoagent.graph import build_graph
  from universal_mcp.agents.base import BaseAgent
- from universal_mcp.tools.manager import ToolManager
  from universal_mcp.tools.registry import ToolRegistry

- from universal_mcp.agents.autoagent.graph import create_agent
-

  class AutoAgent(BaseAgent):
  def __init__(
@@ -12,20 +12,16 @@ class AutoAgent(BaseAgent):
  name: str,
  instructions: str,
  model: str,
- tool_registry: ToolRegistry | None = None,
- tool_manager: ToolManager | None = None,
+ memory: BaseCheckpointSaver | None = None,
+ registry: ToolRegistry | None = None,
+ **kwargs,
  ):
- super().__init__(name, instructions, model, tool_registry)
- self.tool_registry = tool_registry or AgentrRegistry()
- self.tool_manager = tool_manager or ToolManager()
- self.model = model
- self.name = name
- self.instructions = instructions
- self._graph = self._build_graph()
+ super().__init__(name, instructions, model, memory, **kwargs)
+ self.tool_registry = registry or AgentrRegistry()

- def _build_graph(self):
- builder = create_agent(self.tool_registry, self.tool_manager, self.instructions)
- return builder.compile()
+ async def _build_graph(self):
+ builder = await build_graph(self.tool_registry, self.instructions)
+ return builder.compile(checkpointer=self.memory)

  @property
  def graph(self):
@@ -8,10 +8,10 @@ async def main():
  agent = AutoAgent(
  name="autoagent",
  instructions="You are a helpful assistant that can use tools to help the user.",
- model="azure/gpt-4o",
+ model="anthropic/claude-4-sonnet-20250514",
  tool_registry=AgentrRegistry(),
  )
- result = await agent.run(
+ result = await agent.invoke(
  user_input="Send an email to Manoj from my google mail account, manoj@agentr.dev, with the subject 'Hello from auto agent' and the body 'testing'"
  )
  print(result)
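A hedged sketch of driving the reworked `AutoAgent`: the graph is now built lazily on first use, the entry point is `invoke()` rather than `run()`, and the constructor accepts `registry` and an optional `memory` checkpointer.

```python
import asyncio

from langgraph.checkpoint.memory import MemorySaver

from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.agents.autoagent import AutoAgent


async def main() -> None:
    agent = AutoAgent(
        name="autoagent",
        instructions="You are a helpful assistant that can use tools to help the user.",
        model="anthropic/claude-4-sonnet-20250514",
        registry=AgentrRegistry(),  # constructor keyword renamed from tool_registry
        memory=MemorySaver(),       # optional checkpointer
    )
    result = await agent.invoke(user_input="Which apps are connected to my account?")
    print(result["messages"][-1].content)


asyncio.run(main())
```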
@@ -18,6 +18,7 @@ class Context:

  model: Annotated[str, {"__template_metadata__": {"kind": "llm"}}] = field(
  default="anthropic/claude-4-sonnet-20250514",
+ # default="vertex/gemini-2.5-flash",
  metadata={
  "description": "The name of the language model to use for the agent's main interactions. "
  "Should be in the form: provider/model-name."
@@ -7,49 +7,69 @@ from langchain_core.tools import tool
  from langgraph.graph import END, START, StateGraph
  from langgraph.runtime import Runtime

- from universal_mcp.agents.llm import load_chat_model
- from universal_mcp.tools.manager import ToolManager
- from universal_mcp.tools.registry import ToolRegistry
- from universal_mcp.types import ToolFormat
-
  from universal_mcp.agents.autoagent.context import Context
  from universal_mcp.agents.autoagent.prompts import SYSTEM_PROMPT
  from universal_mcp.agents.autoagent.state import State
+ from universal_mcp.agents.llm import load_chat_model
+ from universal_mcp.tools.registry import ToolRegistry
+ from universal_mcp.types import ToolFormat


- def create_agent(tool_registry: ToolRegistry, tool_manager: ToolManager, instructions: str = ""):
+ async def build_graph(tool_registry: ToolRegistry, instructions: str = ""):
  @tool()
- def retrieve_tools(query: str) -> list[str]:
- """Retrieve tools using a search query. Use multiple times if you require tools for different tasks."""
- tools = tool_registry.search_tools(query)
- my_connections = tool_registry.client.list_my_connections()
- connected_apps = set(connection["app_id"] for connection in my_connections)
- filtered_tools = [tool for tool in tools if tool["app_id"] in connected_apps]
- if len(filtered_tools) == 0:
- return tools
- return filtered_tools
-
+ async def search_tools(query: str, app_ids: list[str] | None = None) -> list[str]:
+ """Retrieve tools using a search query and a list of app ids. Use multiple times if you require tools for different queries."""
+ tools_list = []
+ if app_ids is not None:
+ for app_id in app_ids:
+ tools_list.extend(await tool_registry.search_tools(query, limit=10, app_id=app_id))
+ else:
+ tools_list = await tool_registry.search_tools(query, limit=10)
+ tools_list = [f"{tool['id']}: {tool['description']}" for tool in tools_list]
+ return tools_list
+
  @tool()
- def ask_user(question: str) -> str:
+ async def ask_user(question: str) -> str:
  """Ask the user a question. Use this tool to ask the user for any missing information for performing a task, or when you have multiple apps to choose from for performing a task."""
  full_question = question
  return f"ASKING_USER: {full_question}"
+
+ @tool()
+ async def load_tools(tools: list[str]) -> list[str]:
+ """Choose the tools you want to use by passing their tool ids. Loads the tools for the chosen tools and returns the tool ids."""
+ return tools
+

- def call_model(
+ async def call_model(
  state: State,
  runtime: Runtime[Context],
  ):
  system_prompt = runtime.context.system_prompt if runtime.context.system_prompt else SYSTEM_PROMPT
- system_prompt = system_prompt.format(system_time=datetime.now(tz=UTC).isoformat())
-
+ app_ids = await tool_registry.list_all_apps()
+ connections = tool_registry.client.list_my_connections()
+ connection_ids = set([connection["app_id"] for connection in connections])
+ connected_apps = [app['id'] for app in app_ids if app["id"] in connection_ids]
+ unconnected_apps = [app['id'] for app in app_ids if app["id"] not in connection_ids]
+ app_id_descriptions = "These are the apps connected to the user's account:\n" + "\n".join([f"{app}" for app in connected_apps])
+ if unconnected_apps:
+ app_id_descriptions += "\n\nOther (not connected) apps: " + "\n".join([f"{app}" for app in unconnected_apps])
+ print(app_id_descriptions)
+ system_prompt = system_prompt.format(system_time=datetime.now(tz=UTC).isoformat(), app_ids=app_id_descriptions)
+
  messages = [{"role": "system", "content": system_prompt + "\n" + instructions}, *state["messages"]]
  model = load_chat_model(runtime.context.model)
  # Load tools from tool registry
- tool_registry.load_tools(tools=state["selected_tool_ids"], tool_manager=tool_manager)
- loaded_tools = tool_manager.list_tools(format=ToolFormat.LANGCHAIN)
- model_with_tools = model.bind_tools([retrieve_tools, ask_user, *loaded_tools], tool_choice="auto")
- response = cast(AIMessage, model_with_tools.invoke(messages))
- return {"messages": [response]}
+ loaded_tools = await tool_registry.export_tools(tools=state["selected_tool_ids"], format=ToolFormat.LANGCHAIN)
+ model_with_tools = model.bind_tools([search_tools, ask_user, load_tools, *loaded_tools], tool_choice="auto")
+ response_raw = model_with_tools.invoke(messages)
+ token_usage = state.get("token_usage", {})
+ for key in ["input_tokens", "output_tokens", "total_tokens"]:
+ if key in token_usage:
+ token_usage[key] += response_raw.usage_metadata[key]
+ else:
+ token_usage[key] = response_raw.usage_metadata[key]
+ response = cast(AIMessage, response_raw)
+ return {"messages": [response], "token_usage": token_usage}

  # Define the conditional edge that determines whether to continue or not
  def should_continue(state: State):
@@ -58,10 +78,9 @@ def create_agent(tool_registry: ToolRegistry, tool_manager: ToolManager, instruc
  # If there is no function call, then we finish
  if not last_message.tool_calls:
  return END
- # Otherwise if there is, we continue
  else:
  return "tools"
-
+
  def tool_router(state: State):
  last_message = state["messages"][-1]
  if isinstance(last_message, ToolMessage):
@@ -69,44 +88,67 @@ def create_agent(tool_registry: ToolRegistry, tool_manager: ToolManager, instruc
  else:
  return END

-
  async def tool_node(state: State):
  outputs = []
  tool_ids = state["selected_tool_ids"]
  for tool_call in state["messages"][-1].tool_calls:
- if tool_call["name"] == retrieve_tools.name:
- tool_result = retrieve_tools.invoke(tool_call["args"])
- tool_ids = [tool["id"] for tool in tool_result]
+ if tool_call["name"] == ask_user.name:
  outputs.append(
  ToolMessage(
- content=json.dumps(tool_result),
+ content=json.dumps(
+ "The user has been asked the question, and the run will wait for the user's response."
+ ),
  name=tool_call["name"],
  tool_call_id=tool_call["id"],
  )
  )
- elif tool_call["name"] == ask_user.name:
+ ai_message = AIMessage(content=tool_call["args"]["question"])
+ outputs.append(ai_message)
+ elif tool_call["name"] == search_tools.name:
+ tools = await search_tools.ainvoke(tool_call["args"])
  outputs.append(
  ToolMessage(
- content=json.dumps("The user has been asked the question, and the run will wait for the user's response."),
+ content=json.dumps(tools)+"\n\nUse the load_tools tool to load the tools you want to use.",
  name=tool_call["name"],
  tool_call_id=tool_call["id"],
  )
  )
- ai_message = AIMessage(content=tool_call["args"]["question"])
- outputs.append(ai_message)
- else:
- tool_manager.clear_tools()
- tool_registry.load_tools([tool_call["name"]], tool_manager=tool_manager)
- tool_result = await tool_manager.call_tool(tool_call["name"], tool_call["args"])
+
+ elif tool_call["name"] == load_tools.name:
+ tool_ids = await load_tools.ainvoke(tool_call["args"])
+ print(tool_ids)
  outputs.append(
  ToolMessage(
- content=json.dumps(tool_result),
+ content=json.dumps(tool_ids),
  name=tool_call["name"],
  tool_call_id=tool_call["id"],
  )
  )
+ else:
+ await tool_registry.export_tools([tool_call["name"]], ToolFormat.LANGCHAIN)
+ try:
+ tool_result = await tool_registry.call_tool(tool_call["name"], tool_call["args"])
+ outputs.append(
+ ToolMessage(
+ content=json.dumps(tool_result),
+ name=tool_call["name"],
+ tool_call_id=tool_call["id"],
+ )
+ )
+ except Exception as e:
+ outputs.append(
+ ToolMessage(
+ content=json.dumps("Error: "+str(e)),
+ name=tool_call["name"],
+ tool_call_id=tool_call["id"],
+ )
+ )
  return {"messages": outputs, "selected_tool_ids": tool_ids}

+
+
+
+
  builder = StateGraph(State, context_schema=Context)

  builder.add_node("agent", call_model)
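The tool loop changes shape in rc9: the model first calls `search_tools`, then `load_tools` to pin the selected tool ids, and the loaded tools are finally executed through `tool_registry.call_tool`. The model node also aggregates token usage across turns into the agent state; a standalone sketch of that accumulation (the helper name is ours, not the package's):

```python
# Mirrors the per-key accumulation performed in call_model over usage_metadata.
def accumulate_token_usage(running: dict[str, int], usage_metadata: dict[str, int]) -> dict[str, int]:
    totals = dict(running)
    for key in ["input_tokens", "output_tokens", "total_tokens"]:
        totals[key] = totals.get(key, 0) + usage_metadata[key]
    return totals


print(accumulate_token_usage({}, {"input_tokens": 120, "output_tokens": 35, "total_tokens": 155}))
# -> {'input_tokens': 120, 'output_tokens': 35, 'total_tokens': 155}
```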
@@ -1,5 +1,9 @@
  """Default prompts used by the agent."""

- SYSTEM_PROMPT = """You are a helpful AI assistant. When you lack tools for any task you should use the `retrieve_tools` function to unlock relevant tools. Whenever you need to ask the user for any information, or choose between multiple different applications, you can ask the user using the `ask_user` function.
+ SYSTEM_PROMPT = """You are a helpful AI assistant. When you lack tools for any task you should use the `search_tools` function to unlock relevant tools. Whenever you need to ask the user for any information, or choose between multiple different applications, you can ask the user using the `ask_user` function.

- System time: {system_time}"""
+ System time: {system_time}
+ These are the list of apps available to you:
+ {app_ids}
+ Note that when multiple apps seem relevant for a task, you MUST ask the user to choose the app. Prefer connected apps over unconnected apps while breaking a tie. If more than one relevant app (or none of the relevant apps) are connected, you must ask the user to choose the app.
+ """
@@ -25,3 +25,4 @@ def _enqueue(left: list, right: list) -> list:

  class State(AgentState):
  selected_tool_ids: Annotated[list[str], _enqueue]
+ token_usage: dict[str, int]
@@ -1,5 +1,7 @@
+ import asyncio
+
  from universal_mcp.agentr.registry import AgentrRegistry
- from universal_mcp.agents.autoagent import create_agent
+ from universal_mcp.agents.autoagent import build_graph
  from universal_mcp.tools import ToolManager

  tool_registry = AgentrRegistry()
@@ -7,16 +9,14 @@ tool_manager = ToolManager()



- apps = tool_registry.client.list_all_apps()
- names = [app["name"] for app in apps]
-
- instructions = """
- You are a helpful assistant that can use tools to help the user. If a task requires multiple steps, you should perform separate different searches for different actions.
- These are the list of applications you can use to help the user:
- {names}
- """
- graph = create_agent(tool_registry, tool_manager, instructions=instructions)
+ async def main():
+ instructions = """
+ You are a helpful assistant that can use tools to help the user. If a task requires multiple steps, you should perform separate different searches for different actions. Prefer completing one action before searching for another.
+ """
+ graph = await build_graph(tool_registry, instructions=instructions)
+ return graph

+ graph = asyncio.run(main())
@@ -7,7 +7,6 @@ from langgraph.checkpoint.base import BaseCheckpointSaver
  from langgraph.checkpoint.memory import MemorySaver
  from langgraph.types import Command

- from .llm import load_chat_model
  from .utils import RichCLI


@@ -18,15 +17,19 @@ class BaseAgent:
  self.model = model
  self.memory = memory or MemorySaver()
  self._graph = None
- self.llm = load_chat_model(model)
+ self._initialized = False
  self.cli = RichCLI()

+ async def ainit(self):
+ if not self._initialized:
+ self._graph = await self._build_graph()
+ self._initialized = True
+
  async def _build_graph(self):
  raise NotImplementedError("Subclasses must implement this method")

  async def stream(self, thread_id: str, user_input: str):
- if self._graph is None:
- self._graph = await self._build_graph()
+ await self.ainit()
  async for event, _ in self._graph.astream(
  {"messages": [{"role": "user", "content": user_input}]},
  config={"configurable": {"thread_id": thread_id}},
@@ -36,14 +39,14 @@ class BaseAgent:
  yield event

  async def stream_interactive(self, thread_id: str, user_input: str):
+ await self.ainit()
  with self.cli.display_agent_response_streaming(self.name) as stream_updater:
- async for event in self.astream(thread_id, user_input):
+ async for event in self.stream(thread_id, user_input):
  stream_updater.update(event.content)

- async def run(self, user_input: str, thread_id: str = str(uuid4())):
+ async def invoke(self, user_input: str, thread_id: str = str(uuid4())):
  """Run the agent"""
- if not self._graph:
- self._graph = await self._build_graph()
+ await self.ainit()
  return await self._graph.ainvoke(
  {"messages": [{"role": "user", "content": user_input}]},
  config={"configurable": {"thread_id": thread_id}},
@@ -53,8 +56,7 @@ class BaseAgent:
  async def run_interactive(self, thread_id: str = str(uuid4())):
  """Main application loop"""

- if not self._graph:
- self._graph = await self._build_graph()
+ await self.ainit()
  # Display welcome
  self.cli.display_welcome(self.name)
 
@@ -1,12 +1,12 @@
  from langchain_anthropic import ChatAnthropic
  from langchain_core.language_models import BaseChatModel
  from langchain_google_vertexai.model_garden import ChatAnthropicVertex
+ from langchain_google_vertexai import ChatVertexAI
  from langchain_openai import AzureChatOpenAI


  def load_chat_model(fully_specified_name: str, tags: list[str] | None = None) -> BaseChatModel:
  """Load a chat model from a fully specified name.
-
  Args:
  fully_specified_name (str): String in the format 'provider/model'.
  """
@@ -19,6 +19,8 @@ def load_chat_model(fully_specified_name: str, tags: list[str] | None = None) ->
  ) # pyright: ignore[reportCallIssue]
  elif provider == "azure":
  return AzureChatOpenAI(model=model, api_version="2024-12-01-preview", azure_deployment=model, tags=tags)
+ elif provider == "vertex":
+ return ChatVertexAI(model=model, temperature=0.1, tags=tags)
  else:
  raise ValueError(f"Unsupported provider: {provider}")
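The loader gains a `vertex` provider backed by `ChatVertexAI`. A hedged example using the Gemini model name referenced in `context.py`; Google Cloud credentials must already be configured for the call to succeed.

```python
from universal_mcp.agents.llm import load_chat_model

llm = load_chat_model("vertex/gemini-2.5-flash")  # new provider branch, temperature pinned to 0.1
print(llm.invoke("Say hello in one word").content)
```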
 
@@ -2,9 +2,12 @@ from langgraph.checkpoint.base import BaseCheckpointSaver
  from langgraph.prebuilt import create_react_agent
  from loguru import logger

+ from universal_mcp.agentr.registry import AgentrRegistry
  from universal_mcp.agents.base import BaseAgent
- from universal_mcp.agents.tools import load_agentr_tools, load_mcp_tools
- from universal_mcp.types import ToolConfig
+ from universal_mcp.agents.llm import load_chat_model
+ from universal_mcp.agents.tools import load_mcp_tools
+ from universal_mcp.tools.registry import ToolRegistry
+ from universal_mcp.types import ToolConfig, ToolFormat


  class ReactAgent(BaseAgent):
@@ -15,18 +18,29 @@ class ReactAgent(BaseAgent):
  model: str,
  memory: BaseCheckpointSaver | None = None,
  tools: ToolConfig | None = None,
+ registry: ToolRegistry | None = None,
  max_iterations: int = 10,
  **kwargs,
  ):
  super().__init__(name, instructions, model, memory, **kwargs)
+ self.llm = load_chat_model(model)
  self.tools = tools
  self.max_iterations = max_iterations
+ self.registry = registry

  async def _build_graph(self):
  if self.tools:
  config = self.tools.model_dump(exclude_none=True)
- agentr_tools = await load_agentr_tools(config["agentrServers"]) if config.get("agentrServers") else []
+ if config.get("agentrServers") and not self.registry:
+ raise ValueError("Agentr servers are configured but no registry is provided")
+ agentr_tools = (
+ await self.registry.export_tools(self.tools, ToolFormat.LANGCHAIN)
+ if config.get("agentrServers")
+ else []
+ )
+ print(agentr_tools)
  mcp_tools = await load_mcp_tools(config["mcpServers"]) if config.get("mcpServers") else []
+ print(mcp_tools)
  tools = agentr_tools + mcp_tools
  else:
  tools = []
@@ -39,7 +53,7 @@ class ReactAgent(BaseAgent):
  )

  def _build_system_message(self) -> str:
- system_message = f"""You are {self.name}. {self.instructions}
+ system_message = f"""You are {self.name}.

  You have access to various tools that can help you answer questions and complete tasks. When you need to use a tool:

@@ -47,7 +61,10 @@ You have access to various tools that can help you answer questions and complete
  2. Call the appropriate tool with the right parameters
  3. Use the tool results to provide a comprehensive answer

- Always explain your reasoning and be thorough in your responses. If you need to use multiple tools to answer a question completely, do so."""
+ Always explain your reasoning and be thorough in your responses. If you need to use multiple tools to answer a question completely, do so.
+
+ {self.instructions}
+ """
  return system_message


@@ -57,10 +74,11 @@ if __name__ == "__main__":
  agent = ReactAgent(
  "Universal React Agent",
  instructions="",
- model="gpt-4o",
+ model="azure/gpt-4o",
  tools=ToolConfig(agentrServers={"google-mail": {"tools": ["send_email"]}}),
+ registry=AgentrRegistry(),
  )
  result = asyncio.run(
- agent.run(user_input="Send an email with the subject 'testing react agent' to manoj@agentr.dev")
+ agent.invoke(user_input="Send an email with the subject 'testing react agent' to manoj@agentr.dev")
  )
  print(result["messages"][-1].content)
@@ -1,6 +1,7 @@
  import asyncio
  from typing import Annotated

+ from langgraph.checkpoint.base import BaseCheckpointSaver
  from langgraph.graph import END, START, StateGraph
  from langgraph.graph.message import add_messages
  from typing_extensions import TypedDict
@@ -14,27 +15,26 @@ class State(TypedDict):


  class SimpleAgent(BaseAgent):
- def __init__(self, name: str, instructions: str, model: str):
- super().__init__(name, instructions, model)
+ def __init__(self, name: str, instructions: str, model: str, memory: BaseCheckpointSaver = None, **kwargs):
+ super().__init__(name, instructions, model, memory, **kwargs)
  self.llm = load_chat_model(model)
- self._graph = self._build_graph()

- def _build_graph(self):
+ async def _build_graph(self):
  graph_builder = StateGraph(State)

- def chatbot(state: State):
- return {"messages": [self.llm.invoke(state["messages"])]}
+ async def chatbot(state: State):
+ messages = [
+ {"role": "system", "content": self.instructions},
+ *state["messages"],
+ ]
+ return {"messages": [await self.llm.ainvoke(messages)]}

  graph_builder.add_node("chatbot", chatbot)
  graph_builder.add_edge(START, "chatbot")
  graph_builder.add_edge("chatbot", END)
  return graph_builder.compile(checkpointer=self.memory)

- @property
- def graph(self):
- return self._graph
-

  if __name__ == "__main__":
- agent = SimpleAgent("Simple Agent", "You are a helpful assistant", "openrouter/auto")
+ agent = SimpleAgent("Simple Agent", "You are a helpful assistant", "azure/gpt-4o")
  asyncio.run(agent.run_interactive())
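A hedged sketch of the updated `SimpleAgent`: it now accepts an optional checkpointer, prepends its instructions as a system message on every turn, and is driven through the async `invoke()` inherited from `BaseAgent`. The Azure model string assumes matching credentials are configured.

```python
import asyncio

from langgraph.checkpoint.memory import MemorySaver

from universal_mcp.agents.simple import SimpleAgent

agent = SimpleAgent(
    "Simple Agent",
    "You are a helpful assistant",
    "azure/gpt-4o",
    memory=MemorySaver(),
)
result = asyncio.run(agent.invoke(user_input="Summarize what you can do in one sentence."))
print(result["messages"][-1].content)
```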
@@ -1,19 +1,27 @@
  from abc import ABC, abstractmethod
  from typing import Any

- from universal_mcp.tools.manager import ToolManager
+ from universal_mcp.types import ToolConfig, ToolFormat


  class ToolRegistry(ABC):
  """Abstract base class for platform-specific functionality.

  This class abstracts away platform-specific operations like fetching apps,
- loading actions, and managing integrations. This allows the AutoAgent to
+ loading actions, and managing integrations. This allows the agents to
  work with different platforms without being tightly coupled to any specific one.
+
+ The following methods are abstract and must be implemented by the subclass:
+ - list_all_apps: Get list of available apps from the platform.
+ - get_app_details: Get details of a specific app.
+ - search_apps: Search for apps by a query.
+ - list_tools: List all tools available on the platform, filter by app_id.
+ - search_tools: Search for tools by a query.
+ - export_tools: Export tools to required format.
  """

  @abstractmethod
- def list_apps(self) -> list[dict[str, Any]]:
+ async def list_all_apps(self) -> list[dict[str, Any]]:
  """Get list of available apps from the platform.

  Returns:
@@ -22,7 +30,7 @@ class ToolRegistry(ABC):
  pass

  @abstractmethod
- def get_app_details(self, app_id: str) -> dict[str, Any]:
+ async def get_app_details(self, app_id: str) -> dict[str, Any]:
  """Get detailed information about a specific app.

  Args:
@@ -34,18 +42,42 @@ class ToolRegistry(ABC):
  pass

  @abstractmethod
- def load_tools(self, tools: list[str], tool_manager: ToolManager) -> None:
- """Load tools from the platform and register them as tools.
+ async def search_apps(
+ self,
+ query: str,
+ limit: int = 2,
+ ) -> list[dict[str, Any]]:
+ """Search for apps by a query."""
+ pass

- Args:
- tools: The list of tools to load
- """
+ @abstractmethod
+ async def list_tools(
+ self,
+ app_id: str,
+ ) -> list[dict[str, Any]]:
+ """List all tools available on the platform, filter by app_id."""
  pass

  @abstractmethod
- def search_tools(
+ async def search_tools(
  self,
  query: str,
- ) -> list[str]:
- """Retrieve a tool to use, given a search query."""
+ limit: int = 2,
+ app_id: str | None = None,
+ ) -> list[dict[str, Any]]:
+ """Search for tools by a query."""
+ pass
+
+ @abstractmethod
+ async def export_tools(
+ self,
+ tools: list[str] | ToolConfig,
+ format: ToolFormat,
+ ) -> str:
+ """Export giventools to required format."""
+ pass
+
+ @abstractmethod
+ async def call_tool(self, tool_name: str, tool_args: dict[str, Any]) -> dict[str, Any]:
+ """Call a tool with the given name and arguments."""
  pass
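Since the abstract surface grows from three methods to seven async ones, here is a hedged sketch of the minimum a third-party registry now has to implement; the class and its canned return values are illustrative only.

```python
from typing import Any

from universal_mcp.tools.registry import ToolRegistry
from universal_mcp.types import ToolConfig, ToolFormat


class InMemoryRegistry(ToolRegistry):
    """Illustrative stub implementing the rc9 ToolRegistry contract."""

    async def list_all_apps(self) -> list[dict[str, Any]]:
        return [{"id": "demo-app"}]

    async def get_app_details(self, app_id: str) -> dict[str, Any]:
        return {"id": app_id, "name": app_id}

    async def search_apps(self, query: str, limit: int = 2) -> list[dict[str, Any]]:
        return (await self.list_all_apps())[:limit]

    async def list_tools(self, app_id: str) -> list[dict[str, Any]]:
        return [{"id": f"{app_id}__echo", "description": "Echo the input"}]

    async def search_tools(self, query: str, limit: int = 2, app_id: str | None = None) -> list[dict[str, Any]]:
        return (await self.list_tools(app_id or "demo-app"))[:limit]

    async def export_tools(self, tools: list[str] | ToolConfig, format: ToolFormat) -> str:
        return str(tools)  # a real implementation would adapt tools to the requested format

    async def call_tool(self, tool_name: str, tool_args: dict[str, Any]) -> dict[str, Any]:
        return {"tool": tool_name, "args": tool_args}
```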
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: universal-mcp
- Version: 0.1.24rc7
+ Version: 0.1.24rc9
  Summary: Universal MCP acts as a middle ware for your API applications. It can store your credentials, authorize, enable disable apps on the fly and much more.
  Author-email: Manoj Bajaj <manojbajaj95@gmail.com>
  License: MIT
@@ -7,29 +7,28 @@ universal_mcp/logger.py,sha256=VmH_83efpErLEDTJqz55Dp0dioTXfGvMBLZUx5smOLc,2116
  universal_mcp/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  universal_mcp/types.py,sha256=jeUEkUnwdGWo3T_qSRSF83u0fYpuydaWzdKlCYBlCQA,770
  universal_mcp/agentr/README.md,sha256=t15pVgkCwZM5wzgLgrf0Zv6hVL7dPmKXvAeTf8CiXPQ,6641
- universal_mcp/agentr/__init__.py,sha256=ogOhH_OJwkoUZu_2nQJc7-vEGmYQxEjOE511-6ubrX0,217
- universal_mcp/agentr/agentr.py,sha256=kTUiBpDl4ODuXit9VE_ZXW28IKpCqEnJDNtMXi2B3Pc,1245
- universal_mcp/agentr/client.py,sha256=B4pjIpfD5nDCZ9qldKIqlBV8DISrkTRrajCIQK4r4Hs,6232
+ universal_mcp/agentr/__init__.py,sha256=fv1ZnOCduIUiJ9oN4e6Ya_hA2oWQvcEuDU3Ek1vEufI,180
+ universal_mcp/agentr/client.py,sha256=TQgwrNc7dEMXuprELf0Q-fdYdrH92Ppd7PUDZoD-KcY,7429
  universal_mcp/agentr/integration.py,sha256=V5GjqocqS02tRoI8MeV9PL6m-BzejwBzgJhOHo4MxAE,4212
- universal_mcp/agentr/registry.py,sha256=BOxy9iuJagKLmH9komaabwXvUglrsWbpRX8WY9xJ7lI,3115
+ universal_mcp/agentr/registry.py,sha256=O60qOsuby1GTkgc9GXVf9BjHmjMTyDbxqbDfIJ13CJ4,6583
  universal_mcp/agentr/server.py,sha256=bIPmHMiKKwnUYnxmfZVRh1thcn7Rytm_-bNiXTfANzc,2098
- universal_mcp/agents/__init__.py,sha256=ZkdQ71fn838LvYdyln6fL1mUMUUCZRZMyqos4aW2_I4,265
- universal_mcp/agents/auto.py,sha256=UUx3p9riLww2OwRg0pg10mWzWdDNydBrKJ-UdwzAQSk,25411
- universal_mcp/agents/base.py,sha256=uRb-flv_pdKfDJnHID1c-loYt-EvlAgFmB1_wJQNhUs,4152
+ universal_mcp/agents/__init__.py,sha256=AMBDQs3p5PePjzdoHYNoPYEsUK_PLHGNVPGxK7yrKVo,270
+ universal_mcp/agents/auto.py,sha256=BknCoeexTbFvwmVzYdGiiH72S_r6_5s9tmjH9M0I4d4,25410
+ universal_mcp/agents/base.py,sha256=o41gxvwOqWY0IZnupAHfKeH7OG0dgHUQDDpqntsFg6M,4128
  universal_mcp/agents/cli.py,sha256=7GdRBpu9rhZPiC2vaNQXWI7K-0yCnvdlmE0IFpvy2Gk,539
  universal_mcp/agents/hil.py,sha256=6xi0hhK5g-rhCrAMcGbjcKMReLWPC8rnFZMBOF3N_cY,3687
- universal_mcp/agents/llm.py,sha256=0HUI2Srh3RWtGyrjJCKqsroEgc1Rtkta3T8I1axl1mg,1232
- universal_mcp/agents/react.py,sha256=kAyTS68xzBLWRNgjJrLSP85o1ligz_ziatdlMZAavnA,2385
- universal_mcp/agents/simple.py,sha256=CXmwJq7jvxAoDJifNK7jKJTMKG4Pvre75x-k2CE-ZEM,1202
+ universal_mcp/agents/llm.py,sha256=jollWOAUv15IouzbLpuqKzbjj2x2ioZUukSQNyNjb4Y,1382
+ universal_mcp/agents/react.py,sha256=g2PwqxYe7v7wCMDCCQhTtGU1eFmSsF4g7T_t9d-v0ho,3012
+ universal_mcp/agents/simple.py,sha256=JL8TFyXlA1F4zcArgKhlqVIbLWXetwM05z4MPDJgFeI,1367
  universal_mcp/agents/tools.py,sha256=7Vdw0VZYxXVAzAYSpRKWHzVl9Ll6NOnVRlc4cTXguUQ,1335
  universal_mcp/agents/utils.py,sha256=7kwFpD0Rv6JqHG-LlNCVwSu_xRX-N119mUmiBroHJL4,4109
- universal_mcp/agents/autoagent/__init__.py,sha256=Vfm8brM9TNXCjKbVXV-CAPg_BVnYHOn6RVmkS0EaNV0,1072
- universal_mcp/agents/autoagent/__main__.py,sha256=FUSETuCDMpp7VSis0UFDnpI32HmQuJYaAXaOX5fQl-4,622
- universal_mcp/agents/autoagent/context.py,sha256=1ic3sIL14XZeiMjpkwysLImRTQFKXRFSx7rvgVh4plY,802
- universal_mcp/agents/autoagent/graph.py,sha256=f_TPcMk0t4JgM1gYs4sLFIeCrTGAzecc2rN0MPsmxvs,5116
- universal_mcp/agents/autoagent/prompts.py,sha256=DwLHwvsISuNrxeua0tKxTQbkU8u9gljCpk3P18VGk4w,386
- universal_mcp/agents/autoagent/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
- universal_mcp/agents/autoagent/studio.py,sha256=FWmZTAH54euF0ePG6xCBNwklBjdmjZ3jAOBoTrwNcqs,656
+ universal_mcp/agents/autoagent/__init__.py,sha256=E_vMnFz8Z120qdqaKXPNP_--4Tes4jImen7m_iZvtVo,913
+ universal_mcp/agents/autoagent/__main__.py,sha256=ps0cT7b9DN-jQK8pOKFVRZf3Oz_dVSBSYWOa8sc7VNc,647
+ universal_mcp/agents/autoagent/context.py,sha256=RgjW1uCslucxYJpdmi4govd-0V1_9e6Y_kjWl3FpLrE,847
+ universal_mcp/agents/autoagent/graph.py,sha256=0vjJD_FIwaAijAnqyvZ5BtfTvLKB3xwPqej16e-D-kE,7250
+ universal_mcp/agents/autoagent/prompts.py,sha256=ptnXyOarigq96nVW-P1ceT2WRilKvh7NPfE_Cy0NTz4,719
+ universal_mcp/agents/autoagent/state.py,sha256=pTKbgTK8TTx7qIioVt98u9KZpeWacWaRmKJWRv3Jj40,791
+ universal_mcp/agents/autoagent/studio.py,sha256=nfVRzPXwBjDORHA0wln2k3Nz-zQXNKgZMvgeqBvkdtM,644
  universal_mcp/agents/autoagent/utils.py,sha256=AFq-8scw_WlSZxDnTzxSNrOSiGYsIlqkqtQLDWf_rMU,431
  universal_mcp/agents/codeact/__init__.py,sha256=5D_I3lI_3tWjZERRoFav_bPe9UDaJ53pDzZYtyixg3E,10097
  universal_mcp/agents/codeact/sandbox.py,sha256=lGRzhuXTHCB1qauuOI3bH1-fPTsyL6Lf9EmMIz4C2xQ,1039
@@ -52,7 +51,7 @@ universal_mcp/tools/adapters.py,sha256=YJ2oqgc8JgmtsdRRtvO-PO0Q0bKqTJ4Y3J6yxlESo
  universal_mcp/tools/docstring_parser.py,sha256=efEOE-ME7G5Jbbzpn7pN2xNuyu2M5zfZ1Tqu1lRB0Gk,8392
  universal_mcp/tools/func_metadata.py,sha256=F4jd--hoZWKPBbZihVtluYKUsIdXdq4a0VWRgMl5k-Q,10838
  universal_mcp/tools/manager.py,sha256=24Rkn5Uvv_AuYAtjeMq986bJ7uzTaGE1290uB9eDtRE,10435
- universal_mcp/tools/registry.py,sha256=EA-xJ6GCYGajUVCrRmPIpr9Xekwxnqhmso8ztfsTeE8,1401
+ universal_mcp/tools/registry.py,sha256=etluwUwf2EfiGBoqQFZ1nf2xcPWtrJn0N4Qhcg7UGCU,2440
  universal_mcp/tools/tools.py,sha256=Lk-wUO3rfhwdxaRANtC7lQr5fXi7nclf0oHzxNAb79Q,4927
  universal_mcp/utils/__init__.py,sha256=8wi4PGWu-SrFjNJ8U7fr2iFJ1ktqlDmSKj1xYd7KSDc,41
  universal_mcp/utils/common.py,sha256=3aJK3AnBkmYf-dbsFLaEu_dGuXQ0Qi2HuqYTueLVhXQ,10968
@@ -73,8 +72,8 @@ universal_mcp/utils/openapi/readme.py,sha256=R2Jp7DUXYNsXPDV6eFTkLiy7MXbSULUj1vH
  universal_mcp/utils/openapi/test_generator.py,sha256=h44gQXEXmrw4pD3-XNHKB7T9c2lDomqrJxVO6oszCqM,12186
  universal_mcp/utils/templates/README.md.j2,sha256=Mrm181YX-o_-WEfKs01Bi2RJy43rBiq2j6fTtbWgbTA,401
  universal_mcp/utils/templates/api_client.py.j2,sha256=972Im7LNUAq3yZTfwDcgivnb-b8u6_JLKWXwoIwXXXQ,908
- universal_mcp-0.1.24rc7.dist-info/METADATA,sha256=agZ4cb79U2UCRPE6V_NXT5pL-_gAxB2Gz04jsc9rVCs,3143
- universal_mcp-0.1.24rc7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- universal_mcp-0.1.24rc7.dist-info/entry_points.txt,sha256=QlBrVKmA2jIM0q-C-3TQMNJTTWOsOFQvgedBq2rZTS8,56
- universal_mcp-0.1.24rc7.dist-info/licenses/LICENSE,sha256=NweDZVPslBAZFzlgByF158b85GR0f5_tLQgq1NS48To,1063
- universal_mcp-0.1.24rc7.dist-info/RECORD,,
+ universal_mcp-0.1.24rc9.dist-info/METADATA,sha256=8fuQP2tvQjMrU4RrHqhSEdQmN1YyesKjz_9aJ4r-l1w,3143
+ universal_mcp-0.1.24rc9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ universal_mcp-0.1.24rc9.dist-info/entry_points.txt,sha256=QlBrVKmA2jIM0q-C-3TQMNJTTWOsOFQvgedBq2rZTS8,56
+ universal_mcp-0.1.24rc9.dist-info/licenses/LICENSE,sha256=NweDZVPslBAZFzlgByF158b85GR0f5_tLQgq1NS48To,1063
+ universal_mcp-0.1.24rc9.dist-info/RECORD,,
@@ -1,37 +0,0 @@
- import os
-
- from universal_mcp.tools import Tool, ToolFormat, ToolManager
-
- from .client import AgentrClient
- from .registry import AgentrRegistry
-
-
- class Agentr:
- def __init__(
- self,
- api_key: str | None = None,
- base_url: str | None = None,
- registry: AgentrRegistry | None = None,
- format: ToolFormat | None = None,
- manager: ToolManager | None = None,
- ):
- self.api_key = api_key or os.getenv("AGENTR_API_KEY")
- self.base_url = base_url or os.getenv("AGENTR_BASE_URL")
- self.client = AgentrClient(api_key=self.api_key, base_url=self.base_url)
- self.registry = registry or AgentrRegistry(client=self.client)
- self.format = format or ToolFormat.NATIVE
- self.manager = manager or ToolManager()
-
- def load_tools(self, tool_names: list[str]) -> None:
- self.registry.load_tools(tool_names, self.manager)
- return
-
- def list_tools(self, format: ToolFormat | None = None) -> list[Tool]:
- return self.manager.list_tools(format=format or self.format)
-
- def search_tools(
- self,
- query: str,
- ) -> list[str]:
- """Retrieve a tool to use, given a search query."""
- return self.registry.search_tools(query)