universal-mcp-agents 0.1.23rc8__tar.gz → 0.1.23rc10__tar.gz

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.

Potentially problematic release.

This version of universal-mcp-agents might be problematic.

Files changed (66)
  1. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/PKG-INFO +1 -1
  2. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/pyproject.toml +1 -1
  3. universal_mcp_agents-0.1.23rc10/src/universal_mcp/agents/codeact0/tools.py +513 -0
  4. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/llm.py +1 -2
  5. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/applications/llm/app.py +22 -20
  6. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/uv.lock +7 -7
  7. universal_mcp_agents-0.1.23rc8/src/universal_mcp/agents/codeact0/tools.py +0 -416
  8. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/.github/workflows/evals.yml +0 -0
  9. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/.github/workflows/lint.yml +0 -0
  10. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/.github/workflows/release-please.yml +0 -0
  11. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/.github/workflows/tests.yml +0 -0
  12. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/.gitignore +0 -0
  13. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/.pre-commit-config.yaml +0 -0
  14. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/GEMINI.md +0 -0
  15. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/PROMPTS.md +0 -0
  16. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/README.md +0 -0
  17. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/bump_and_release.sh +0 -0
  18. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/evals/__init__.py +0 -0
  19. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/evals/dataset.py +0 -0
  20. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/evals/datasets/exact.jsonl +0 -0
  21. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/evals/datasets/tasks.jsonl +0 -0
  22. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/evals/datasets/test.jsonl +0 -0
  23. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/evals/evaluators.py +0 -0
  24. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/evals/prompts.py +0 -0
  25. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/evals/run.py +0 -0
  26. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/evals/utils.py +0 -0
  27. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/tests/test_agents.py +0 -0
  28. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/tests/test_sandbox.py +0 -0
  29. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/__init__.py +0 -0
  30. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/base.py +0 -0
  31. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/bigtool/__init__.py +0 -0
  32. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/bigtool/__main__.py +0 -0
  33. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/bigtool/agent.py +0 -0
  34. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/bigtool/context.py +0 -0
  35. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/bigtool/graph.py +0 -0
  36. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/bigtool/prompts.py +0 -0
  37. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/bigtool/state.py +0 -0
  38. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/bigtool/tools.py +0 -0
  39. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/builder/__main__.py +0 -0
  40. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/builder/builder.py +0 -0
  41. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/builder/helper.py +0 -0
  42. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/builder/prompts.py +0 -0
  43. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/builder/state.py +0 -0
  44. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/cli.py +0 -0
  45. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/codeact0/__init__.py +0 -0
  46. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/codeact0/__main__.py +0 -0
  47. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/codeact0/agent.py +0 -0
  48. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/codeact0/config.py +0 -0
  49. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/codeact0/langgraph_agent.py +0 -0
  50. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/codeact0/llm_tool.py +0 -0
  51. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/codeact0/prompts.py +0 -0
  52. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/codeact0/sandbox.py +0 -0
  53. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/codeact0/state.py +0 -0
  54. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/codeact0/utils.py +0 -0
  55. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/hil.py +0 -0
  56. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/react.py +0 -0
  57. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/sandbox.py +0 -0
  58. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/shared/__main__.py +0 -0
  59. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/shared/prompts.py +0 -0
  60. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/shared/tool_node.py +0 -0
  61. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/simple.py +0 -0
  62. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/utils.py +0 -0
  63. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/applications/filesystem/__init__.py +0 -0
  64. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/applications/filesystem/app.py +0 -0
  65. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/applications/llm/__init__.py +0 -0
  66. {universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/applications/ui/app.py +0 -0
{universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: universal-mcp-agents
- Version: 0.1.23rc8
+ Version: 0.1.23rc10
  Summary: Add your description here
  Project-URL: Homepage, https://github.com/universal-mcp/applications
  Project-URL: Repository, https://github.com/universal-mcp/applications
{universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/pyproject.toml

@@ -6,7 +6,7 @@ build-backend = "hatchling.build"

  [project]
  name = "universal-mcp-agents"
- version = "0.1.23-rc8"
+ version = "0.1.23-rc10"
  description = "Add your description here"
  readme = "README.md"
  authors = [
universal_mcp_agents-0.1.23rc10/src/universal_mcp/agents/codeact0/tools.py (new file)

@@ -0,0 +1,513 @@
+ import asyncio
+ import base64
+ from collections import defaultdict
+ from pathlib import Path
+ from typing import Annotated, Any
+
+ from langchain_core.tools import tool
+ from pydantic import Field
+ from universal_mcp.agentr.client import AgentrClient
+ from universal_mcp.agentr.registry import AgentrRegistry
+ from universal_mcp.applications.markitdown.app import MarkitdownApp
+ from universal_mcp.types import ToolFormat
+
+ from universal_mcp.agents.codeact0.prompts import build_tool_definitions
+
+
+ def enter_agent_builder_mode():
+     """Call this function to enter agent builder mode. Agent builder mode is when the user wants to store a repeated task as a script with some inputs for the future."""
+     return
+
+
+ def create_meta_tools(tool_registry: AgentrRegistry) -> dict[str, Any]:
+     """Create the meta tools for searching and loading tools"""
+
+     @tool
+     async def search_functions(
+         queries: Annotated[
+             list[list[str]] | None,
+             Field(
+                 description="A list of query lists. Each inner list contains one or more search terms that will be used together to find relevant tools."
+             ),
+         ] = None,
+         app_ids: Annotated[
+             list[str] | None,
+             Field(description="The ID or list of IDs (common names) of specific applications to search within."),
+         ] = None,
+     ) -> str:
+         """
+         Searches for relevant functions based on queries and/or applications. This function
+         operates in three powerful modes with support for multi-query searches:
+
+         1. **Global Search** (`queries` only as List[List[str]]):
+            - Searches all functions across all applications.
+            - Supports multiple independent searches in parallel.
+            - Each inner list represents a separate search query.
+
+            Examples:
+            - Single global search:
+              `search_functions(queries=[["create presentation"]])`
+
+            - Multiple independent global searches:
+              `search_functions(queries=[["send email"], ["schedule meeting"]])`
+
+            - Multi-term search for comprehensive results:
+              `search_functions(queries=[["send email", "draft email", "compose email"]])`
+
+         2. **App Discovery** (`app_ids` only as List[str]):
+            - Returns ALL available functions for one or more specific applications.
+            - Use this to explore the complete capability set of an application.
+
+            Examples:
+            - Single app discovery:
+              `search_functions(app_ids=["Gmail"])`
+
+            - Multiple app discovery:
+              `search_functions(app_ids=["Gmail", "Google Calendar", "Slack"])`
+
+         3. **Scoped Search** (`queries` as List[List[str]] and `app_ids` as List[str]):
+            - Performs targeted searches within specific applications in parallel.
+            - The number of app_ids must match the number of inner query lists.
+            - Each query list is searched within its corresponding app_id.
+            - Supports multiple search terms per app for comprehensive discovery.
+
+            Examples:
+            - Basic scoped search (one query per app):
+              `search_functions(queries=[["find email"], ["share file"]], app_ids=["Gmail", "Google_Drive"])`
+
+            - Multi-term scoped search (multiple queries per app):
+              `search_functions(
+                  queries=[
+                      ["send email", "draft email", "compose email", "reply to email"],
+                      ["create event", "schedule meeting", "find free time"],
+                      ["upload file", "share file", "create folder", "search files"]
+                  ],
+                  app_ids=["Gmail", "Google Calendar", "Google_Drive"]
+              )`
+
+            - Mixed complexity (some apps with single query, others with multiple):
+              `search_functions(
+                  queries=[
+                      ["list messages"],
+                      ["create event", "delete event", "update event"]
+                  ],
+                  app_ids=["Gmail", "Google Calendar"]
+              )`
+
+         **Pro Tips:**
+         - Use multiple search terms in a single query list to cast a wider net and discover related functionality
+         - Multi-term searches are more efficient than separate calls
+         - Scoped searches return more focused results than global searches
+         - The function returns connection status for each app (connected vs NOT connected)
+         - All searches within a single call execute in parallel for maximum efficiency
+
+         **Parameters:**
+         - `queries` (List[List[str]], optional): A list of query lists. Each inner list contains one or more
+           search terms that will be used together to find relevant tools.
+         - `app_ids` (List[str], optional): A list of application IDs to search within or discover.
+
+         **Returns:**
+         - A structured response containing:
+             - Matched tools with their descriptions
+             - Connection status for each app
+             - Recommendations for which tools to load next
+         """
+         registry = tool_registry
+
+         TOOL_THRESHOLD = 0.75
+         APP_THRESHOLD = 0.7
+
+         # --- Helper Functions for Different Search Modes ---
+
+         async def _handle_global_search(queries: list[str]) -> list[list[dict[str, Any]]]:
+             """Performs a broad search across all apps to find relevant tools and apps."""
+             # 1. Perform initial broad searches for tools and apps concurrently.
+             initial_tool_tasks = [registry.search_tools(query=q, distance_threshold=TOOL_THRESHOLD) for q in queries]
+             app_search_tasks = [registry.search_apps(query=q, distance_threshold=APP_THRESHOLD) for q in queries]
+
+             initial_tool_results, app_search_results = await asyncio.gather(
+                 asyncio.gather(*initial_tool_tasks), asyncio.gather(*app_search_tasks)
+             )
+
+             # 2. Create a prioritized list of app IDs for the final search.
+             app_ids_from_apps = {app["id"] for result_list in app_search_results for app in result_list}
+             prioritized_app_id_list = list(app_ids_from_apps)
+
+             app_ids_from_tools = {tool["app_id"] for result_list in initial_tool_results for tool in result_list}
+             for tool_app_id in app_ids_from_tools:
+                 if tool_app_id not in app_ids_from_apps:
+                     prioritized_app_id_list.append(tool_app_id)
+
+             if not prioritized_app_id_list:
+                 return []
+
+             # 3. Perform the final, comprehensive tool search across the prioritized apps.
+             final_tool_search_tasks = [
+                 registry.search_tools(query=query, app_id=app_id_to_search, distance_threshold=TOOL_THRESHOLD)
+                 for app_id_to_search in prioritized_app_id_list
+                 for query in queries
+             ]
+             return await asyncio.gather(*final_tool_search_tasks)
+
+         async def _handle_scoped_search(app_ids: list[str], queries: list[list[str]]) -> list[list[dict[str, Any]]]:
+             """Performs targeted searches for specific queries within specific applications."""
+             if len(app_ids) != len(queries):
+                 raise ValueError("The number of app_ids must match the number of query lists.")
+
+             tasks = []
+             for app_id, query_list in zip(app_ids, queries):
+                 for query in query_list:
+                     # Create a search task for each query in the list for the corresponding app
+                     tasks.append(registry.search_tools(query=query, app_id=app_id, distance_threshold=TOOL_THRESHOLD))
+
+             return await asyncio.gather(*tasks)
+
+         async def _handle_app_discovery(app_ids: list[str]) -> list[list[dict[str, Any]]]:
+             """Fetches all tools for a list of applications."""
+             tasks = [registry.search_tools(query="", app_id=app_id, limit=20) for app_id in app_ids]
+             return await asyncio.gather(*tasks)
+
+         # --- Helper Functions for Structuring and Formatting Results ---
+
+         def _format_response(structured_results: list[dict[str, Any]]) -> str:
+             """Builds the final, user-facing formatted string response from structured data."""
+             if not structured_results:
+                 return "No relevant functions were found."
+
+             result_parts = []
+             apps_in_results = {app["app_id"] for app in structured_results}
+             connected_apps_in_results = {
+                 app["app_id"] for app in structured_results if app["connection_status"] == "connected"
+             }
+
+             for app in structured_results:
+                 app_id = app["app_id"]
+                 app_status = "connected" if app["connection_status"] == "connected" else "NOT connected"
+                 result_parts.append(f"Tools from {app_id} (status: {app_status} by user):")
+
+                 for tool in app["tools"]:
+                     result_parts.append(f" - {tool['id']}: {tool['description']}")
+                 result_parts.append("")  # Empty line for readability
+
+             # Add summary connection status messages
+             if not connected_apps_in_results and len(apps_in_results) > 1:
+                 result_parts.append(
+                     "Connection Status: None of the apps in the results are connected. "
+                     "You must ask the user to choose the application."
+                 )
+             elif len(connected_apps_in_results) > 1:
+                 connected_list = ", ".join(sorted(list(connected_apps_in_results)))
+                 result_parts.append(
+                     f"Connection Status: Multiple apps are connected ({connected_list}). "
+                     "You must ask the user to select which application they want to use."
+                 )
+
+             result_parts.append("Call load_functions to select the required functions only.")
+             if 0 < len(connected_apps_in_results) < len(apps_in_results):
+                 result_parts.append(
+                     "Unconnected app functions can also be loaded if required by the user, "
+                     "but prefer connected ones. Ask the user to choose if none of the "
+                     "relevant apps are connected."
+                 )
+
+             return "\n".join(result_parts)
+
+         def _structure_tool_results(
+             raw_tool_lists: list[list[dict[str, Any]]], connected_app_ids: set[str]
+         ) -> list[dict[str, Any]]:
+             """
+             Converts raw search results into a structured format, handling duplicates,
+             cleaning descriptions, and adding connection status.
+             """
+             aggregated_tools = defaultdict(dict)
+             # Use a list to maintain the order of apps as they are found.
+             ordered_app_ids = []
+
+             for tool_list in raw_tool_lists:
+                 for tool in tool_list:
+                     app_id = tool.get("app_id", "unknown")
+                     tool_id = tool.get("id")
+
+                     if not tool_id:
+                         continue
+
+                     if app_id not in aggregated_tools:
+                         ordered_app_ids.append(app_id)
+
+                     if tool_id not in aggregated_tools[app_id]:
+                         aggregated_tools[app_id][tool_id] = {
+                             "id": tool_id,
+                             "description": _clean_tool_description(tool.get("description", "")),
+                         }
+
+             # Build the final results list respecting the discovery order.
+             found_tools_result = []
+             for app_id in ordered_app_ids:
+                 if app_id in aggregated_tools and aggregated_tools[app_id]:
+                     found_tools_result.append(
+                         {
+                             "app_id": app_id,
+                             "connection_status": "connected" if app_id in connected_app_ids else "not_connected",
+                             "tools": list(aggregated_tools[app_id].values()),
+                         }
+                     )
+             return found_tools_result
+
+         def _clean_tool_description(description: str) -> str:
+             """Consistently formats tool descriptions by removing implementation details."""
+             return description.split("Context:")[0].strip()
+
+         # Main Function Logic
+
+         if not queries and not app_ids:
+             raise ValueError("You must provide 'queries', 'app_ids', or both.")
+
+         # --- Initialization and Input Normalization ---
+         connections = await registry.list_connected_apps()
+         connected_app_ids = {connection["app_id"] for connection in connections}
+
+         canonical_app_ids = []
+         if app_ids:
+             # Concurrently search for all provided app names
+             app_search_tasks = [
+                 registry.search_apps(query=app_name, distance_threshold=APP_THRESHOLD) for app_name in app_ids
+             ]
+             app_search_results = await asyncio.gather(*app_search_tasks)
+
+             # Process results and build the list of canonical IDs, handling not found errors
+             for app_name, result_list in zip(app_ids, app_search_results):
+                 if not result_list:
+                     raise ValueError(f"Application '{app_name}' could not be found.")
+                 # Assume the first result is the correct one
+                 canonical_app_ids.append(result_list[0]["id"])
+
+         # --- Mode Dispatching ---
+         raw_results = []
+
+         if canonical_app_ids and queries:
+             raw_results = await _handle_scoped_search(canonical_app_ids, queries)
+         elif canonical_app_ids:
+             raw_results = await _handle_app_discovery(canonical_app_ids)
+         elif queries:
+             # Flatten list of lists to list of strings for global search
+             flat_queries = (
+                 [q for sublist in queries for q in sublist] if queries and not isinstance(queries[0], str) else queries
+             )
+             raw_results = await _handle_global_search(flat_queries)
+
+         # --- Structuring and Formatting ---
+         structured_data = _structure_tool_results(raw_results, connected_app_ids)
+         return _format_response(structured_data)
+
+     @tool
+     async def load_functions(tool_ids: list[str]) -> str:
+         """
+         Loads specified functions and returns their Python signatures and docstrings.
+         This makes the functions available for use inside the 'execute_ipython_cell' tool.
+         The agent MUST use the returned information to understand how to call the functions correctly.
+
+         Args:
+             tool_ids: A list of function IDs in the format 'app__function'. Example: ['google_mail__send_email']
+
+         Returns:
+             A string containing the signatures and docstrings of the successfully loaded functions,
+             ready for the agent to use in its code.
+         """
+         if not tool_ids:
+             return "No tool IDs provided to load."
+
+         # Step 1: Validate which tools are usable and get login links for others.
+         valid_tools, unconnected_links = await get_valid_tools(tool_ids=tool_ids, registry=tool_registry)
+
+         if not valid_tools:
+             return "Error: None of the provided tool IDs could be validated or loaded."
+
+         # Step 2: Export the schemas of the valid tools.
+         await tool_registry.load_tools(valid_tools)
+         exported_tools = await tool_registry.export_tools(
+             valid_tools, ToolFormat.NATIVE
+         )  # Get definition for only the new tools
+
+         # Step 3: Build the informational string for the agent.
+         tool_definitions, new_tools_context = build_tool_definitions(exported_tools)
+
+         result_parts = [
+             f"Successfully loaded {len(exported_tools)} functions. They are now available for use inside `execute_ipython_cell`:",
+             "\n".join(tool_definitions),
+         ]
+
+         response_string = "\n\n".join(result_parts)
+         unconnected_links = "\n".join(unconnected_links)
+
+         return response_string, new_tools_context, valid_tools, unconnected_links
+
+     async def web_search(query: str) -> dict:
+         """
+         Get an LLM answer to a question informed by Exa search results. Useful when you need information from a wide range of real-time sources on the web. Do not use this when you need to access contents of a specific webpage.
+
+         This tool performs an Exa `/answer` request, which:
+         1. Provides a **direct answer** for factual queries (e.g., "What is the capital of France?" → "Paris")
+         2. Generates a **summary with citations** for open-ended questions
+            (e.g., "What is the state of AI in healthcare?" → A detailed summary with source links)
+
+         Args:
+             query (str): The question or topic to answer.
+         Returns:
+             dict: A structured response containing only:
+                 - answer (str): Generated answer
+                 - citations (list[dict]): List of cited sources
+         """
+         await tool_registry.export_tools(["exa__answer"], ToolFormat.LANGCHAIN)
+         response = await tool_registry.call_tool("exa__answer", {"query": query, "text": True})
+
+         # Extract only desired fields
+         return {
+             "answer": response.get("answer"),
+             "citations": response.get("citations", []),
+         }
+
+     async def read_file(uri: str) -> str:
+         """
+         Asynchronously reads a local file or uri and returns the content as a markdown string.
+
+         This tool aims to extract the main text content from various sources.
+         It automatically prepends 'file://' to the input string if it appears
+         to be a local path without a specified scheme (like http, https, data, file).
+
+         Args:
+             uri (str): The URI pointing to the resource or a local file path.
+                 Supported schemes:
+                 - http:// or https:// (Web pages, feeds, APIs)
+                 - file:// (Local or accessible network files)
+                 - data: (Embedded data)
+
+         Returns:
+             A string containing the markdown representation of the content at the specified URI
+
+         Raises:
+             ValueError: If the URI is invalid, empty, or uses an unsupported scheme
+                 after automatic prefixing.
+
+         Tags:
+             convert, markdown, async, uri, transform, document, important
+         """
+         markitdown = MarkitdownApp()
+         response = await markitdown.convert_to_markdown(uri)
+         return response
+
+     async def save_file(file_name: str, content: str) -> dict:
+         """
+         Saves a file to the local filesystem.
+
+         Args:
+             file_name (str): The name of the file to save.
+             content (str): The content to save to the file.
+
+         Returns:
+             dict: A dictionary containing the result of the save operation with the following fields:
+                 - status (str): "success" if the save succeeded, "error" otherwise.
+                 - message (str): A message returned by the server, typically indicating success or providing error details.
+         """
+         with Path(file_name).open("w") as f:
+             f.write(content)
+
+         return {
+             "status": "success",
+             "message": f"File {file_name} saved successfully",
+             "file_path": Path(file_name).absolute(),
+         }
+
+     async def upload_file(file_name: str, mime_type: str, base64_data: str) -> dict:
+         """
+         Uploads a file to the server via the AgentrClient.
+
+         Args:
+             file_name (str): The name of the file to upload.
+             mime_type (str): The MIME type of the file.
+             base64_data (str): The file content encoded as a base64 string.
+
+         Returns:
+             dict: A dictionary containing the result of the upload operation with the following fields:
+                 - status (str): "success" if the upload succeeded, "error" otherwise.
+                 - message (str): A message returned by the server, typically indicating success or providing error details.
+                 - signed_url (str or None): The signed URL to access the uploaded file if successful, None otherwise.
+         """
+         client: AgentrClient = tool_registry.client
+         bytes_data = base64.b64decode(base64_data)
+         response = client._upload_file(file_name, mime_type, bytes_data)
+         if response.get("status") != "success":
+             return {
+                 "status": "error",
+                 "message": response.get("message"),
+                 "signed_url": None,
+             }
+         return {
+             "status": "success",
+             "message": response.get("message"),
+             "signed_url": response.get("signed_url"),
+         }
+
+     return {
+         "search_functions": search_functions,
+         "load_functions": load_functions,
+         "web_search": web_search,
+         "read_file": read_file,
+         "upload_file": upload_file,
+         "save_file": save_file,
+     }
+
+
+ async def get_valid_tools(tool_ids: list[str], registry: AgentrRegistry) -> tuple[list[str], list[str]]:
+     """For a given list of tool_ids, validates the tools and returns a list of links for the apps that have not been logged in"""
+     correct, incorrect = [], []
+     connections = await registry.list_connected_apps()
+     connected_apps = {connection["app_id"] for connection in connections}
+     unconnected = set()
+     unconnected_links = []
+     app_tool_list: dict[str, set[str]] = {}
+
+     # Group tool_ids by app for fewer registry calls
+     app_to_tools: dict[str, list[tuple[str, str]]] = {}
+     for tool_id in tool_ids:
+         if "__" not in tool_id:
+             incorrect.append(tool_id)
+             continue
+         app, tool_name = tool_id.split("__", 1)
+         app_to_tools.setdefault(app, []).append((tool_id, tool_name))
+
+     # Fetch all apps concurrently
+     async def fetch_tools(app: str):
+         try:
+             tools_dict = await registry.list_tools(app)
+             return app, {tool_unit["name"] for tool_unit in tools_dict}
+         except Exception:
+             return app, None
+
+     results = await asyncio.gather(*(fetch_tools(app) for app in app_to_tools))
+
+     # Build map of available tools per app
+     for app, tools in results:
+         if tools is not None:
+             app_tool_list[app] = tools
+
+     # Validate tool_ids
+     for app, tool_entries in app_to_tools.items():
+         available = app_tool_list.get(app)
+         if available is None:
+             incorrect.extend(tool_id for tool_id, _ in tool_entries)
+             continue
+         if app not in connected_apps and app not in unconnected:
+             unconnected.add(app)
+             text = await registry.authorise_app(app_id=app)
+             start = text.find(":") + 1
+             end = text.find(". R", start)
+             url = text[start:end].strip()
+             markdown_link = f"[{app}]({url})"
+             unconnected_links.append(markdown_link)
+         for tool_id, tool_name in tool_entries:
+             if tool_name in available:
+                 correct.append(tool_id)
+             else:
+                 incorrect.append(tool_id)
+
+     return correct, unconnected_links
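The new tools.py returns its meta tools as a plain dict keyed by name, with `search_functions` and `load_functions` wrapped as LangChain tools. A minimal usage sketch, not part of the release; the default `AgentrRegistry()` construction is an assumption, and the tool ID comes from the docstring example:

```python
# Illustrative sketch only: assumes an AgentrRegistry can be built from ambient credentials.
import asyncio

from universal_mcp.agentr.registry import AgentrRegistry
from universal_mcp.agents.codeact0.tools import create_meta_tools


async def main() -> None:
    registry = AgentrRegistry()  # assumed default construction
    meta = create_meta_tools(registry)

    # search_functions is a LangChain tool, so it is invoked with a dict payload.
    hits = await meta["search_functions"].ainvoke({"queries": [["send email", "draft email"]]})
    print(hits)

    # load_functions validates the IDs, loads the tools, and returns their definitions
    # along with login links for any apps that are not yet connected.
    loaded = await meta["load_functions"].ainvoke({"tool_ids": ["google_mail__send_email"]})
    print(loaded)


asyncio.run(main())
```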
{universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/agents/llm.py

@@ -4,6 +4,7 @@ from langchain_anthropic import ChatAnthropic
  from langchain_core.language_models import BaseChatModel
  from langchain_google_genai import ChatGoogleGenerativeAI
  from langchain_openai import AzureChatOpenAI
+ from loguru import logger


  @lru_cache(maxsize=8)
@@ -41,8 +42,6 @@ def load_chat_model(


  if __name__ == "__main__":
-     from loguru import logger
-
      models_to_test = [
          "azure/gpt-5-chat",
          "anthropic/claude-4-sonnet-20250514",
{universal_mcp_agents-0.1.23rc8 → universal_mcp_agents-0.1.23rc10}/src/universal_mcp/applications/llm/app.py

@@ -1,6 +1,7 @@
  import json
  from typing import Any, Literal, cast

+ from langchain.agents import create_agent
  from pydantic import BaseModel, Field
  from universal_mcp.applications.application import BaseApplication

@@ -37,7 +38,7 @@ class LlmApp(BaseApplication):
          """Initialize the LLMApp."""
          super().__init__(name="llm")

-     async def generate_text(
+     def generate_text(
          self,
          task: str,
          context: str | list[str] | dict[str, str] = "",
@@ -91,10 +92,10 @@ class LlmApp(BaseApplication):
          full_prompt = f"{prompt}\n\nContext:\n{context_str}\n\n"

          model = load_chat_model("azure/gpt-5-mini")
-         response = await model.with_retry(stop_after_attempt=MAX_RETRIES).ainvoke(full_prompt)
+         response = model.with_retry(stop_after_attempt=MAX_RETRIES).invoke(full_prompt, stream=False)
          return str(response.content)

-     async def classify_data(
+     def classify_data(
          self,
          classification_task_and_requirements: str,
          context: Any | list[Any] | dict[str, Any],
@@ -150,24 +151,24 @@ class LlmApp(BaseApplication):
              f"This is a classification task.\nPossible classes and descriptions:\n"
              f"{json.dumps(class_descriptions, indent=2)}\n\n"
              f"Context:\n{context_str}\n\n"
-             "Return ONLY a valid JSON object, no extra text."
          )

-         model = load_chat_model("azure/gpt-5-mini", temperature=0)
-
          class ClassificationResult(BaseModel):
              probabilities: dict[str, float] = Field(..., description="The probabilities for each class.")
              reason: str = Field(..., description="The reasoning behind the classification.")
              top_class: str = Field(..., description="The class with the highest probability.")

-         response = (
-             await model.with_structured_output(schema=ClassificationResult)
-             .with_retry(stop_after_attempt=MAX_RETRIES)
-             .ainvoke(prompt)
+         model = load_chat_model("azure/gpt-5-mini", temperature=0)
+         agent = create_agent(
+             model=model,
+             tools=[],
+             response_format=ClassificationResult,  # Auto-selects ProviderStrategy
          )
-         return response.model_dump()

-     async def extract_data(
+         result = agent.invoke({"messages": [{"role": "user", "content": prompt}]}, stream=False)
+         return result["structured_response"].model_dump()
+
+     def extract_data(
          self,
          extraction_task: str,
          source: Any | list[Any] | dict[str, Any],
@@ -230,14 +231,14 @@

          model = load_chat_model("azure/gpt-5-mini", temperature=0)

-         response = await (
+         response = (
              model.with_structured_output(schema=output_schema, method="json_mode")
              .with_retry(stop_after_attempt=MAX_RETRIES)
-             .ainvoke(prompt)
+             .invoke(prompt, stream=False)
          )
          return cast(dict[str, Any], response)

-     async def call_llm(
+     def call_llm(
          self,
          task_instructions: str,
          context: Any | list[Any] | dict[str, Any],
@@ -283,12 +284,13 @@

          model = load_chat_model("azure/gpt-5-mini", temperature=0)

-         response = await (
-             model.with_structured_output(schema=output_schema)
-             .with_retry(stop_after_attempt=MAX_RETRIES)
-             .ainvoke(prompt)
+         agent = create_agent(
+             model=model,
+             tools=[],
+             response_format=output_schema,
          )
-         return response.model_dump()
+         result = agent.invoke({"messages": [{"role": "user", "content": prompt}]}, stream=False)
+         return result["structured_response"]

      def list_tools(self):
          return [
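With this release the LlmApp helpers become synchronous, and the structured-output paths route through `langchain.agents.create_agent`. A minimal call sketch, assuming the credentials behind `load_chat_model("azure/gpt-5-mini")` are configured; only the parameters visible in the diff are used, and further keyword arguments may exist:

```python
# Illustrative sketch only: assumes LLM credentials used by load_chat_model are configured.
from universal_mcp.applications.llm.app import LlmApp

app = LlmApp()

# generate_text is now a plain synchronous call; no `await` is needed.
summary = app.generate_text(
    task="Summarize these notes in two sentences.",
    context=["Met with the design team.", "Launch moved to Q3."],
)
print(summary)
```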