universal-mcp-agents 0.1.19rc1__py3-none-any.whl → 0.1.20rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of universal-mcp-agents might be problematic. Click here for more details.
- universal_mcp/agents/__init__.py +5 -9
- universal_mcp/agents/base.py +4 -1
- universal_mcp/agents/cli.py +0 -3
- universal_mcp/agents/codeact0/__init__.py +2 -3
- universal_mcp/agents/codeact0/__main__.py +2 -2
- universal_mcp/agents/codeact0/agent.py +231 -83
- universal_mcp/agents/codeact0/langgraph_agent.py +1 -1
- universal_mcp/agents/codeact0/prompts.py +38 -5
- universal_mcp/agents/codeact0/sandbox.py +31 -1
- universal_mcp/agents/codeact0/state.py +3 -1
- universal_mcp/agents/codeact0/tools.py +200 -85
- {universal_mcp_agents-0.1.19rc1.dist-info → universal_mcp_agents-0.1.20rc1.dist-info}/METADATA +1 -1
- universal_mcp_agents-0.1.20rc1.dist-info/RECORD +44 -0
- universal_mcp/agents/codeact/__init__.py +0 -3
- universal_mcp/agents/codeact/__main__.py +0 -33
- universal_mcp/agents/codeact/agent.py +0 -240
- universal_mcp/agents/codeact/models.py +0 -11
- universal_mcp/agents/codeact/prompts.py +0 -82
- universal_mcp/agents/codeact/sandbox.py +0 -85
- universal_mcp/agents/codeact/state.py +0 -11
- universal_mcp/agents/codeact/utils.py +0 -68
- universal_mcp/agents/codeact0/playbook_agent.py +0 -355
- universal_mcp/agents/unified/README.md +0 -45
- universal_mcp/agents/unified/__init__.py +0 -3
- universal_mcp/agents/unified/__main__.py +0 -28
- universal_mcp/agents/unified/agent.py +0 -289
- universal_mcp/agents/unified/langgraph_agent.py +0 -14
- universal_mcp/agents/unified/llm_tool.py +0 -25
- universal_mcp/agents/unified/prompts.py +0 -192
- universal_mcp/agents/unified/sandbox.py +0 -101
- universal_mcp/agents/unified/state.py +0 -42
- universal_mcp/agents/unified/tools.py +0 -188
- universal_mcp/agents/unified/utils.py +0 -388
- universal_mcp_agents-0.1.19rc1.dist-info/RECORD +0 -64
- {universal_mcp_agents-0.1.19rc1.dist-info → universal_mcp_agents-0.1.20rc1.dist-info}/WHEEL +0 -0
|
@@ -1,188 +0,0 @@
|
|
|
1
|
-
import asyncio
|
|
2
|
-
from collections import defaultdict
|
|
3
|
-
from typing import Any
|
|
4
|
-
|
|
5
|
-
from langchain_core.tools import tool
|
|
6
|
-
from universal_mcp.tools.registry import ToolRegistry
|
|
7
|
-
from universal_mcp.types import ToolFormat
|
|
8
|
-
|
|
9
|
-
# NOTE(review): name is misspelled ("LENGHT") and appears unused in this module
# (search_functions defines its own local MAX_LENGTH = 20). Confirm no external
# importer relies on this name before renaming or removing it.
MAX_LENGHT = 100
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
def enter_playbook_mode():
    """Call this function to enter playbook mode. Playbook mode is when the user wants to store a repeated task as a script with some inputs for the future."""
    # Intentional no-op: the docstring is the payload — presumably it is surfaced
    # to the LLM as the tool description and the agent reacts to the *call*
    # itself, not to any return value. TODO confirm against the agent loop.
    return
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
def exit_playbook_mode():
    """Call this function to exit playbook mode. Playbook mode is when the user wants to store a repeated task as a script with some inputs for the future."""
    # Intentional no-op marker tool; see enter_playbook_mode. The call itself is
    # the signal — there is no meaningful return value.
    return
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
def create_meta_tools(tool_registry: ToolRegistry) -> dict[str, Any]:
    """Create the meta tools for searching and loading tools"""
    # Returns three LangChain tools closed over `tool_registry`:
    # search_functions, load_functions, and web_search. The @tool decorator
    # reads each inner function's docstring as its LLM-facing description, so
    # those docstrings are runtime behavior — do not edit them casually.

    @tool
    async def search_functions(queries: list[str]) -> str:
        """Search for relevant functions given list of queries.
        Each single query should be atomic (doable with a single function).
        For tasks requiring multiple functions, add separate queries for each subtask"""
        try:
            # Fetch all connections
            connections = await tool_registry.list_connected_apps()
            connected_apps = {connection["app_id"] for connection in connections}

            # app name -> set of "tool_id: description" strings (set deduplicates).
            app_tools = defaultdict(set)
            # Per-app cap on results shown to the model. NOTE(review): shadows
            # nothing, but the module-level constant is spelled MAX_LENGHT=100 —
            # this local 20 is the one actually used here.
            MAX_LENGTH = 20

            # Process all queries concurrently
            search_tasks = []
            for query in queries:
                search_tasks.append(_search_query_tools(query))

            query_results = await asyncio.gather(*search_tasks)

            # Aggregate results with limit per app and automatic deduplication.
            # NOTE(review): the loop variable `tool` shadows the imported @tool
            # decorator inside this scope — harmless here, but confusing.
            for tools_list in query_results:
                for tool in tools_list:
                    # Tool ids follow the "app__function" convention.
                    app = tool["id"].split("__")[0]
                    tool_id = tool["id"]

                    # Check if within limit and add to set (automatically deduplicates)
                    if len(app_tools[app]) < MAX_LENGTH:
                        # Strip trailing "Context:" boilerplate from descriptions.
                        # NOTE(review): assumes tool["description"] is a str —
                        # a None description would raise AttributeError here.
                        cleaned_desc = tool["description"].split("Context:")[0].strip()
                        app_tools[app].add(f"{tool_id}: {cleaned_desc}")

            # Build result string efficiently
            result_parts = []
            for app, tools in app_tools.items():
                app_status = "connected" if app in connected_apps else "NOT connected"
                result_parts.append(f"Tools from {app} (status: {app_status} by user):")
                # Convert set to sorted list for consistent output
                for tool in sorted(tools):
                    result_parts.append(f" - {tool}")
                result_parts.append("")  # Empty line between apps

            result_parts.append("Call load_functions to select the required functions only.")
            return "\n".join(result_parts)

        except Exception as e:
            # Errors are returned as text (not raised) so the agent loop can
            # surface them to the model instead of crashing the graph.
            return f"Error: {e}"

    async def _search_query_tools(query: str) -> list[dict]:
        """Helper function to search apps and tools for a single query."""
        # Start both searches concurrently
        tools_search_task = tool_registry.search_tools(query, limit=10)
        apps_search_task = tool_registry.search_apps(query, limit=4)

        # Wait for both to complete
        tools_from_general_search, apps_list = await asyncio.gather(tools_search_task, apps_search_task)

        # Create tasks for searching tools from each app
        app_tool_tasks = [tool_registry.search_tools(query, limit=5, app_id=app["id"]) for app in apps_list]

        # Wait for all app-specific tool searches to complete
        app_tools_results = await asyncio.gather(*app_tool_tasks)

        # Combine all results (general hits first, then per-app hits; duplicates
        # are tolerated here — the caller deduplicates via sets).
        tools_list = list(tools_from_general_search)
        for app_tools in app_tools_results:
            tools_list.extend(app_tools)

        return tools_list

    @tool
    async def load_functions(tool_ids: list[str]) -> str:
        """Load specific functions by their IDs for use in subsequent steps.

        Args:
            tool_ids: Function ids in the form 'app__function'. Example: 'google_mail__send_email'

        Returns:
            Confirmation message about loaded functions
        """
        # NOTE(review): this is a stub — it performs no loading itself.
        # Presumably the agent graph intercepts calls to this tool and does the
        # actual export/validation; verify against the calling agent.
        return f"Successfully loaded {len(tool_ids)} functions: {tool_ids}"

    @tool
    async def web_search(query: str) -> dict:
        """
        Get an LLM answer to a question informed by Exa search results.

        This tool performs an Exa `/answer` request, which:
        1. Provides a **direct answer** for factual queries (e.g., "What is the capital of France?" → "Paris")
        2. Generates a **summary with citations** for open-ended questions
        (e.g., "What is the state of AI in healthcare?" → A detailed summary with source links)

        Args:
            query (str): The question or topic to answer.
        Returns:
            dict: A structured response containing only:
                - answer (str): Generated answer
                - citations (list[dict]): List of cited sources
        """
        # Ensure the exa__answer tool is exported before calling it.
        await tool_registry.export_tools(["exa__answer"], ToolFormat.LANGCHAIN)
        response = await tool_registry.call_tool("exa__answer", {"query": query, "text": True})

        # Extract only desired fields
        return {
            "answer": response.get("answer"),
            "citations": response.get("citations", []),
        }

    return {"search_functions": search_functions, "load_functions": load_functions, "web_search": web_search}
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
async def get_valid_tools(tool_ids: list[str], registry: ToolRegistry) -> tuple[list[str], list[str]]:
    """For a given list of tool_ids, validates the tools and returns a list of links for the apps that have not been logged in"""
    # Returns (correct, unconnected_links):
    #   correct           — tool ids that exist in their app's tool list
    #   unconnected_links — markdown auth links for apps the user hasn't connected
    # NOTE(review): `incorrect` is accumulated below but never returned — looks
    # like a leftover; confirm callers don't expect invalid ids back.
    correct, incorrect = [], []
    connections = await registry.list_connected_apps()
    connected_apps = {connection["app_id"] for connection in connections}
    unconnected = set()
    unconnected_links = []
    # app -> set of tool names actually available for that app
    app_tool_list: dict[str, set[str]] = {}

    # Group tool_ids by app for fewer registry calls
    app_to_tools: dict[str, list[tuple[str, str]]] = {}
    for tool_id in tool_ids:
        if "__" not in tool_id:
            # Ids must follow the "app__tool" convention; anything else is invalid.
            incorrect.append(tool_id)
            continue
        app, tool_name = tool_id.split("__", 1)
        app_to_tools.setdefault(app, []).append((tool_id, tool_name))

    # Fetch all apps concurrently
    async def fetch_tools(app: str):
        try:
            tools_dict = await registry.list_tools(app)
            return app, {tool_unit["name"] for tool_unit in tools_dict}
        except Exception:
            # Unknown app (or registry error) — signalled as None so every tool
            # under it is marked invalid below.
            return app, None

    results = await asyncio.gather(*(fetch_tools(app) for app in app_to_tools))

    # Build map of available tools per app
    for app, tools in results:
        if tools is not None:
            app_tool_list[app] = tools

    # Validate tool_ids
    for app, tool_entries in app_to_tools.items():
        available = app_tool_list.get(app)
        if available is None:
            incorrect.extend(tool_id for tool_id, _ in tool_entries)
            continue
        if app not in connected_apps and app not in unconnected:
            unconnected.add(app)
            # NOTE(review): get_authorization_url apparently returns a sentence
            # containing the URL; this substring parse (between the first ":" and
            # ". R") is fragile — a structured return value would be safer.
            text = registry.client.get_authorization_url(app)
            start = text.find(":") + 1
            end = text.find(". R", start)
            url = text[start:end].strip()
            markdown_link = f"[{app}]({url})"
            unconnected_links.append(markdown_link)
        for tool_id, tool_name in tool_entries:
            if tool_name in available:
                correct.append(tool_id)
            else:
                incorrect.append(tool_id)

    return correct, unconnected_links
|
|
@@ -1,388 +0,0 @@
|
|
|
1
|
-
import ast
|
|
2
|
-
import importlib
|
|
3
|
-
import re
|
|
4
|
-
from collections.abc import Sequence
|
|
5
|
-
from typing import Any
|
|
6
|
-
|
|
7
|
-
from langchain_core.messages import BaseMessage
|
|
8
|
-
|
|
9
|
-
# Per-string truncation limit used by light_copy below.
MAX_CHARS = 5000
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
def light_copy(data):
    """
    Deep copy a dict[str, any] or Sequence[any] with string truncation.

    Args:
        data: Either a dictionary with string keys, or a sequence of such dictionaries

    Returns:
        A deep copy where all string values are truncated to MAX_CHARS characters
        (with a "..." marker) and all sequences are capped at their first 20 items.
    """

    def _clip(value):
        # Long strings get cut to MAX_CHARS with an ellipsis marker; every
        # other type passes through untouched.
        if isinstance(value, str) and len(value) > MAX_CHARS:
            return value[:MAX_CHARS] + "..."
        return value

    def _copy_mapping(mapping):
        # Rebuild the dict key by key, recursing into nested containers.
        out = {}
        for key, value in mapping.items():
            if isinstance(value, dict):
                out[key] = _copy_mapping(value)
            elif isinstance(value, Sequence) and not isinstance(value, str):
                # Nested sequences are capped at 20 items.
                out[key] = [
                    _copy_mapping(item) if isinstance(item, dict) else _clip(item) for item in value[:20]
                ]
            else:
                out[key] = _clip(value)
        return out

    # Dispatch on the top-level shape of the input.
    if isinstance(data, dict):
        return _copy_mapping(data)
    if isinstance(data, Sequence) and not isinstance(data, str):
        # Top-level sequences get the same 20-item cap.
        return [_copy_mapping(item) if isinstance(item, dict) else _clip(item) for item in data[:20]]
    # Scalars (including plain strings) just get the truncation treatment.
    return _clip(data)
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
def get_message_text(msg: BaseMessage) -> str:
    """Get the text content of a message."""
    content = msg.content
    # Plain string content is returned verbatim.
    if isinstance(content, str):
        return content
    # A single dict part carries its text under the "text" key.
    if isinstance(content, dict):
        return content.get("text", "")
    # Otherwise content is a list of string/dict parts: concatenate each
    # part's text and strip the surrounding whitespace.
    pieces = []
    for part in content:
        if isinstance(part, str):
            pieces.append(part)
        else:
            pieces.append(part.get("text") or "")
    return "".join(pieces).strip()
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
def make_safe_function_name(name: str) -> str:
    """Convert a tool name to a valid Python function name."""
    # Collapse anything outside [a-zA-Z0-9_] to an underscore.
    sanitized = re.sub(r"[^a-zA-Z0-9_]", "_", name)
    if not sanitized:
        # Nothing usable remained in the input.
        return "unnamed_tool"
    if sanitized[0].isdigit():
        # Python identifiers cannot begin with a digit; prefix to keep it legal.
        return f"tool_{sanitized}"
    return sanitized
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
def derive_context(code: str, context: dict[str, Any]) -> dict[str, Any]:
    """
    Derive context from code by extracting classes, functions, and import statements.

    Args:
        code: Python code as a string
        context: Existing context dictionary to append to

    Returns:
        Updated context dictionary with extracted entities

    Notes:
        Extraction is AST-based; if the code does not parse, a best-effort
        regex fallback is used instead. The input `context` dict is mutated
        in place and also returned.
    """

    # Initialize context keys if they don't exist
    if "imports" not in context:
        context["imports"] = []
    if "classes" not in context:
        context["classes"] = []
    if "functions" not in context:
        context["functions"] = []

    try:
        # Parse the code into an AST
        tree = ast.parse(code)

        # Extract imports
        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                for alias in node.names:
                    if alias.asname:
                        import_stmt = f"import {alias.name} as {alias.asname}"
                    else:
                        import_stmt = f"import {alias.name}"
                    # Deduplicate by exact statement text.
                    if import_stmt not in context["imports"]:
                        context["imports"].append(import_stmt)

            elif isinstance(node, ast.ImportFrom):
                # node.module is None for relative imports like "from . import x".
                module = node.module or ""
                # Handle multiple imports in a single from statement
                import_names = []
                for alias in node.names:
                    if alias.asname:
                        import_names.append(f"{alias.name} as {alias.asname}")
                    else:
                        import_names.append(alias.name)

                import_stmt = f"from {module} import {', '.join(import_names)}"
                if import_stmt not in context["imports"]:
                    context["imports"].append(import_stmt)

        # Extract class definitions
        # NOTE(review): unlike functions below, classes are NOT filtered to
        # top-level only — nested classes are captured too. Confirm intentional.
        for node in ast.walk(tree):
            if isinstance(node, ast.ClassDef):
                # Get the class definition as a string (source slice by line span).
                class_lines = code.split("\n")[node.lineno - 1 : node.end_lineno]
                class_def = "\n".join(class_lines)

                # Clean up the class definition (remove leading/trailing whitespace)
                class_def = class_def.strip()

                if class_def not in context["classes"]:
                    context["classes"].append(class_def)

        # Extract function definitions (including async)
        for node in ast.walk(tree):
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
                func_lines = code.split("\n")[node.lineno - 1 : node.end_lineno]
                func_def = "\n".join(func_lines)

                # Only top-level functions (col_offset == 0)
                if node.col_offset == 0:
                    func_def = func_def.strip()
                    if func_def not in context["functions"]:
                        context["functions"].append(func_def)

    except SyntaxError:
        # If the code has syntax errors, try a simpler regex-based approach.
        # These patterns are approximate (no multiline handling, no parenthesized
        # import lists) — they only salvage the common simple cases.

        # Extract import statements using regex
        import_patterns = [
            r"import\s+(\w+(?:\.\w+)*)(?:\s+as\s+(\w+))?",
            r"from\s+(\w+(?:\.\w+)*)\s+import\s+(\w+(?:\s+as\s+\w+)?)",
        ]

        for pattern in import_patterns:
            matches = re.finditer(pattern, code)
            for match in matches:
                if "from" in pattern:
                    module = match.group(1)
                    imports = match.group(2).split(",")
                    for import_name in imports:
                        imp = import_name.strip()
                        if " as " in imp:
                            name, alias = imp.split(" as ")
                            import_stmt = f"from {module} import {name.strip()} as {alias.strip()}"
                        else:
                            import_stmt = f"from {module} import {imp}"
                        if import_stmt not in context["imports"]:
                            context["imports"].append(import_stmt)
                else:
                    module = match.group(1)
                    alias = match.group(2)
                    if alias:
                        import_stmt = f"import {module} as {alias}"
                    else:
                        import_stmt = f"import {module}"
                    if import_stmt not in context["imports"]:
                        context["imports"].append(import_stmt)

        # Extract class definitions using regex
        # Greedy-until-next-definition heuristic; may over- or under-capture bodies.
        class_pattern = r"class\s+(\w+).*?(?=class\s+\w+|def\s+\w+|$)"
        class_matches = re.finditer(class_pattern, code, re.DOTALL)
        for match in class_matches:
            class_def = match.group(0).strip()
            if class_def not in context["classes"]:
                context["classes"].append(class_def)

        # Extract function definitions using regex
        func_pattern = r"def\s+(\w+).*?(?=class\s+\w+|def\s+\w+|$)"
        func_matches = re.finditer(func_pattern, code, re.DOTALL)
        for match in func_matches:
            func_def = match.group(0).strip()
            if func_def not in context["functions"]:
                context["functions"].append(func_def)

    return context
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
def inject_context(
|
|
208
|
-
context_dict: dict[str, list[str]], existing_namespace: dict[str, Any] | None = None
|
|
209
|
-
) -> dict[str, Any]:
|
|
210
|
-
"""
|
|
211
|
-
Inject Python entities from a dictionary into a namespace.
|
|
212
|
-
|
|
213
|
-
This function takes a dictionary where keys represent entity types (imports, classes, functions, etc.)
|
|
214
|
-
and values are lists of entity definitions. It attempts to import or create these entities and returns
|
|
215
|
-
them in a namespace dictionary. Can optionally build upon an existing namespace and apply additional aliases.
|
|
216
|
-
|
|
217
|
-
Args:
|
|
218
|
-
context_dict: Dictionary with entity types as keys and lists of entity definitions as values.
|
|
219
|
-
Supported keys: 'imports', 'classes', 'functions'
|
|
220
|
-
- 'imports': List of import statements as strings (e.g., ['import pandas', 'import numpy as np'])
|
|
221
|
-
- 'classes': List of class definitions as strings
|
|
222
|
-
- 'functions': List of function definitions as strings
|
|
223
|
-
existing_namespace: Optional existing namespace to build upon. If provided, new entities
|
|
224
|
-
will be added to this namespace rather than creating a new one.
|
|
225
|
-
|
|
226
|
-
Returns:
|
|
227
|
-
Dictionary containing the injected entities as key-value pairs
|
|
228
|
-
|
|
229
|
-
Example:
|
|
230
|
-
context = {
|
|
231
|
-
'imports': ['import pandas as pd', 'import numpy as np'],
|
|
232
|
-
'classes': ['class MyClass:\n def __init__(self, x):\n self.x = x'],
|
|
233
|
-
'functions': ['def my_function(x):\n return x * 2']
|
|
234
|
-
}
|
|
235
|
-
existing_ns = {'math': <math module>, 'data': [1, 2, 3]}
|
|
236
|
-
namespace = inject_context(context, existing_ns)
|
|
237
|
-
# namespace will contain: {'math': <math module>, 'data': [1, 2, 3], 'pandas': <module>, 'pd': <module>, 'numpy': <module>, 'np': <module>, 'MyClass': <class>, 'MC': <class>, 'my_function': <function>, ...}
|
|
238
|
-
"""
|
|
239
|
-
|
|
240
|
-
# Start with existing namespace or create new one
|
|
241
|
-
namespace: dict[str, Any] = existing_namespace.copy() if existing_namespace is not None else {}
|
|
242
|
-
|
|
243
|
-
# Handle imports (execute import statements as strings)
|
|
244
|
-
if "imports" in context_dict:
|
|
245
|
-
for import_statement in context_dict["imports"]:
|
|
246
|
-
try:
|
|
247
|
-
# Execute the import statement in the current namespace
|
|
248
|
-
exec(import_statement, namespace)
|
|
249
|
-
except Exception as e:
|
|
250
|
-
# If execution fails, try to extract module name and create placeholder
|
|
251
|
-
|
|
252
|
-
# Handle different import patterns
|
|
253
|
-
import_match = re.search(r"import\s+(\w+)(?:\s+as\s+(\w+))?", import_statement)
|
|
254
|
-
if import_match:
|
|
255
|
-
module_name = import_match.group(1)
|
|
256
|
-
alias_name = import_match.group(2)
|
|
257
|
-
|
|
258
|
-
try:
|
|
259
|
-
# Try to import the module manually
|
|
260
|
-
module = importlib.import_module(module_name)
|
|
261
|
-
namespace[module_name] = module
|
|
262
|
-
if alias_name:
|
|
263
|
-
namespace[alias_name] = module
|
|
264
|
-
except ImportError:
|
|
265
|
-
# Create placeholders for missing imports
|
|
266
|
-
namespace[module_name] = f"<import '{module_name}' not available>"
|
|
267
|
-
if alias_name:
|
|
268
|
-
namespace[alias_name] = f"<import '{module_name}' as '{alias_name}' not available>"
|
|
269
|
-
else:
|
|
270
|
-
# If we can't parse the import statement, create a generic placeholder
|
|
271
|
-
namespace[f"import_{len(namespace)}"] = f"<import statement failed: {str(e)}>"
|
|
272
|
-
|
|
273
|
-
# Handle classes - execute class definitions as strings
|
|
274
|
-
if "classes" in context_dict:
|
|
275
|
-
for class_definition in context_dict["classes"]:
|
|
276
|
-
try:
|
|
277
|
-
# Execute the class definition in the current namespace
|
|
278
|
-
exec(class_definition, namespace)
|
|
279
|
-
except Exception:
|
|
280
|
-
# If execution fails, try to extract class name and create placeholder
|
|
281
|
-
|
|
282
|
-
class_match = re.search(r"class\s+(\w+)", class_definition)
|
|
283
|
-
if class_match:
|
|
284
|
-
class_name = class_match.group(1)
|
|
285
|
-
|
|
286
|
-
# Create a placeholder class
|
|
287
|
-
class PlaceholderClass:
|
|
288
|
-
def __init__(self, *args, **kwargs):
|
|
289
|
-
raise NotImplementedError("Class '{class_name}' failed to load")
|
|
290
|
-
|
|
291
|
-
namespace[class_name] = PlaceholderClass
|
|
292
|
-
else:
|
|
293
|
-
# If we can't extract class name, create a generic placeholder
|
|
294
|
-
class GenericPlaceholderClass:
|
|
295
|
-
def __init__(self, *args, **kwargs):
|
|
296
|
-
raise NotImplementedError("Class definition failed to load")
|
|
297
|
-
|
|
298
|
-
namespace[f"class_{len(namespace)}"] = GenericPlaceholderClass
|
|
299
|
-
|
|
300
|
-
# Handle functions - execute function definitions as strings
|
|
301
|
-
if "functions" in context_dict:
|
|
302
|
-
for function_definition in context_dict["functions"]:
|
|
303
|
-
try:
|
|
304
|
-
# Execute the function definition in the current namespace
|
|
305
|
-
exec(function_definition, namespace)
|
|
306
|
-
except Exception:
|
|
307
|
-
# If execution fails, try to extract function name and create placeholder
|
|
308
|
-
func_match = re.search(r"(async\s+)?def\s+(\w+)", function_definition)
|
|
309
|
-
if func_match:
|
|
310
|
-
func_name = func_match.group(2)
|
|
311
|
-
is_async = bool(func_match.group(1))
|
|
312
|
-
|
|
313
|
-
if is_async:
|
|
314
|
-
|
|
315
|
-
async def placeholder_func(*args, **kwargs):
|
|
316
|
-
raise NotImplementedError(f"Async function '{func_name}' failed to load")
|
|
317
|
-
else:
|
|
318
|
-
|
|
319
|
-
def placeholder_func(*args, **kwargs):
|
|
320
|
-
raise NotImplementedError(f"Function '{func_name}' failed to load")
|
|
321
|
-
|
|
322
|
-
placeholder_func.__name__ = func_name
|
|
323
|
-
namespace[func_name] = placeholder_func
|
|
324
|
-
|
|
325
|
-
return namespace
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
def schema_to_signature(schema: dict, func_name="my_function") -> str:
    """Render a JSON-schema-style property mapping as a Python function signature string."""
    json_to_py = {
        "integer": "int",
        "string": "str",
        "boolean": "bool",
        "null": "None",
    }

    def annotation_for(meta):
        # A direct "type" wins; otherwise collapse an "anyOf" union of types;
        # anything unrecognized falls back to Any.
        if "type" in meta:
            return json_to_py.get(meta["type"], "Any")
        if "anyOf" in meta:
            variants = {json_to_py.get(option["type"], "Any") for option in meta["anyOf"]}
            return " | ".join(variants)
        return "Any"

    # One "name: type = default" entry per schema property; missing defaults
    # render as None.
    params = [
        f"{name}: {annotation_for(meta)} = {repr(meta.get('default', None))}" for name, meta in schema.items()
    ]

    joined = ",\n    ".join(params)
    return f"def {func_name}(\n    {joined},\n):"
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
def smart_truncate(
    output: str, max_chars_full: int = 2000, max_lines_headtail: int = 20, summary_threshold: int = 10000
) -> str:
    """
    Truncates or summarizes output intelligently to avoid filling the context too fast.

    Args:
        output (str): The string output from code execution.
        max_chars_full (int): Max characters to keep full output.
        max_lines_headtail (int): Number of lines to keep from head and tail for medium outputs.
        summary_threshold (int): If truncated output exceeds this, hard-truncate.

    Returns:
        str: Truncated or summarized output.
    """
    # Small outputs pass through untouched.
    if len(output) <= max_chars_full:
        return output

    lines = output.splitlines()
    hidden = len(lines) - 2 * max_lines_headtail
    # Not enough lines to be worth eliding — keep everything.
    if hidden <= 0:
        return output

    # Keep the head and tail, replacing the middle with an elision marker.
    head = "\n".join(lines[:max_lines_headtail])
    tail = "\n".join(lines[-max_lines_headtail:])
    condensed = f"{head}\n... [truncated {hidden} lines] ...\n{tail}"

    # Even the condensed form can be huge (very long lines); hard-cap it.
    if len(condensed) > summary_threshold:
        condensed = condensed[:summary_threshold] + "\n... [output truncated to fit context] ..."

    return condensed
|
|
@@ -1,64 +0,0 @@
|
|
|
1
|
-
universal_mcp/agents/__init__.py,sha256=rXWhqmhXdhUdId7QzuQHUpWUwfioPmj7ksu6RHZvCFI,1368
|
|
2
|
-
universal_mcp/agents/base.py,sha256=GmagwWFJdPpp_yLeAfsrr4fu-zzxh7CgIlBJbTgFgWM,7072
|
|
3
|
-
universal_mcp/agents/cli.py,sha256=AG9e4iSX3GazT537573YrYT1wSaZYOr42rrYQ7xP3YA,1016
|
|
4
|
-
universal_mcp/agents/hil.py,sha256=_5PCK6q0goGm8qylJq44aSp2MadP-yCPvhOJYKqWLMo,3808
|
|
5
|
-
universal_mcp/agents/llm.py,sha256=hVRwjZs3MHl5_3BWedmurs2Jt1oZDfFX0Zj9F8KH7fk,1787
|
|
6
|
-
universal_mcp/agents/react.py,sha256=8XQvJ0HLVgc-K0qn9Ml48WGcgUGuIKtL67HatlT6Da0,3334
|
|
7
|
-
universal_mcp/agents/sandbox.py,sha256=Int2O8JNFPlB8c7gb86KRxlNbuV0zdz5_NCo_GMcCds,2876
|
|
8
|
-
universal_mcp/agents/simple.py,sha256=NSATg5TWzsRNS7V3LFiDG28WSOCIwCdcC1g7NRwg2nM,2095
|
|
9
|
-
universal_mcp/agents/utils.py,sha256=P6W9k6XAOBp6tdjC2VTP4tE0B2M4-b1EDmr-ylJ47Pw,7765
|
|
10
|
-
universal_mcp/agents/bigtool/__init__.py,sha256=mZG8dsaCVyKlm82otxtiTA225GIFLUCUUYPEIPF24uw,2299
|
|
11
|
-
universal_mcp/agents/bigtool/__main__.py,sha256=0i-fbd2yQ90qa8n2nM3luqoJVN9Reh5HZXR5oK7SAck,445
|
|
12
|
-
universal_mcp/agents/bigtool/agent.py,sha256=mtCDNN8WjE2hjJjooDqusmbferKBHeJMHrhXUPUWaVc,252
|
|
13
|
-
universal_mcp/agents/bigtool/context.py,sha256=ny7gd-vvVpUOYAeQbAEUT0A6Vm6Nn2qGywxTzPBzYFg,929
|
|
14
|
-
universal_mcp/agents/bigtool/graph.py,sha256=2Sy0dtevTWeT3hJDq4BDerZFvk_zJqx15j8VH2XLq8Y,5848
|
|
15
|
-
universal_mcp/agents/bigtool/prompts.py,sha256=Joi5mCzZX63aM_6eBrMOKuNRHjTkceVIibSsGBGqhYE,2041
|
|
16
|
-
universal_mcp/agents/bigtool/state.py,sha256=TQeGZD99okclkoCh5oz-VYIlEsC9yLQyDpnBnm7QCN8,759
|
|
17
|
-
universal_mcp/agents/bigtool/tools.py,sha256=-u80ta6xEaqzEMSzDVe3QZiTZm3YlgLkBD8WTghzClw,6315
|
|
18
|
-
universal_mcp/agents/builder/__main__.py,sha256=VJDJOr-dJJerT53ibh5LVqIsMJ0m0sG2UlzFB784pKw,11680
|
|
19
|
-
universal_mcp/agents/builder/builder.py,sha256=mh3MZpMVB1FE1DWzvMW9NnfiaF145VGn8cJzKSYUlzY,8587
|
|
20
|
-
universal_mcp/agents/builder/helper.py,sha256=8igR1b3Gy_N2u3WxHYKIWzvw7F5BMnfpO2IU74v6vsw,2680
|
|
21
|
-
universal_mcp/agents/builder/prompts.py,sha256=8Xs6uzTUHguDRngVMLak3lkXFkk2VV_uQXaDllzP5cI,4670
|
|
22
|
-
universal_mcp/agents/builder/state.py,sha256=7DeWllxfN-yD6cd9wJ3KIgjO8TctkJvVjAbZT8W_zqk,922
|
|
23
|
-
universal_mcp/agents/codeact/__init__.py,sha256=rLE8gvOo5H4YSr71DRq76b3RV3uuotxuAy_VnBVaVwk,60
|
|
24
|
-
universal_mcp/agents/codeact/__main__.py,sha256=W2cHXRwH1dZG3ETIkMwUqA_d62K3IctHP-FDZWDjxdw,1067
|
|
25
|
-
universal_mcp/agents/codeact/agent.py,sha256=sKZWokTHcuL68Y6SNyaaHe6_XkWxaIq36TrNmPJfQto,9762
|
|
26
|
-
universal_mcp/agents/codeact/models.py,sha256=2fdAcF5bxWDpljjEwDEdPBflTMShSPwwncHrphRjsYg,222
|
|
27
|
-
universal_mcp/agents/codeact/prompts.py,sha256=EMI-imnd0Ps0Bd2FOvSqgiicvvtFFu0MF9s93PiC_3k,4493
|
|
28
|
-
universal_mcp/agents/codeact/sandbox.py,sha256=NjN6ISj8psFtHf8V0w24ChJdUMUWkq7OrlbHdzm4wBc,2299
|
|
29
|
-
universal_mcp/agents/codeact/state.py,sha256=WTPfpxDlGRnlr5tZuXMg_KU7GS7TZbnrIKslOvZLbQI,565
|
|
30
|
-
universal_mcp/agents/codeact/utils.py,sha256=JUbT_HYGS_D1BzmzoVpORIe7SGur1KgJguTZ_1tZ4JY,1918
|
|
31
|
-
universal_mcp/agents/codeact0/__init__.py,sha256=ebKkpgg-0UnsvDtagEJ2tMer1VsfhmEE5KJcFzUk9fU,133
|
|
32
|
-
universal_mcp/agents/codeact0/__main__.py,sha256=xeqNuawP9M8JVAnkhLesalnpI_TakC49ATJaSCzCsYs,880
|
|
33
|
-
universal_mcp/agents/codeact0/agent.py,sha256=9BInAQr3csES-XHSscmeJlYJ3-wQUHPvLOf-6wFILUU,6695
|
|
34
|
-
universal_mcp/agents/codeact0/config.py,sha256=H-1woj_nhSDwf15F63WYn723y4qlRefXzGxuH81uYF0,2215
|
|
35
|
-
universal_mcp/agents/codeact0/langgraph_agent.py,sha256=ehjMV_Z1118pCFWB_Sa5H7XnUp0udsbUHjfjXjhIQM8,435
|
|
36
|
-
universal_mcp/agents/codeact0/llm_tool.py,sha256=q-hiqkKtjVmpyNceFoRgo7hvKh4HtQf_I1VudRUEPR0,11075
|
|
37
|
-
universal_mcp/agents/codeact0/playbook_agent.py,sha256=Eq_us8j7plxgep1LuJ4I3u8qxjS95_2vGEaPR3g8A-o,17796
|
|
38
|
-
universal_mcp/agents/codeact0/prompts.py,sha256=CF2X6zSK1lYQ9ef78cn0iN0oQN_tDe3T02Ecfn1o45U,8627
|
|
39
|
-
universal_mcp/agents/codeact0/sandbox.py,sha256=zMgHrWnQYkSkJb2MzfXvT3euCc4hvqzBE_EbX2_iLxA,3142
|
|
40
|
-
universal_mcp/agents/codeact0/state.py,sha256=Y-Rzn_S7--aXH18KPvyhqDqOOB-miu1lsAmLgmMlaAg,1259
|
|
41
|
-
universal_mcp/agents/codeact0/tools.py,sha256=7hcFJxR26w_VCOWL8Oec8Ezfn3Auyv3YgeRv9f8j9xo,7642
|
|
42
|
-
universal_mcp/agents/codeact0/utils.py,sha256=jAZItSd3KGDkY9PquSWRIFCj9N26K9Kt0HKQ_jwvvSQ,15944
|
|
43
|
-
universal_mcp/agents/shared/__main__.py,sha256=XxH5qGDpgFWfq7fwQfgKULXGiUgeTp_YKfcxftuVZq8,1452
|
|
44
|
-
universal_mcp/agents/shared/prompts.py,sha256=yjP3zbbuKi87qCj21qwTTicz8TqtkKgnyGSeEjMu3ho,3761
|
|
45
|
-
universal_mcp/agents/shared/tool_node.py,sha256=DC9F-Ri28Pam0u3sXWNODVgmj9PtAEUb5qP1qOoGgfs,9169
|
|
46
|
-
universal_mcp/agents/unified/README.md,sha256=lcZobDA7MVm7vOAefLr4EMlEmgmODhndOZhwrOQO8lg,3658
|
|
47
|
-
universal_mcp/agents/unified/__init__.py,sha256=Oi5nfloc1saGh0HuYZvw2h7NRRWKKVptBXg6PFK6lnw,60
|
|
48
|
-
universal_mcp/agents/unified/__main__.py,sha256=XzHdp_AEd7k2CaWmsUekk3r1eefjmZSKsPhyBBWQDHU,865
|
|
49
|
-
universal_mcp/agents/unified/agent.py,sha256=jQrEk_AcaWpbty5hlncI9cTOOw1W_U4KYQB7xiraoYA,13551
|
|
50
|
-
universal_mcp/agents/unified/langgraph_agent.py,sha256=7v76Uv_1UydCsQpDj99k1FtR1wm7e9yZihwCYdFebwY,403
|
|
51
|
-
universal_mcp/agents/unified/llm_tool.py,sha256=-pAz04OrbZ_dJ2ueysT1qZd02DrbLY4EbU0tiuF_UNU,798
|
|
52
|
-
universal_mcp/agents/unified/prompts.py,sha256=ojHoZUJ1tLc7CwOZPWRJvzdwC_H3hE6PgZ9GKfHqAhU,9521
|
|
53
|
-
universal_mcp/agents/unified/sandbox.py,sha256=zMgHrWnQYkSkJb2MzfXvT3euCc4hvqzBE_EbX2_iLxA,3142
|
|
54
|
-
universal_mcp/agents/unified/state.py,sha256=6gtl1TV4qnnvkhXgHntH13jlVL53S00dFtw_mPKTFw4,1380
|
|
55
|
-
universal_mcp/agents/unified/tools.py,sha256=7hcFJxR26w_VCOWL8Oec8Ezfn3Auyv3YgeRv9f8j9xo,7642
|
|
56
|
-
universal_mcp/agents/unified/utils.py,sha256=jAZItSd3KGDkY9PquSWRIFCj9N26K9Kt0HKQ_jwvvSQ,15944
|
|
57
|
-
universal_mcp/applications/filesystem/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
58
|
-
universal_mcp/applications/filesystem/app.py,sha256=0TRjjm8YnslVRSmfkXI7qQOAlqWlD1eEn8Jm0xBeigs,5561
|
|
59
|
-
universal_mcp/applications/llm/__init__.py,sha256=_XGRxN3O1--ZS5joAsPf8IlI9Qa6negsJrwJ5VJXno0,46
|
|
60
|
-
universal_mcp/applications/llm/app.py,sha256=oqX3byvlFRmeRo4jJJxUBGy-iTDGm2fplMEKA2pcMtw,12743
|
|
61
|
-
universal_mcp/applications/ui/app.py,sha256=c7OkZsO2fRtndgAzAQbKu-1xXRuRp9Kjgml57YD2NR4,9459
|
|
62
|
-
universal_mcp_agents-0.1.19rc1.dist-info/METADATA,sha256=6euKZt6O6J-GgvtqE57uQbZRrNtvL7Al2wGdOEBALvQ,881
|
|
63
|
-
universal_mcp_agents-0.1.19rc1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
64
|
-
universal_mcp_agents-0.1.19rc1.dist-info/RECORD,,
|
|
File without changes
|