universal-mcp-agents 0.1.11__py3-none-any.whl → 0.1.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/agents/__init__.py +17 -19
- universal_mcp/agents/base.py +10 -7
- universal_mcp/agents/{bigtoolcache → bigtool}/__init__.py +2 -2
- universal_mcp/agents/{bigtoolcache → bigtool}/__main__.py +0 -1
- universal_mcp/agents/{bigtoolcache → bigtool}/agent.py +0 -1
- universal_mcp/agents/{bigtoolcache → bigtool}/graph.py +6 -5
- universal_mcp/agents/builder/__main__.py +125 -0
- universal_mcp/agents/builder/builder.py +225 -0
- universal_mcp/agents/builder/prompts.py +173 -0
- universal_mcp/agents/builder/state.py +24 -0
- universal_mcp/agents/cli.py +3 -2
- universal_mcp/agents/codeact/__main__.py +2 -4
- universal_mcp/agents/codeact/agent.py +165 -63
- universal_mcp/agents/codeact/models.py +11 -0
- universal_mcp/agents/codeact/prompts.py +12 -12
- universal_mcp/agents/codeact/sandbox.py +73 -23
- universal_mcp/agents/codeact/state.py +2 -0
- universal_mcp/agents/codeact0/__init__.py +3 -0
- universal_mcp/agents/codeact0/__main__.py +35 -0
- universal_mcp/agents/codeact0/agent.py +136 -0
- universal_mcp/agents/codeact0/config.py +77 -0
- universal_mcp/agents/codeact0/langgraph_graph.py +17 -0
- universal_mcp/agents/codeact0/legacy_codeact.py +104 -0
- universal_mcp/agents/codeact0/llm_tool.py +379 -0
- universal_mcp/agents/codeact0/prompts.py +156 -0
- universal_mcp/agents/codeact0/sandbox.py +90 -0
- universal_mcp/agents/codeact0/state.py +12 -0
- universal_mcp/agents/codeact0/usecases/1-unsubscribe.yaml +4 -0
- universal_mcp/agents/codeact0/usecases/10-reddit2.yaml +10 -0
- universal_mcp/agents/codeact0/usecases/11-github.yaml +13 -0
- universal_mcp/agents/codeact0/usecases/2-reddit.yaml +27 -0
- universal_mcp/agents/codeact0/usecases/2.1-instructions.md +81 -0
- universal_mcp/agents/codeact0/usecases/2.2-instructions.md +71 -0
- universal_mcp/agents/codeact0/usecases/3-earnings.yaml +4 -0
- universal_mcp/agents/codeact0/usecases/4-maps.yaml +41 -0
- universal_mcp/agents/codeact0/usecases/5-gmailreply.yaml +8 -0
- universal_mcp/agents/codeact0/usecases/6-contract.yaml +6 -0
- universal_mcp/agents/codeact0/usecases/7-overnight.yaml +14 -0
- universal_mcp/agents/codeact0/usecases/8-sheets_chart.yaml +25 -0
- universal_mcp/agents/codeact0/usecases/9-learning.yaml +9 -0
- universal_mcp/agents/codeact0/utils.py +374 -0
- universal_mcp/agents/hil.py +4 -4
- universal_mcp/agents/planner/__init__.py +7 -1
- universal_mcp/agents/react.py +11 -3
- universal_mcp/agents/simple.py +12 -2
- universal_mcp/agents/utils.py +17 -0
- universal_mcp/applications/llm/__init__.py +3 -0
- universal_mcp/applications/llm/app.py +158 -0
- universal_mcp/applications/ui/app.py +118 -144
- {universal_mcp_agents-0.1.11.dist-info → universal_mcp_agents-0.1.12.dist-info}/METADATA +1 -1
- universal_mcp_agents-0.1.12.dist-info/RECORD +65 -0
- universal_mcp/agents/bigtool2/__init__.py +0 -67
- universal_mcp/agents/bigtool2/__main__.py +0 -23
- universal_mcp/agents/bigtool2/agent.py +0 -13
- universal_mcp/agents/bigtool2/graph.py +0 -155
- universal_mcp/agents/bigtool2/meta_tools.py +0 -120
- universal_mcp/agents/bigtool2/prompts.py +0 -15
- universal_mcp/agents/bigtoolcache/state.py +0 -27
- universal_mcp/agents/builder.py +0 -204
- universal_mcp_agents-0.1.11.dist-info/RECORD +0 -42
- /universal_mcp/agents/{bigtoolcache → bigtool}/context.py +0 -0
- /universal_mcp/agents/{bigtoolcache → bigtool}/prompts.py +0 -0
- /universal_mcp/agents/{bigtool2 → bigtool}/state.py +0 -0
- /universal_mcp/agents/{bigtoolcache → bigtool}/tools.py +0 -0
- {universal_mcp_agents-0.1.11.dist-info → universal_mcp_agents-0.1.12.dist-info}/WHEEL +0 -0
universal_mcp/agents/codeact0/utils.py
ADDED
@@ -0,0 +1,374 @@
+import re
+from collections.abc import Sequence
+from typing import Any
+
+from langchain_core.messages import BaseMessage
+from pydantic import ValidationError
+from requests import JSONDecodeError
+
+
+def light_copy(data):
+    """
+    Deep copy a dict[str, any] or Sequence[any] with string truncation.
+
+    Args:
+        data: Either a dictionary with string keys, or a sequence of such dictionaries
+
+    Returns:
+        A deep copy where all string values are truncated to 30 characters
+    """
+
+    def truncate_string(value):
+        """Truncate string to 30 chars, preserve other types"""
+        if isinstance(value, str) and len(value) > 30:
+            return value[:30] + "..."
+        return value
+
+    def copy_dict(d):
+        """Recursively copy a dictionary, truncating strings"""
+        result = {}
+        for key, value in d.items():
+            if isinstance(value, dict):
+                result[key] = copy_dict(value)
+            elif isinstance(value, Sequence) and not isinstance(value, str):
+                result[key] = [
+                    copy_dict(item) if isinstance(item, dict) else truncate_string(item) for item in value[:20]
+                ]  # Limit to first 20 items
+            else:
+                result[key] = truncate_string(value)
+        return result
+
+    # Handle the two main cases
+    if isinstance(data, dict):
+        return copy_dict(data)
+    elif isinstance(data, Sequence) and not isinstance(data, str):
+        return [
+            copy_dict(item) if isinstance(item, dict) else truncate_string(item) for item in data[:20]
+        ]  # Limit to first 20 items
+    else:
+        # For completeness, handle other types
+        return truncate_string(data)
+
+
+def get_message_text(msg: BaseMessage) -> str:
+    """Get the text content of a message."""
+    content = msg.content
+    if isinstance(content, str):
+        return content
+    elif isinstance(content, dict):
+        return content.get("text", "")
+    else:
+        txts = [c if isinstance(c, str) else (c.get("text") or "") for c in content]
+        return "".join(txts).strip()
+
+
+def make_safe_function_name(name: str) -> str:
+    """Convert a tool name to a valid Python function name."""
+    # Replace non-alphanumeric characters with underscores
+    safe_name = re.sub(r"[^a-zA-Z0-9_]", "_", name)
+    # Ensure the name doesn't start with a digit
+    if safe_name and safe_name[0].isdigit():
+        safe_name = f"tool_{safe_name}"
+    # Handle empty name edge case
+    if not safe_name:
+        safe_name = "unnamed_tool"
+    return safe_name
+
+
+def filter_retry_on(exc: Exception) -> bool:
+    import httpx
+    import requests
+
+    if isinstance(
+        exc,
+        (
+            ConnectionError,
+            JSONDecodeError,
+            ValidationError,
+        ),
+    ):
+        return True
+    if isinstance(
+        exc,
+        (
+            ValueError,
+            TypeError,
+            ArithmeticError,
+            ImportError,
+            LookupError,
+            NameError,
+            SyntaxError,
+            RuntimeError,
+            ReferenceError,
+            StopIteration,
+            StopAsyncIteration,
+            OSError,
+        ),
+    ):
+        return False
+    if isinstance(exc, httpx.HTTPStatusError):
+        return 500 <= exc.response.status_code < 600
+    if isinstance(exc, requests.HTTPError):
+        return 500 <= exc.response.status_code < 600 if exc.response else True
+    return True
+
+
+def derive_context(code: str, context: dict[str, Any]) -> dict[str, Any]:
+    """
+    Derive context from code by extracting classes, functions, and import statements.
+
+    Args:
+        code: Python code as a string
+        context: Existing context dictionary to append to
+
+    Returns:
+        Updated context dictionary with extracted entities
+    """
+    import ast
+    import re
+
+    # Initialize context keys if they don't exist
+    if "imports" not in context:
+        context["imports"] = []
+    if "classes" not in context:
+        context["classes"] = []
+    if "functions" not in context:
+        context["functions"] = []
+
+    try:
+        # Parse the code into an AST
+        tree = ast.parse(code)
+
+        # Extract imports
+        for node in ast.walk(tree):
+            if isinstance(node, ast.Import):
+                for alias in node.names:
+                    if alias.asname:
+                        import_stmt = f"import {alias.name} as {alias.asname}"
+                    else:
+                        import_stmt = f"import {alias.name}"
+                    if import_stmt not in context["imports"]:
+                        context["imports"].append(import_stmt)
+
+            elif isinstance(node, ast.ImportFrom):
+                module = node.module or ""
+                # Handle multiple imports in a single from statement
+                import_names = []
+                for alias in node.names:
+                    if alias.asname:
+                        import_names.append(f"{alias.name} as {alias.asname}")
+                    else:
+                        import_names.append(alias.name)
+
+                import_stmt = f"from {module} import {', '.join(import_names)}"
+                if import_stmt not in context["imports"]:
+                    context["imports"].append(import_stmt)
+
+        # Extract class definitions
+        for node in ast.walk(tree):
+            if isinstance(node, ast.ClassDef):
+                # Get the class definition as a string
+                class_lines = code.split("\n")[node.lineno - 1 : node.end_lineno]
+                class_def = "\n".join(class_lines)
+
+                # Clean up the class definition (remove leading/trailing whitespace)
+                class_def = class_def.strip()
+
+                if class_def not in context["classes"]:
+                    context["classes"].append(class_def)
+
+        # Extract function definitions (only top-level functions, not class methods)
+        for node in ast.walk(tree):
+            if isinstance(node, ast.FunctionDef):
+                # Get the function definition as a string
+                func_lines = code.split("\n")[node.lineno - 1 : node.end_lineno]
+                func_def = "\n".join(func_lines)
+
+                # Check if this is a top-level function by looking at indentation
+                # Top-level functions should start at column 0 (no indentation)
+                if node.col_offset == 0:
+                    # Clean up the function definition (remove leading/trailing whitespace)
+                    func_def = func_def.strip()
+
+                    if func_def not in context["functions"]:
+                        context["functions"].append(func_def)
+
+    except SyntaxError:
+        # If the code has syntax errors, try a simpler regex-based approach
+
+        # Extract import statements using regex
+        import_patterns = [
+            r"import\s+(\w+(?:\.\w+)*)(?:\s+as\s+(\w+))?",
+            r"from\s+(\w+(?:\.\w+)*)\s+import\s+(\w+(?:\s+as\s+\w+)?)",
+        ]
+
+        for pattern in import_patterns:
+            matches = re.finditer(pattern, code)
+            for match in matches:
+                if "from" in pattern:
+                    module = match.group(1)
+                    imports = match.group(2).split(",")
+                    for imp in imports:
+                        imp = imp.strip()
+                        if " as " in imp:
+                            name, alias = imp.split(" as ")
+                            import_stmt = f"from {module} import {name.strip()} as {alias.strip()}"
+                        else:
+                            import_stmt = f"from {module} import {imp}"
+                        if import_stmt not in context["imports"]:
+                            context["imports"].append(import_stmt)
+                else:
+                    module = match.group(1)
+                    alias = match.group(2)
+                    if alias:
+                        import_stmt = f"import {module} as {alias}"
+                    else:
+                        import_stmt = f"import {module}"
+                    if import_stmt not in context["imports"]:
+                        context["imports"].append(import_stmt)
+
+        # Extract class definitions using regex
+        class_pattern = r"class\s+(\w+).*?(?=class\s+\w+|def\s+\w+|$)"
+        class_matches = re.finditer(class_pattern, code, re.DOTALL)
+        for match in class_matches:
+            class_def = match.group(0).strip()
+            if class_def not in context["classes"]:
+                context["classes"].append(class_def)
+
+        # Extract function definitions using regex
+        func_pattern = r"def\s+(\w+).*?(?=class\s+\w+|def\s+\w+|$)"
+        func_matches = re.finditer(func_pattern, code, re.DOTALL)
+        for match in func_matches:
+            func_def = match.group(0).strip()
+            if func_def not in context["functions"]:
+                context["functions"].append(func_def)
+
+    return context
+
+
+def inject_context(
+    context_dict: dict[str, list[str]], existing_namespace: dict[str, Any] | None = None
+) -> dict[str, Any]:
+    """
+    Inject Python entities from a dictionary into a namespace.
+
+    This function takes a dictionary where keys represent entity types (imports, classes, functions, etc.)
+    and values are lists of entity definitions. It attempts to import or create these entities and returns
+    them in a namespace dictionary. Can optionally build upon an existing namespace and apply additional aliases.
+
+    Args:
+        context_dict: Dictionary with entity types as keys and lists of entity definitions as values.
+            Supported keys: 'imports', 'classes', 'functions'
+            - 'imports': List of import statements as strings (e.g., ['import pandas', 'import numpy as np'])
+            - 'classes': List of class definitions as strings
+            - 'functions': List of function definitions as strings
+        existing_namespace: Optional existing namespace to build upon. If provided, new entities
+            will be added to this namespace rather than creating a new one.
+
+    Returns:
+        Dictionary containing the injected entities as key-value pairs
+
+    Example:
+        context = {
+            'imports': ['import pandas as pd', 'import numpy as np'],
+            'classes': ['class MyClass:\n    def __init__(self, x):\n        self.x = x'],
+            'functions': ['def my_function(x):\n    return x * 2']
+        }
+        existing_ns = {'math': <math module>, 'data': [1, 2, 3]}
+        namespace = inject_context(context, existing_ns)
+        # namespace will contain: {'math': <math module>, 'data': [1, 2, 3], 'pandas': <module>, 'pd': <module>, 'numpy': <module>, 'np': <module>, 'MyClass': <class>, 'MC': <class>, 'my_function': <function>, ...}
+    """
+    import importlib
+    from typing import Any
+
+    # Start with existing namespace or create new one
+    namespace: dict[str, Any] = existing_namespace.copy() if existing_namespace is not None else {}
+
+    # Handle imports (execute import statements as strings)
+    if "imports" in context_dict:
+        for import_statement in context_dict["imports"]:
+            try:
+                # Execute the import statement in the current namespace
+                exec(import_statement, namespace)
+            except Exception as e:
+                # If execution fails, try to extract module name and create placeholder
+                import re
+
+                # Handle different import patterns
+                import_match = re.search(r"import\s+(\w+)(?:\s+as\s+(\w+))?", import_statement)
+                if import_match:
+                    module_name = import_match.group(1)
+                    alias_name = import_match.group(2)
+
+                    try:
+                        # Try to import the module manually
+                        module = importlib.import_module(module_name)
+                        namespace[module_name] = module
+                        if alias_name:
+                            namespace[alias_name] = module
+                    except ImportError:
+                        # Create placeholders for missing imports
+                        namespace[module_name] = f"<import '{module_name}' not available>"
+                        if alias_name:
+                            namespace[alias_name] = f"<import '{module_name}' as '{alias_name}' not available>"
+                else:
+                    # If we can't parse the import statement, create a generic placeholder
+                    namespace[f"import_{len(namespace)}"] = f"<import statement failed: {str(e)}>"
+
+    # Handle classes - execute class definitions as strings
+    if "classes" in context_dict:
+        for class_definition in context_dict["classes"]:
+            try:
+                # Execute the class definition in the current namespace
+                exec(class_definition, namespace)
+            except Exception:
+                # If execution fails, try to extract class name and create placeholder
+                import re
+
+                class_match = re.search(r"class\s+(\w+)", class_definition)
+                if class_match:
+                    class_name = class_match.group(1)
+
+                    # Create a placeholder class
+                    class PlaceholderClass:
+                        def __init__(self, *args, **kwargs):
+                            raise NotImplementedError(f"Class '{class_name}' failed to load: {str(e)}")
+
+                    namespace[class_name] = PlaceholderClass
+                else:
+                    # If we can't extract class name, create a generic placeholder
+                    class GenericPlaceholderClass:
+                        def __init__(self, *args, **kwargs):
+                            raise NotImplementedError(f"Class definition failed to load: {str(e)}")
+
+                    namespace[f"class_{len(namespace)}"] = GenericPlaceholderClass
+
+    # Handle functions - execute function definitions as strings
+    if "functions" in context_dict:
+        for function_definition in context_dict["functions"]:
+            try:
+                # Execute the function definition in the current namespace
+                exec(function_definition, namespace)
+            except Exception:
+                # If execution fails, try to extract function name and create placeholder
+                import re
+
+                func_match = re.search(r"def\s+(\w+)", function_definition)
+                if func_match:
+                    func_name = func_match.group(1)
+
+                    # Create a placeholder function
+                    def placeholder_func(*args, **kwargs):
+                        raise NotImplementedError(f"Function '{func_name}' failed to load: {str(e)}")
+
+                    placeholder_func.__name__ = func_name
+                    namespace[func_name] = placeholder_func
+                else:
+                    # If we can't extract function name, create a generic placeholder
+                    def generic_placeholder_func(*args, **kwargs):
+                        raise NotImplementedError(f"Function definition failed to load: {str(e)}")
+
+                    generic_placeholder_func.__name__ = f"func_{len(namespace)}"
+                    namespace[generic_placeholder_func.__name__] = generic_placeholder_func
+
+    return namespace
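
For orientation, here is a minimal usage sketch (not part of the package diff) showing how the new derive_context and inject_context helpers compose; the import path assumes the module ships as universal_mcp/agents/codeact0/utils.py per the file list above.

# Sketch only: round-trip a code snippet through the new helpers.
from universal_mcp.agents.codeact0.utils import derive_context, inject_context

snippet = "import math\n\ndef double(x):\n    return x * 2\n"

ctx = derive_context(snippet, {})   # collects "import math" and the double() source
ns = inject_context(ctx)            # re-executes them into a fresh namespace
print(ns["double"](21))             # 42
print(ns["math"].pi)                # the imported module is available as well
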
universal_mcp/agents/hil.py
CHANGED
@@ -74,7 +74,7 @@ def handle_interrupt(interrupt: Interrupt) -> str | bool:
 
 class HilAgent(BaseAgent):
     def __init__(self, name: str, instructions: str, model: str):
-        super().__init__(name, instructions, model)
+        super().__init__(name=name, instructions=instructions, model=model)
         self.llm = load_chat_model(model)
         self._graph = self._build_graph()
 
@@ -103,9 +103,9 @@ if __name__ == "__main__":
     import asyncio
 
     agent = HilAgent(
-        "Hil Agent",
-        "You are a friendly agent that asks for the user's name and greets them.",
-        "openrouter/auto",
+        name="Hil Agent",
+        instructions="You are a friendly agent that asks for the user's name and greets them.",
+        model="openrouter/auto",
     )
 
     asyncio.run(agent.run_interactive())
universal_mcp/agents/planner/__init__.py
CHANGED
@@ -20,7 +20,13 @@ class PlannerAgent(BaseAgent):
         executor_agent_cls: type[BaseAgent] = ReactAgent,
         **kwargs,
     ):
-        super().__init__(
+        super().__init__(
+            name=name,
+            instructions=instructions,
+            model=model,
+            memory=memory,
+            **kwargs,
+        )
         self.app_registry = registry
         self.llm = load_chat_model(model)
         self.executor_agent_cls = executor_agent_cls
universal_mcp/agents/react.py
CHANGED
@@ -1,10 +1,10 @@
 from langgraph.checkpoint.base import BaseCheckpointSaver
 from langgraph.prebuilt import create_react_agent
 from loguru import logger
+from rich import print
 from universal_mcp.agentr.registry import AgentrRegistry
 from universal_mcp.tools.registry import ToolRegistry
 from universal_mcp.types import ToolConfig, ToolFormat
-from rich import print
 
 from universal_mcp.agents.base import BaseAgent
 from universal_mcp.agents.llm import load_chat_model
@@ -37,7 +37,13 @@ class ReactAgent(BaseAgent):
         max_iterations: int = 10,
         **kwargs,
     ):
-        super().__init__(
+        super().__init__(
+            name=name,
+            instructions=instructions,
+            model=model,
+            memory=memory,
+            **kwargs,
+        )
         self.llm = load_chat_model(model)
         self.tools = tools or {}
         if "ui" not in self.tools:
@@ -52,6 +58,8 @@ class ReactAgent(BaseAgent):
             "http_delete",
             "http_patch",
             "read_file",
+            "web_search",
+            "web_content",
         ]
         self.max_iterations = max_iterations
         self.registry = registry
@@ -80,7 +88,7 @@ class ReactAgent(BaseAgent):
 
 async def main():
     agent = ReactAgent(
-        "Universal React Agent",
+        name="Universal React Agent",
         instructions="Be very concise in your answers.",
         model="azure/gpt-4o",
         tools={"google-mail": ["send_email"]},
universal_mcp/agents/simple.py
CHANGED
@@ -32,7 +32,13 @@ class SimpleAgent(BaseAgent):
         memory: BaseCheckpointSaver = None,
         **kwargs,
     ):
-        super().__init__(
+        super().__init__(
+            name=name,
+            instructions=instructions,
+            model=model,
+            memory=memory,
+            **kwargs,
+        )
         self.llm = load_chat_model(model)
 
     def _build_system_message(self):
@@ -55,7 +61,11 @@ class SimpleAgent(BaseAgent):
 
 
 async def main():
-    agent = SimpleAgent(
+    agent = SimpleAgent(
+        name="Simple Agent",
+        instructions="Act as a 14 year old kid, reply in Gen-Z lingo",
+        model="azure/gpt-5-mini",
+    )
     output = await agent.invoke("What is the capital of France?")
     print(messages_to_list(output["messages"]))
 
universal_mcp/agents/utils.py
CHANGED
@@ -2,6 +2,7 @@ import json
 from contextlib import contextmanager
 
 from langchain_core.messages.base import BaseMessage
+from loguru import logger
 from rich.console import Console
 from rich.live import Live
 from rich.markdown import Markdown
@@ -126,3 +127,19 @@ Available commands:
 
 def messages_to_list(messages: list[BaseMessage]):
     return [{"type": message.type, "content": message.content} for message in messages]
+
+
+def get_message_text(message: BaseMessage):
+    try:
+        if isinstance(message.content, str):
+            return message.content
+        elif isinstance(message.content, dict):
+            return message.content.get("text", "")
+        elif isinstance(message.content, list):
+            return " ".join([c.get("text", "") for c in message.content])
+        else:
+            return ""
+    except Exception as e:
+        logger.error(f"Error getting message text: {e}")
+        logger.error(f"Message: {message}")
+        raise e
universal_mcp/applications/llm/app.py
ADDED
@@ -0,0 +1,158 @@
+import json
+from typing import Any, Literal, cast
+
+from langchain.chat_models import init_chat_model
+from langchain_openai import AzureChatOpenAI
+from pydantic import BaseModel, Field
+from universal_mcp.applications.application import BaseApplication
+
+MAX_RETRIES = 3
+
+
+def _get_context_as_string(source: Any | list[Any] | dict[str, Any]) -> str:
+    """Converts context to a string representation.
+
+    Args:
+        source: The source data to be converted. Can be a single value, a list of values, or a dictionary.
+
+    Returns:
+        A string representation of the source data, formatted with XML-like tags for dictionaries.
+    """
+
+    if not isinstance(source, dict):
+        if isinstance(source, list):
+            source = {f"doc_{i + 1}": str(doc) for i, doc in enumerate(source)}
+        else:
+            source = {"content": str(source)}
+
+    return "\n".join(f"<{k}>\n{str(v)}\n</{k}>" for k, v in source.items())
+
+
+class LLMApp(BaseApplication):
+    """
+    An application for leveraging Large Language Models (LLMs) for advanced text processing tasks.
+    """
+
+    def __init__(self, **kwargs):
+        """Initialize the LLMApp."""
+        super().__init__(name="llm")
+
+    def generate_text(
+        self,
+        task: str,
+        context: Any | list[Any] | dict[str, Any],
+        tone: str = "normal",
+        output_format: Literal["markdown", "html", "plain"] = "markdown",
+        length: Literal["very-short", "concise", "normal", "long"] = "concise",
+    ) -> str:
+        """
+        Generates well-written text for a high-level task using the provided context.
+
+        Use this function for creative writing, summarization, and other text generation tasks.
+
+        Args:
+            task: The main writing task or directive.
+            context: A single string, list of strings, or dictionary mapping labels to content.
+            tone: The desired tone of the output (e.g., "formal", "casual", "technical").
+            output_format: The desired output format ('markdown', 'html', 'plain').
+            length: The desired length of the output ('very-short', 'concise', 'normal', 'long').
+
+        Returns:
+            The generated text as a string.
+        """
+        context_str = _get_context_as_string(context)
+
+        prompt = f"{task.strip()}\n\n"
+        if output_format == "markdown":
+            prompt += "Please write in Markdown format.\n\n"
+        elif output_format == "html":
+            prompt += "Please write in HTML format.\n\n"
+        else:
+            prompt += "Please write in plain text format. Do not use markdown or HTML.\n\n"
+
+        if tone not in ["normal", "default", ""]:
+            prompt = f"{prompt} (Tone instructions: {tone})"
+
+        if length not in ["normal", "default", ""]:
+            prompt = f"{prompt} (Length instructions: {length})"
+
+        full_prompt = f"{prompt}\n\nContext:\n{context_str}\n\n"
+
+        model = AzureChatOpenAI(model="gpt-4o", temperature=0.7)
+        response = model.with_retry(stop_after_attempt=MAX_RETRIES).invoke(full_prompt)
+        return str(response.content)
+
+    def classify_data(
+        self,
+        task: str,
+        context: Any | list[Any] | dict[str, Any],
+        class_descriptions: dict[str, str],
+    ) -> dict[str, Any]:
+        """
+        Classifies data into one of several categories based on a given task and context.
+
+        Args:
+            task: The classification question and any specific rules or requirements.
+            context: The data to be classified, provided as a string, list, or dictionary.
+            class_descriptions: A dictionary mapping class names to their descriptions.
+
+        Returns:
+            A dictionary containing the classification probabilities, the reasoning, and the top class.
+        """
+        context_str = _get_context_as_string(context)
+
+        prompt = (
+            f"{task}\n\n"
+            f"This is a classification task.\nPossible classes and descriptions:\n"
+            f"{json.dumps(class_descriptions, indent=2)}\n\n"
+            f"Context:\n{context_str}\n\n"
+            "Return ONLY a valid JSON object, no extra text."
+        )
+
+        model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
+
+        class ClassificationResult(BaseModel):
+            probabilities: dict[str, float] = Field(..., description="The probabilities for each class.")
+            reason: str = Field(..., description="The reasoning behind the classification.")
+            top_class: str = Field(..., description="The class with the highest probability.")
+
+        response = (
+            model.with_structured_output(schema=ClassificationResult, method="json_mode")
+            .with_retry(stop_after_attempt=MAX_RETRIES)
+            .invoke(prompt)
+        )
+        return cast(dict[str, Any], response)
+
+    def extract_data(
+        self,
+        task: str,
+        source: Any | list[Any] | dict[str, Any],
+        output_schema: dict[str, Any],
+    ) -> dict[str, Any]:
+        """
+        Extracts structured data from unstructured text based on a provided JSON schema.
+
+        Args:
+            task: A description of the data to be extracted.
+            source: The unstructured data to extract from (e.g., document, webpage content).
+            output_schema: A valid JSON schema with a 'title' and 'description'.
+
+        Returns:
+            A dictionary containing the extracted data, matching the provided schema.
+        """
+        context_str = _get_context_as_string(source)
+
+        prompt = (
+            f"{task}\n\n"
+            f"Context:\n{context_str}\n\n"
+            "Return ONLY a valid JSON object that conforms to the provided schema, with no extra text."
+        )
+
+        model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
+
+        response = (
+            model.with_structured_output(schema=output_schema, method="json_mode")
+            .with_retry(stop_after_attempt=MAX_RETRIES)
+            .invoke(prompt)
+        )
+        return cast(dict[str, Any], response)
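
A similar sketch (again not part of the diff) of how the new LLMApp tools might be invoked directly; it assumes the module is importable as universal_mcp.applications.llm.app and that credentials for the underlying Azure/Anthropic chat models are already configured in the environment.

from universal_mcp.applications.llm.app import LLMApp

app = LLMApp()

# Free-form generation grounded in labelled context.
summary = app.generate_text(
    task="Summarize this release for a changelog entry",
    context={"notes": "Adds a builder agent, a codeact0 agent, and an LLM application."},
    length="very-short",
)

# Classification with explicit class descriptions.
result = app.classify_data(
    task="Is this user feedback positive or negative?",
    context="The new agents are a big improvement.",
    class_descriptions={"positive": "Praise or approval", "negative": "Complaints or criticism"},
)
print(summary)
print(result["top_class"], result["probabilities"])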