universal-mcp-agents 0.1.18__py3-none-any.whl → 0.1.19rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of universal-mcp-agents has been flagged as potentially problematic.

@@ -0,0 +1,388 @@
+ import ast
+ import importlib
+ import re
+ from collections.abc import Sequence
+ from typing import Any
+
+ from langchain_core.messages import BaseMessage
+
+ MAX_CHARS = 5000
+
+
+ def light_copy(data):
+     """
+     Deep copy a dict[str, any] or Sequence[any] with string truncation.
+
+     Args:
+         data: Either a dictionary with string keys, or a sequence of such dictionaries
+
+     Returns:
+         A deep copy where all string values are truncated to MAX_CHARS characters
+     """
+
+     def truncate_string(value):
+         """Truncate string to MAX_CHARS chars, preserve other types"""
+         if isinstance(value, str) and len(value) > MAX_CHARS:
+             return value[:MAX_CHARS] + "..."
+         return value
+
+     def copy_dict(d):
+         """Recursively copy a dictionary, truncating strings"""
+         result = {}
+         for key, value in d.items():
+             if isinstance(value, dict):
+                 result[key] = copy_dict(value)
+             elif isinstance(value, Sequence) and not isinstance(value, str):
+                 result[key] = [
+                     copy_dict(item) if isinstance(item, dict) else truncate_string(item) for item in value[:20]
+                 ]  # Limit to first 20 items
+             else:
+                 result[key] = truncate_string(value)
+         return result
+
+     # Handle the two main cases
+     if isinstance(data, dict):
+         return copy_dict(data)
+     elif isinstance(data, Sequence) and not isinstance(data, str):
+         return [
+             copy_dict(item) if isinstance(item, dict) else truncate_string(item) for item in data[:20]
+         ]  # Limit to first 20 items
+     else:
+         # For completeness, handle other types
+         return truncate_string(data)
+
+
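For reference, a minimal usage sketch of light_copy; the oversized values below are purely illustrative:

big = {"text": "x" * 10000, "meta": {"note": "y" * 6000}, "items": list(range(100))}
copied = light_copy(big)
assert len(copied["text"]) == 5003          # truncated to MAX_CHARS plus "..."
assert len(copied["meta"]["note"]) == 5003  # nested dicts are truncated too
assert len(copied["items"]) == 20           # sequences are limited to their first 20 items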
+ def get_message_text(msg: BaseMessage) -> str:
+     """Get the text content of a message."""
+     content = msg.content
+     if isinstance(content, str):
+         return content
+     elif isinstance(content, dict):
+         return content.get("text", "")
+     else:
+         txts = [c if isinstance(c, str) else (c.get("text") or "") for c in content]
+         return "".join(txts).strip()
+
+
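A short sketch of how get_message_text handles the common content shapes; HumanMessage is used only for illustration:

from langchain_core.messages import HumanMessage

print(get_message_text(HumanMessage(content="plain string")))
print(get_message_text(HumanMessage(content=[{"type": "text", "text": "part one "}, "part two"])))
# -> "plain string" and "part one part two"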
+ def make_safe_function_name(name: str) -> str:
+     """Convert a tool name to a valid Python function name."""
+     # Replace non-alphanumeric characters with underscores
+     safe_name = re.sub(r"[^a-zA-Z0-9_]", "_", name)
+     # Ensure the name doesn't start with a digit
+     if safe_name and safe_name[0].isdigit():
+         safe_name = f"tool_{safe_name}"
+     # Handle empty name edge case
+     if not safe_name:
+         safe_name = "unnamed_tool"
+     return safe_name
+
+
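For example (inputs invented for illustration):

print(make_safe_function_name("google-mail.send_email"))  # google_mail_send_email
print(make_safe_function_name("2fa/verify"))              # tool_2fa_verify
print(make_safe_function_name(""))                        # unnamed_tool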
+ def derive_context(code: str, context: dict[str, Any]) -> dict[str, Any]:
+     """
+     Derive context from code by extracting classes, functions, and import statements.
+
+     Args:
+         code: Python code as a string
+         context: Existing context dictionary to append to
+
+     Returns:
+         Updated context dictionary with extracted entities
+     """
+
+     # Initialize context keys if they don't exist
+     if "imports" not in context:
+         context["imports"] = []
+     if "classes" not in context:
+         context["classes"] = []
+     if "functions" not in context:
+         context["functions"] = []
+
+     try:
+         # Parse the code into an AST
+         tree = ast.parse(code)
+
+         # Extract imports
+         for node in ast.walk(tree):
+             if isinstance(node, ast.Import):
+                 for alias in node.names:
+                     if alias.asname:
+                         import_stmt = f"import {alias.name} as {alias.asname}"
+                     else:
+                         import_stmt = f"import {alias.name}"
+                     if import_stmt not in context["imports"]:
+                         context["imports"].append(import_stmt)
+
+             elif isinstance(node, ast.ImportFrom):
+                 module = node.module or ""
+                 # Handle multiple imports in a single from statement
+                 import_names = []
+                 for alias in node.names:
+                     if alias.asname:
+                         import_names.append(f"{alias.name} as {alias.asname}")
+                     else:
+                         import_names.append(alias.name)
+
+                 import_stmt = f"from {module} import {', '.join(import_names)}"
+                 if import_stmt not in context["imports"]:
+                     context["imports"].append(import_stmt)
+
+         # Extract class definitions
+         for node in ast.walk(tree):
+             if isinstance(node, ast.ClassDef):
+                 # Get the class definition as a string
+                 class_lines = code.split("\n")[node.lineno - 1 : node.end_lineno]
+                 class_def = "\n".join(class_lines)
+
+                 # Clean up the class definition (remove leading/trailing whitespace)
+                 class_def = class_def.strip()
+
+                 if class_def not in context["classes"]:
+                     context["classes"].append(class_def)
+
+         # Extract function definitions (including async)
+         for node in ast.walk(tree):
+             if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
+                 func_lines = code.split("\n")[node.lineno - 1 : node.end_lineno]
+                 func_def = "\n".join(func_lines)
+
+                 # Only top-level functions (col_offset == 0)
+                 if node.col_offset == 0:
+                     func_def = func_def.strip()
+                     if func_def not in context["functions"]:
+                         context["functions"].append(func_def)
+
+     except SyntaxError:
+         # If the code has syntax errors, try a simpler regex-based approach
+
+         # Extract import statements using regex
+         import_patterns = [
+             r"import\s+(\w+(?:\.\w+)*)(?:\s+as\s+(\w+))?",
+             r"from\s+(\w+(?:\.\w+)*)\s+import\s+(\w+(?:\s+as\s+\w+)?)",
+         ]
+
+         for pattern in import_patterns:
+             matches = re.finditer(pattern, code)
+             for match in matches:
+                 if "from" in pattern:
+                     module = match.group(1)
+                     imports = match.group(2).split(",")
+                     for import_name in imports:
+                         imp = import_name.strip()
+                         if " as " in imp:
+                             name, alias = imp.split(" as ")
+                             import_stmt = f"from {module} import {name.strip()} as {alias.strip()}"
+                         else:
+                             import_stmt = f"from {module} import {imp}"
+                         if import_stmt not in context["imports"]:
+                             context["imports"].append(import_stmt)
+                 else:
+                     module = match.group(1)
+                     alias = match.group(2)
+                     if alias:
+                         import_stmt = f"import {module} as {alias}"
+                     else:
+                         import_stmt = f"import {module}"
+                     if import_stmt not in context["imports"]:
+                         context["imports"].append(import_stmt)
+
+         # Extract class definitions using regex
+         class_pattern = r"class\s+(\w+).*?(?=class\s+\w+|def\s+\w+|$)"
+         class_matches = re.finditer(class_pattern, code, re.DOTALL)
+         for match in class_matches:
+             class_def = match.group(0).strip()
+             if class_def not in context["classes"]:
+                 context["classes"].append(class_def)
+
+         # Extract function definitions using regex
+         func_pattern = r"def\s+(\w+).*?(?=class\s+\w+|def\s+\w+|$)"
+         func_matches = re.finditer(func_pattern, code, re.DOTALL)
+         for match in func_matches:
+             func_def = match.group(0).strip()
+             if func_def not in context["functions"]:
+                 context["functions"].append(func_def)
+
+     return context
+
+
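A small sketch of what derive_context collects from a code string; the snippet is made up:

snippet = """
import numpy as np

def double(x):
    return x * 2
"""
ctx = derive_context(snippet, {})
# ctx["imports"]   -> ["import numpy as np"]
# ctx["classes"]   -> []
# ctx["functions"] -> ["def double(x):\n    return x * 2"]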
+ def inject_context(
+     context_dict: dict[str, list[str]], existing_namespace: dict[str, Any] | None = None
+ ) -> dict[str, Any]:
+     """
+     Inject Python entities from a dictionary into a namespace.
+
+     This function takes a dictionary where keys represent entity types (imports, classes, functions, etc.)
+     and values are lists of entity definitions. It attempts to import or create these entities and returns
+     them in a namespace dictionary. Can optionally build upon an existing namespace.
+
+     Args:
+         context_dict: Dictionary with entity types as keys and lists of entity definitions as values.
+             Supported keys: 'imports', 'classes', 'functions'
+             - 'imports': List of import statements as strings (e.g., ['import pandas', 'import numpy as np'])
+             - 'classes': List of class definitions as strings
+             - 'functions': List of function definitions as strings
+         existing_namespace: Optional existing namespace to build upon. If provided, new entities
+             will be added to this namespace rather than creating a new one.
+
+     Returns:
+         Dictionary containing the injected entities as key-value pairs
+
+     Example:
+         context = {
+             'imports': ['import pandas as pd', 'import numpy as np'],
+             'classes': ['class MyClass:\n    def __init__(self, x):\n        self.x = x'],
+             'functions': ['def my_function(x):\n    return x * 2']
+         }
+         existing_ns = {'math': <math module>, 'data': [1, 2, 3]}
+         namespace = inject_context(context, existing_ns)
+         # namespace will contain: {'math': <math module>, 'data': [1, 2, 3], 'pandas': <module>, 'pd': <module>,
+         #     'numpy': <module>, 'np': <module>, 'MyClass': <class>, 'my_function': <function>, ...}
+     """
+
+     # Start with existing namespace or create new one
+     namespace: dict[str, Any] = existing_namespace.copy() if existing_namespace is not None else {}
+
+     # Handle imports (execute import statements as strings)
+     if "imports" in context_dict:
+         for import_statement in context_dict["imports"]:
+             try:
+                 # Execute the import statement in the current namespace
+                 exec(import_statement, namespace)
+             except Exception as e:
+                 # If execution fails, try to extract module name and create placeholder
+
+                 # Handle different import patterns
+                 import_match = re.search(r"import\s+(\w+)(?:\s+as\s+(\w+))?", import_statement)
+                 if import_match:
+                     module_name = import_match.group(1)
+                     alias_name = import_match.group(2)
+
+                     try:
+                         # Try to import the module manually
+                         module = importlib.import_module(module_name)
+                         namespace[module_name] = module
+                         if alias_name:
+                             namespace[alias_name] = module
+                     except ImportError:
+                         # Create placeholders for missing imports
+                         namespace[module_name] = f"<import '{module_name}' not available>"
+                         if alias_name:
+                             namespace[alias_name] = f"<import '{module_name}' as '{alias_name}' not available>"
+                 else:
+                     # If we can't parse the import statement, create a generic placeholder
+                     namespace[f"import_{len(namespace)}"] = f"<import statement failed: {str(e)}>"
+
+     # Handle classes - execute class definitions as strings
+     if "classes" in context_dict:
+         for class_definition in context_dict["classes"]:
+             try:
+                 # Execute the class definition in the current namespace
+                 exec(class_definition, namespace)
+             except Exception:
+                 # If execution fails, try to extract class name and create placeholder
+
+                 class_match = re.search(r"class\s+(\w+)", class_definition)
+                 if class_match:
+                     class_name = class_match.group(1)
+
+                     # Create a placeholder class
+                     class PlaceholderClass:
+                         def __init__(self, *args, **kwargs):
+                             raise NotImplementedError(f"Class '{class_name}' failed to load")
+
+                     namespace[class_name] = PlaceholderClass
+                 else:
+                     # If we can't extract class name, create a generic placeholder
+                     class GenericPlaceholderClass:
+                         def __init__(self, *args, **kwargs):
+                             raise NotImplementedError("Class definition failed to load")
+
+                     namespace[f"class_{len(namespace)}"] = GenericPlaceholderClass
+
+     # Handle functions - execute function definitions as strings
+     if "functions" in context_dict:
+         for function_definition in context_dict["functions"]:
+             try:
+                 # Execute the function definition in the current namespace
+                 exec(function_definition, namespace)
+             except Exception:
+                 # If execution fails, try to extract function name and create placeholder
+                 func_match = re.search(r"(async\s+)?def\s+(\w+)", function_definition)
+                 if func_match:
+                     func_name = func_match.group(2)
+                     is_async = bool(func_match.group(1))
+
+                     if is_async:
+
+                         async def placeholder_func(*args, **kwargs):
+                             raise NotImplementedError(f"Async function '{func_name}' failed to load")
+                     else:
+
+                         def placeholder_func(*args, **kwargs):
+                             raise NotImplementedError(f"Function '{func_name}' failed to load")
+
+                     placeholder_func.__name__ = func_name
+                     namespace[func_name] = placeholder_func
+
+     return namespace
+
+
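A hedged sketch of the intended derive_context → inject_context round trip, using an invented snippet:

ctx = derive_context("import json\n\ndef greet(name):\n    return f'hi {name}'\n", {})
ns = inject_context(ctx)
print(ns["greet"]("agent"))   # hi agent
print(ns["json"].dumps([1]))  # [1]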
+ def schema_to_signature(schema: dict, func_name="my_function") -> str:
+     type_map = {
+         "integer": "int",
+         "string": "str",
+         "boolean": "bool",
+         "null": "None",
+     }
+
+     params = []
+     for name, meta in schema.items():
+         # figure out type
+         if "type" in meta:
+             typ = type_map.get(meta["type"], "Any")
+         elif "anyOf" in meta:
+             types = [type_map.get(t["type"], "Any") for t in meta["anyOf"]]
+             typ = " | ".join(set(types))
+         else:
+             typ = "Any"
+
+         default = meta.get("default", None)
+         default_repr = repr(default)
+
+         params.append(f"{name}: {typ} = {default_repr}")
+
+     # join into signature
+     param_str = ",\n    ".join(params)
+     return f"def {func_name}(\n    {param_str},\n):"
+
+
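A brief sketch with a JSON-Schema-style properties mapping; the field names are invented, and the union member order may vary:

schema = {
    "query": {"type": "string"},
    "limit": {"type": "integer", "default": 10},
    "cursor": {"anyOf": [{"type": "string"}, {"type": "null"}]},
}
print(schema_to_signature(schema, func_name="search"))
# def search(
#     query: str = None,
#     limit: int = 10,
#     cursor: str | None = None,
# ):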
+ def smart_truncate(
+     output: str, max_chars_full: int = 2000, max_lines_headtail: int = 20, summary_threshold: int = 10000
+ ) -> str:
+     """
+     Truncates or summarizes output intelligently to avoid filling the context too fast.
+
+     Args:
+         output (str): The string output from code execution.
+         max_chars_full (int): Max characters to keep full output.
+         max_lines_headtail (int): Number of lines to keep from head and tail for medium outputs.
+         summary_threshold (int): If truncated output exceeds this, hard-truncate.
+
+     Returns:
+         str: Truncated or summarized output.
+     """
+     if len(output) <= max_chars_full:
+         return output  # Small output, include fully
+
+     lines = output.splitlines()
+     if len(lines) <= 2 * max_lines_headtail:
+         return output  # Medium output, include fully
+
+     # Medium-large output: take head + tail
+     head = "\n".join(lines[:max_lines_headtail])
+     tail = "\n".join(lines[-max_lines_headtail:])
+     truncated = f"{head}\n... [truncated {len(lines) - 2 * max_lines_headtail} lines] ...\n{tail}"
+
+     # If still too big, cut to summary threshold
+     if len(truncated) > summary_threshold:
+         truncated = truncated[:summary_threshold] + "\n... [output truncated to fit context] ..."
+
+     return truncated
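A short sketch of smart_truncate on a long multi-line string; the sizes are chosen only for illustration:

long_output = "\n".join(f"row {i}" for i in range(500))
short = smart_truncate(long_output)
# Keeps the first and last 20 lines, with a marker noting what was dropped.
print(short.splitlines()[20])  # ... [truncated 460 lines] ...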
File without changes
@@ -0,0 +1,160 @@
+ import fnmatch
+ import os
+ import pathlib
+ import re
+ import uuid
+
+ from loguru import logger
+ from universal_mcp.applications.application import BaseApplication
+
+
+ class FileSystemApp(BaseApplication):
+     """
+     A class to safely interact with the filesystem within a specified working directory.
+     """
+
+     def __init__(self, working_dir: str | None = None, **kwargs):
+         """
+         Initializes the FileSystemApp with a working directory.
+
+         Args:
+             working_dir: The absolute path to the directory where all operations will be performed.
+         """
+         super().__init__(name="Filesystem")
+
+         self.set_working_dir(working_dir or f"/tmp/{uuid.uuid4()}")
+
+     def set_working_dir(self, working_dir: str):
+         self.working_dir = pathlib.Path(working_dir).absolute()
+         # Create dir if not exists
+         self.working_dir.mkdir(parents=True, exist_ok=True)
+
+     def _is_safe_path(self, path: str) -> bool:
+         """
+         Checks if the given path resolves to a location inside the working directory.
+
+         Args:
+             path: The path to check, relative to the working directory.
+
+         Returns:
+             True if the path is safe, False otherwise.
+         """
+         # Resolve the path against the working directory before comparing, so that
+         # relative inputs do not make os.path.commonpath raise ValueError.
+         full_path = os.path.abspath(os.path.join(self.working_dir, path))
+         common_path = os.path.commonpath([str(self.working_dir), full_path])
+         return common_path == str(self.working_dir)
+
+     def create_file(self, path: str, content: str = "") -> None:
+         """
+         Creates a file with the given content.
+
+         Args:
+             path: The relative path to the file to create.
+             content: The content to write to the file.
+
+         Raises:
+             ValueError: If the path is outside the working directory.
+         """
+         if not self._is_safe_path(path):
+             error = f"Path is outside the working directory: {path} vs {self.working_dir}"
+             logger.error(error)
+             raise ValueError(error)
+
+         full_path = os.path.join(self.working_dir, path)
+         os.makedirs(os.path.dirname(full_path), exist_ok=True)
+         with open(full_path, "w") as f:
+             f.write(content)
+
+     def read_file(self, path: str) -> str:
+         """
+         Reads the content of a file.
+
+         Args:
+             path: The relative path to the file to read.
+
+         Returns:
+             The content of the file.
+
+         Raises:
+             ValueError: If the path is outside the working directory.
+             FileNotFoundError: If the file does not exist.
+         """
+         if not self._is_safe_path(path):
+             raise ValueError("Path is outside the working directory.")
+
+         full_path = os.path.join(self.working_dir, path)
+         if not os.path.exists(full_path):
+             raise FileNotFoundError(f"File not found: {full_path}")
+
+         with open(full_path) as f:
+             return f.read()
+
+     def list_files(self, path: str = ".", recursive: bool = False) -> list[str]:
+         """
+         Lists files in a directory.
+
+         Args:
+             path: The relative path to the directory to list.
+             recursive: Whether to list files recursively.
+
+         Returns:
+             A list of file paths.
+
+         Raises:
+             ValueError: If the path is outside the working directory.
+         """
+         if not self._is_safe_path(path):
+             raise ValueError("Path is outside the working directory.")
+
+         full_path = os.path.join(self.working_dir, path)
+         if not os.path.isdir(full_path):
+             raise ValueError(f"Path '{path}' is not a directory.")
+
+         files = []
+         if recursive:
+             for root, _, filenames in os.walk(full_path):
+                 for filename in filenames:
+                     files.append(os.path.relpath(os.path.join(root, filename), self.working_dir))
+         else:
+             for item in os.listdir(full_path):
+                 item_path = os.path.join(full_path, item)
+                 if os.path.isfile(item_path):
+                     files.append(os.path.relpath(item_path, self.working_dir))
+         return files
+
+     def grep(self, pattern: str, path: str = ".", file_pattern: str = "*") -> list[str]:
+         """
+         Searches for a pattern in files.
+
+         Args:
+             pattern: The regex pattern to search for.
+             path: The relative path to the directory to search in.
+             file_pattern: A glob pattern to filter files to search.
+
+         Returns:
+             A list of strings with "file:line_number:line" for each match.
+
+         Raises:
+             ValueError: If the path is outside the working directory.
+         """
+         if not self._is_safe_path(path):
+             raise ValueError("Path is outside the working directory.")
+
+         full_path = os.path.join(self.working_dir, path)
+         if not os.path.isdir(full_path):
+             raise ValueError(f"Path '{path}' is not a directory.")
+
+         matches = []
+         for root, _, filenames in os.walk(full_path):
+             for filename in fnmatch.filter(filenames, file_pattern):
+                 file_path = os.path.join(root, filename)
+                 try:
+                     with open(file_path, errors="ignore") as f:
+                         for i, line in enumerate(f, 1):
+                             if re.search(pattern, line):
+                                 relative_path = os.path.relpath(file_path, self.working_dir)
+                                 matches.append(f"{relative_path}:{i}:{line.strip()}")
+                 except OSError:
+                     continue  # Skip files that can't be opened
+         return matches
+
+     def list_tools(self):
+         return [self.create_file, self.grep, self.list_files, self.read_file]
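For orientation, a small usage sketch of FileSystemApp; the directory and file names are illustrative:

fs = FileSystemApp(working_dir="/tmp/fs-demo")
fs.create_file("notes/todo.txt", "write tests\nship release\n")
print(fs.read_file("notes/todo.txt"))
print(fs.list_files(".", recursive=True))  # ['notes/todo.txt']
print(fs.grep("release", ".", "*.txt"))    # ['notes/todo.txt:2:ship release']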
@@ -1,3 +1,3 @@
- from .app import LLMApp
+ from .app import LlmApp

- __all__ = ["LLMApp"]
+ __all__ = ["LlmApp"]