universal-mcp-agents 0.1.19rc1__py3-none-any.whl → 0.1.24rc3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/agents/__init__.py +15 -16
- universal_mcp/agents/base.py +46 -35
- universal_mcp/agents/bigtool/state.py +1 -1
- universal_mcp/agents/cli.py +2 -5
- universal_mcp/agents/codeact0/__init__.py +2 -3
- universal_mcp/agents/codeact0/__main__.py +4 -7
- universal_mcp/agents/codeact0/agent.py +444 -96
- universal_mcp/agents/codeact0/langgraph_agent.py +1 -1
- universal_mcp/agents/codeact0/llm_tool.py +2 -254
- universal_mcp/agents/codeact0/prompts.py +247 -137
- universal_mcp/agents/codeact0/sandbox.py +52 -18
- universal_mcp/agents/codeact0/state.py +26 -6
- universal_mcp/agents/codeact0/tools.py +400 -74
- universal_mcp/agents/codeact0/utils.py +175 -11
- universal_mcp/agents/codeact00/__init__.py +3 -0
- universal_mcp/agents/{unified → codeact00}/__main__.py +4 -6
- universal_mcp/agents/codeact00/agent.py +578 -0
- universal_mcp/agents/codeact00/config.py +77 -0
- universal_mcp/agents/{unified → codeact00}/langgraph_agent.py +2 -2
- universal_mcp/agents/{unified → codeact00}/llm_tool.py +1 -1
- universal_mcp/agents/codeact00/prompts.py +364 -0
- universal_mcp/agents/{unified → codeact00}/sandbox.py +52 -18
- universal_mcp/agents/codeact00/state.py +66 -0
- universal_mcp/agents/codeact00/tools.py +525 -0
- universal_mcp/agents/codeact00/utils.py +678 -0
- universal_mcp/agents/codeact01/__init__.py +3 -0
- universal_mcp/agents/{codeact → codeact01}/__main__.py +4 -11
- universal_mcp/agents/codeact01/agent.py +413 -0
- universal_mcp/agents/codeact01/config.py +77 -0
- universal_mcp/agents/codeact01/langgraph_agent.py +14 -0
- universal_mcp/agents/codeact01/llm_tool.py +25 -0
- universal_mcp/agents/codeact01/prompts.py +246 -0
- universal_mcp/agents/codeact01/sandbox.py +162 -0
- universal_mcp/agents/{unified → codeact01}/state.py +26 -10
- universal_mcp/agents/codeact01/tools.py +648 -0
- universal_mcp/agents/{unified → codeact01}/utils.py +175 -11
- universal_mcp/agents/llm.py +14 -4
- universal_mcp/agents/react.py +3 -3
- universal_mcp/agents/sandbox.py +124 -69
- universal_mcp/applications/llm/app.py +76 -24
- {universal_mcp_agents-0.1.19rc1.dist-info → universal_mcp_agents-0.1.24rc3.dist-info}/METADATA +6 -5
- universal_mcp_agents-0.1.24rc3.dist-info/RECORD +66 -0
- universal_mcp/agents/codeact/__init__.py +0 -3
- universal_mcp/agents/codeact/agent.py +0 -240
- universal_mcp/agents/codeact/models.py +0 -11
- universal_mcp/agents/codeact/prompts.py +0 -82
- universal_mcp/agents/codeact/sandbox.py +0 -85
- universal_mcp/agents/codeact/state.py +0 -11
- universal_mcp/agents/codeact/utils.py +0 -68
- universal_mcp/agents/codeact0/playbook_agent.py +0 -355
- universal_mcp/agents/unified/README.md +0 -45
- universal_mcp/agents/unified/__init__.py +0 -3
- universal_mcp/agents/unified/agent.py +0 -289
- universal_mcp/agents/unified/prompts.py +0 -192
- universal_mcp/agents/unified/tools.py +0 -188
- universal_mcp_agents-0.1.19rc1.dist-info/RECORD +0 -64
- {universal_mcp_agents-0.1.19rc1.dist-info → universal_mcp_agents-0.1.24rc3.dist-info}/WHEEL +0 -0
@@ -4,11 +4,90 @@ import re
 from collections.abc import Sequence
 from typing import Any
 
-from langchain_core.messages import BaseMessage
+from langchain_core.messages import AIMessage, BaseMessage
+from universal_mcp.types import ToolConfig
 
 MAX_CHARS = 5000
 
 
+def build_anthropic_cache_message(text: str, role: str = "system", ttl: str = "1h") -> list[dict[str, Any]]:
+    """Build a complete Anthropic cache messages array from text.
+
+    Returns a list with a single cache message whose content is the
+    cached Anthropic content array with ephemeral cache control and TTL.
+    """
+    return [
+        {
+            "role": role,
+            "content": [
+                {
+                    "type": "text",
+                    "text": text,
+                    "cache_control": {"type": "ephemeral", "ttl": ttl},
+                }
+            ],
+        }
+    ]
+
+
+def strip_thinking(messages: list[BaseMessage]):
+    """Remove Anthropic 'thinking' segments from the most recent AIMessage in-place.
+
+    Scans from the end to find the last AIMessage, then removes thinking blocks
+    from its content. Handles both plain-string and block-array content.
+    """
+    if not messages:
+        return messages
+
+    # Find the last AIMessage from the end
+    last_ai_index = None
+    for i in range(len(messages) - 1, -1, -1):
+        if isinstance(messages[i], AIMessage):
+            last_ai_index = i
+            break
+
+    if last_ai_index is None:
+        return messages
+
+    ai_msg = messages[last_ai_index]
+    content = ai_msg.content
+
+    # If it's already plain text, nothing to strip
+    if isinstance(content, str):
+        return messages
+
+    # If Anthropic-style content blocks
+    if isinstance(content, list):
+        filtered_output: list[object] = []
+        removed_any = False
+        for b in content:
+            is_thinking = False
+            if isinstance(b, dict):
+                t = b.get("type")
+                if t == "thinking":
+                    is_thinking = True
+                elif "thinking" in b and isinstance(b["thinking"], str):
+                    is_thinking = True
+
+            if is_thinking:
+                removed_any = True
+                continue
+            filtered_output.append(b)
+
+        if removed_any:
+            ai_msg.content = filtered_output
+            messages[last_ai_index] = ai_msg
+
+    return messages
+
+
+def add_tools(tool_config: ToolConfig, tools_to_add: ToolConfig):
+    for app_id, new_tools in tools_to_add.items():
+        all_tools = tool_config.get(app_id, []) + new_tools
+        tool_config[app_id] = list(set(all_tools))
+    return tool_config
+
+
 def light_copy(data):
     """
     Deep copy a dict[str, any] or Sequence[any] with string truncation.
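
Taken together, the new helpers cover prompt caching, thinking-block cleanup, and tool-config merging. A minimal usage sketch follows (illustrative only; it assumes these helpers are imported from the diffed utils module):

from langchain_core.messages import AIMessage, HumanMessage

# Build a cacheable system message (ephemeral cache_control with a 1h TTL)
system = build_anthropic_cache_message("You are a careful coding agent.", ttl="1h")

# Strip the Anthropic thinking block from the last AIMessage, in place
history = [
    HumanMessage("hi"),
    AIMessage(content=[
        {"type": "thinking", "thinking": "internal reasoning"},
        {"type": "text", "text": "Hello!"},
    ]),
]
strip_thinking(history)
assert history[-1].content == [{"type": "text", "text": "Hello!"}]

# Merge tool configs, de-duplicating per app
merged = add_tools({"gmail": ["send_email"]}, {"gmail": ["send_email", "list_messages"]})
# merged["gmail"] contains "send_email" and "list_messages" (set-based, so order is not guaranteed)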
@@ -325,31 +404,45 @@ def inject_context(
     return namespace
 
 
-def schema_to_signature(schema: dict, func_name="my_function") -> str:
+def schema_to_signature(schema: dict, func_name: str = "my_function") -> str:
+    """
+    Convert a JSON schema into a Python-style function signature string.
+    Handles fields with `type`, `anyOf`, defaults, and missing metadata safely.
+    """
     type_map = {
         "integer": "int",
         "string": "str",
         "boolean": "bool",
         "null": "None",
+        "number": "float",
+        "array": "list",
+        "object": "dict",
     }
 
     params = []
     for name, meta in schema.items():
-
-
+        if not isinstance(meta, dict):
+            typ = "Any"
+        elif "type" in meta:
             typ = type_map.get(meta["type"], "Any")
         elif "anyOf" in meta:
-            types = [
-
+            types = []
+            for t in meta["anyOf"]:
+                if not isinstance(t, dict):
+                    continue
+                t_type = t.get("type")
+                types.append(type_map.get(t_type, "Any") if t_type else "Any")
+            typ = " | ".join(sorted(set(types))) if types else "Any"
         else:
             typ = "Any"
 
-
-
-
-
+        # Handle defaults gracefully
+        default = meta.get("default")
+        if default is None:
+            params.append(f"{name}: {typ}")
+        else:
+            params.append(f"{name}: {typ} = {repr(default)}")
 
-    # join into signature
     param_str = ",\n    ".join(params)
     return f"def {func_name}(\n    {param_str},\n):"
 
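
The hardened function now tolerates non-dict field metadata and renders anyOf unions. A worked example (illustrative):

schema = {
    "query": {"type": "string"},
    "limit": {"type": "integer", "default": 10},
    "filters": {"anyOf": [{"type": "object"}, {"type": "null"}]},
}
print(schema_to_signature(schema, func_name="search"))
# def search(
#     query: str,
#     limit: int = 10,
#     filters: None | dict,
# ):

Note one consequence of the default handling: a field whose schema default is explicitly None is rendered without a default, since the code treats None as "no default".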
@@ -386,3 +479,74 @@ def smart_truncate(
         truncated = truncated[:summary_threshold] + "\n... [output truncated to fit context] ..."
 
     return truncated
+
+
+async def get_connected_apps_string(registry) -> str:
+    """Get a formatted string of connected applications from the registry."""
+    if not registry:
+        return ""
+
+    try:
+        # Get connected apps from registry
+        connections = await registry.list_connected_apps()
+        if not connections:
+            return "No applications are currently connected."
+
+        # Extract app names from connections
+        connected_app_ids = {connection["app_id"] for connection in connections}
+
+        # Format the apps list
+        apps_list = []
+        for app_id in connected_app_ids:
+            apps_list.append(f"- {app_id}")
+
+        return "\n".join(apps_list)
+    except Exception:
+        return "Unable to retrieve connected applications."
+
+def extract_plan_parameters(plan_steps: list[str]) -> list[dict[str, Any]]:
+    """
+    Extracts parameters from plan steps and formats them into a list of OpenAPI-like parameter objects.
+
+    Parses parameters enclosed in backticks, identifying their name, if they are required, and any default values.
+    e.g., `variable` -> {"name": "variable", "required": True}
+    e.g., `variable(default = 'value')` -> {"name": "variable", "required": False, "default": "value"}
+    """
+    parameters_map: dict[str, Any] = {}
+    # Regex to find anything inside backticks
+    outer_pattern = re.compile(r"`([^`]+)`")
+    # Regex to parse parameters with default values
+    inner_pattern = re.compile(r"^\s*(\w+)\s*\(\s*default\s*=\s*(.+)\s*\)\s*$")
+
+    for step in plan_steps:
+        matches = outer_pattern.findall(step)
+        for match in matches:
+            param_str = match.strip()
+            inner_match = inner_pattern.match(param_str)
+
+            if inner_match:
+                # Parameter with a default value
+                name, default_val_str = inner_match.groups()
+                default_value: Any
+                try:
+                    # Safely evaluate the default value (e.g., 'string', 123, True)
+                    default_value = ast.literal_eval(default_val_str)
+                except (ValueError, SyntaxError):
+                    # If it's not a valid literal, treat it as a string
+                    default_value = default_val_str
+                parameters_map[name] = {"required": False, "default": default_value}
+            else:
+                # Required parameter (no default value)
+                name = param_str
+                # Only set as required if it hasn't been defined with a default already
+                if name not in parameters_map:
+                    parameters_map[name] = {"required": True}
+
+    # Convert the map to the final list format
+    final_parameters = []
+    for name, details in sorted(parameters_map.items()):
+        param_obj = {"name": name}
+        param_obj.update(details)
+        final_parameters.append(param_obj)
+
+    return final_parameters
universal_mcp/agents/llm.py
CHANGED
@@ -4,26 +4,37 @@ from langchain_anthropic import ChatAnthropic
 from langchain_core.language_models import BaseChatModel
 from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain_openai import AzureChatOpenAI
+from loguru import logger
 
 
 @lru_cache(maxsize=8)
 def load_chat_model(
-    fully_specified_name: str,
+    fully_specified_name: str,
+    temperature: float = 1.0,
+    tags: tuple[str, ...] | None = None,
+    thinking: bool = True,
+    disable_streaming: bool = False,
 ) -> BaseChatModel:
     """Load a chat model from a fully specified name.
     Args:
         fully_specified_name (str): String in the format 'provider/model'.
     """
     fully_specified_name = fully_specified_name.replace("/", ":")
+    if tags:
+        if isinstance(tags, str):
+            tags = [tags]
+        else:
+            tags = list[str](tags)
     provider, model = fully_specified_name.split(":", maxsplit=1)
     if provider == "anthropic":
         return ChatAnthropic(
             model=model,
             temperature=temperature,
             thinking={"type": "enabled", "budget_tokens": 2048} if thinking else None,
-            max_tokens=
+            max_tokens=8096,
             tags=tags,
             stream_usage=True,
+            disable_streaming=disable_streaming,
         )  # pyright: ignore[reportCallIssue]
     elif provider == "azure":
         return AzureChatOpenAI(
@@ -33,6 +44,7 @@ def load_chat_model(
             temperature=temperature,
             tags=tags,
             stream_usage=True,
+            disable_streaming=disable_streaming,
         )
     elif provider == "gemini":
         return ChatGoogleGenerativeAI(model=model, temperature=temperature)
@@ -41,8 +53,6 @@ def load_chat_model(
 
 
 if __name__ == "__main__":
-    from loguru import logger
-
     models_to_test = [
         "azure/gpt-5-chat",
         "anthropic/claude-4-sonnet-20250514",
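
The expanded signature is straightforward to call; tags is typed as a tuple because load_chat_model is wrapped in @lru_cache, whose arguments must be hashable. An illustrative call:

llm = load_chat_model(
    "anthropic/claude-4-sonnet-20250514",
    temperature=0.2,
    tags=("agent", "quiet"),
    thinking=False,
    disable_streaming=True,
)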
universal_mcp/agents/react.py
CHANGED
@@ -1,5 +1,5 @@
+from langchain.agents import create_agent
 from langgraph.checkpoint.base import BaseCheckpointSaver
-from langgraph.prebuilt import create_react_agent
 from loguru import logger
 from rich import print
 from universal_mcp.agentr.registry import AgentrRegistry
@@ -75,10 +75,10 @@ class ReactAgent(BaseAgent):
             tools = []
 
         logger.debug(f"Initialized ReactAgent: name={self.name}, model={self.model}")
-        return create_react_agent(
+        return create_agent(
             self.llm,
             tools,
-
+            system_prompt=self._build_system_message(),
             checkpointer=self.memory,
         )
 
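
For reference, a standalone sketch of the replacement entrypoint; the model id and empty toolset here are placeholders, not values from this package:

from langchain.agents import create_agent

graph = create_agent(
    "anthropic:claude-4-sonnet-20250514",  # placeholder model id
    tools=[],
    system_prompt="You are a helpful assistant.",
)
result = graph.invoke({"messages": [{"role": "user", "content": "hello"}]})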
universal_mcp/agents/sandbox.py
CHANGED
@@ -1,90 +1,145 @@
+import ast
+import base64
 import contextlib
-import inspect
 import io
-import queue
-import re
-import socket
-import threading
-import types
-from typing import Any
+import traceback
 
-
+import cloudpickle as pickle
+from loguru import logger
 
 
 class Sandbox:
     """
-    A
+    A simulated environment for executing Python code cells with context
+    maintained across multiple runs.
     """
 
-    def __init__(self
+    def __init__(self):
+        # Dictionary to store variables (context) across runs
+        self.context = {}
+
+    def add_context(self, context: dict[str, any]):
+        """
+        Adds a dictionary of context to the sandbox.
+        """
+        self.context.update(context)
+
+    def save_context(self) -> str:
+        """
+        Saves the context to a base64 string.
+        Files, IO, threads, etc. are not picklable, so we only pickle the context that is picklable.
+        """
+        pickable_context = {}
+        for key, value in self.context.items():
+            try:
+                pickle.dumps(value)
+                pickable_context[key] = value
+            except Exception as e:
+                logger.error(f"Error pickling {key}: {e}")
+        pickled_data = pickle.dumps(pickable_context)
+        base64_encoded = base64.b64encode(pickled_data).decode("utf-8")
+        return base64_encoded
+
+    def load_context(self, context: str, add_context: list[str] = []):
         """
-
+        Loads the context from a base64 string.
+        Also executes the add_context code strings to add to the context.
+        """
+        if context:
+            pickled_data = base64.b64decode(context)
+            new_context = pickle.loads(pickled_data)
+            self.context.update(new_context)
+        for code in add_context:
+            self.run(code)
+        return self.context
+
+    def _filter_context(self, context: dict[str, any]) -> dict[str, any]:
+        """
+        Filters the context to only include picklable variables.
+        """
+        return {k: v for k, v in context.items() if not k.startswith("__")}
+
+    def run(self, code: str) -> dict[str, any]:
+        """
+        Executes the provided Python code string in the maintained context.
+
         Args:
-
+            code (str): The Python code to execute.
+
+        Returns:
+            dict: A dictionary containing the execution results.
         """
-
-
-
+        # Prepare the execution environment:
+        # Use a copy of the context for execution locals/globals
+        exec_scope = self.context.copy()
+
+        stdout_capture = io.StringIO()
+        stderr_output = ""
 
-
+        # Use a true context manager for robust stdout capture
+        try:
+            with contextlib.redirect_stdout(stdout_capture):
+                # Execute the code. Using the same dictionary for globals and locals
+                # allows newly created variables to be visible immediately.
+                exec(code, exec_scope, exec_scope)
+
+            # Update the context with any new/modified variables
+            # Filter out dunder methods/system keys that might be introduced by exec
+            new_context = self._filter_context(exec_scope)
+            self.context.update(new_context)
+
+        except Exception:
+            # Capture the traceback for better error reporting (simulated stderr)
+            stderr_output = traceback.format_exc()
+
+            # The execution scope might contain partially defined variables,
+            # but we continue to maintain the *previous* valid context.
+            # We don't update self.context on failure to avoid polluting it.
+
+        return {"stdout": stdout_capture.getvalue(), "stderr": stderr_output, "success": stderr_output == ""}
+
+    def get_context(self) -> dict[str, any]:
         """
-
-
-
-
+        Returns a copy of the current execution context.
+
+        Returns:
+            dict: A copy of the context dictionary.
         """
+        return self.context.copy()
 
-
-
-
-
-
-            threading.Event,
-            threading.Condition,
-            threading.Semaphore,
-            queue.Queue,
-            socket.socket,
-            io.IOBase,
-        )
+    def reset(self):
+        """
+        Resets the sandbox's context, clearing all defined variables.
+        """
+        self.context = {}
 
-
+    async def arun(self, code: str) -> dict[str, any]:
+        """
+        Asynchronously executes Python code, supporting top-level await.
+        """
+        # Use a copy of the context for execution
+        exec_scope = self.context.copy()
+        stdout_capture = io.StringIO()
+        stderr_output = ""
 
-        def target():
-            try:
-                with contextlib.redirect_stdout(io.StringIO()) as f:
-                    exec(code, self._locals, self._locals)
-                result_container["output"] = f.getvalue() or "<code ran, no output printed to stdout>"
-            except Exception as e:
-                result_container["output"] = "Error during execution: " + str(e)
-
-        thread = threading.Thread(target=target)
-        thread.start()
-        thread.join(self.timeout)
-
-        if thread.is_alive():
-            result_container["output"] = f"Code timeout: code execution exceeded {self.timeout} seconds."
-
-        # Filter locals for picklable/storable variables
-        all_vars = {}
-        for key, value in self._locals.items():
-            if key == "__builtins__":
-                continue
-            if inspect.iscoroutine(value) or inspect.iscoroutinefunction(value):
-                continue
-            if inspect.isasyncgen(value) or inspect.isasyncgenfunction(value):
-                continue
-            if isinstance(value, EXCLUDE_TYPES):
-                continue
-            if not callable(value) or not hasattr(value, "__name__"):
-                all_vars[key] = value
-
-        self._locals = all_vars
-
-        # Safely derive context
         try:
-
+            # Compile the code with the special flag to allow top-level await
+            compiled_code = compile(code, "<string>", "exec", flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT)
+
+            with contextlib.redirect_stdout(stdout_capture):
+                # Eval the compiled code to get a coroutine
+                coroutine = eval(compiled_code, exec_scope, exec_scope)
+
+                # Await the coroutine to run the code if it's async
+                if coroutine:
+                    await coroutine
+
+            # Update the context with any new/modified variables
+            new_context = self._filter_context(exec_scope)
+            if new_context:
+                self.context.update(new_context)
+
         except Exception:
-
-            pass
+            stderr_output = traceback.format_exc()
 
-        return
+        return {"stdout": stdout_capture.getvalue(), "stderr": stderr_output, "success": stderr_output == ""}
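
The practical payoff of the rewrite is the save/load round-trip, which lets a sandbox's variables survive across processes (illustrative):

sb = Sandbox()
print(sb.run("x = 21\nprint(x * 2)"))
# {'stdout': '42\n', 'stderr': '', 'success': True}

blob = sb.save_context()  # base64-encoded cloudpickle of the picklable variables

sb2 = Sandbox()
sb2.load_context(blob)
print(sb2.run("print(x)")["stdout"])  # x survived the round-trip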
universal_mcp/applications/llm/app.py
CHANGED

@@ -1,8 +1,8 @@
 import json
-from typing import Any, Literal, cast
+from typing import Any, Literal, cast, Optional, List
 
-from langchain.
-from pydantic import BaseModel, Field
+from langchain.agents import create_agent
+from pydantic import BaseModel, Field, create_model
 from universal_mcp.applications.application import BaseApplication
 
 from universal_mcp.agents.llm import load_chat_model
@@ -10,6 +10,59 @@ from universal_mcp.agents.llm import load_chat_model
 MAX_RETRIES = 3
 
 
+def _pydantic_model_from_json_schema(schema: dict[str, Any]) -> type[BaseModel]:
+    """Create a Pydantic model from a JSON schema (subset support).
+
+    Supported keywords: type, properties, required, items, description, title.
+    """
+
+    def to_type(subschema: dict[str, Any]) -> Any:
+        stype = subschema.get("type")
+        if stype == "object" or (stype is None and "properties" in subschema):
+            title = subschema.get("title", "SubObject")
+            props: dict[str, dict[str, Any]] = subschema.get("properties", {})
+            required: list[str] = subschema.get("required", [])
+            fields: dict[str, tuple[Any, Any]] = {}
+            for name, prop_schema in props.items():
+                t = to_type(prop_schema)
+                if name in required:
+                    fields[name] = (t, ...)
+                else:
+                    fields[name] = (Optional[t], None)  # type: ignore[index]
+            return create_model(title, **fields)  # type: ignore[return-value]
+        if stype == "array":
+            item_schema = subschema.get("items", {"type": "string"})
+            return List[to_type(item_schema)]  # type: ignore[index]
+        if stype == "string":
+            return str
+        if stype == "integer":
+            return int
+        if stype == "number":
+            return float
+        if stype == "boolean":
+            return bool
+        if stype == "null":
+            return Optional[Any]
+        # Fallback to Any for unsupported/omitted types
+        return Any
+
+    title = schema.get("title", "Output")
+    if schema.get("type") == "object" or "properties" in schema:
+        props = schema.get("properties", {})
+        required = schema.get("required", [])
+        fields: dict[str, tuple[Any, Any]] = {}
+        for name, prop_schema in props.items():
+            t = to_type(prop_schema)
+            if name in required:
+                fields[name] = (t, ...)
+            else:
+                fields[name] = (Optional[t], None)  # type: ignore[index]
+        return create_model(title, **fields)  # type: ignore[return-value]
+    # Non-object root types
+    root_type = to_type(schema)
+    return create_model(title, __root__=(root_type, ...))  # type: ignore[return-value]
+
+
 def _get_context_as_string(source: Any | list[Any] | dict[str, Any]) -> str:
     """Converts context to a string representation.
 
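
A small example of the conversion (illustrative):

Person = _pydantic_model_from_json_schema({
    "title": "Person",
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "tags": {"type": "array", "items": {"type": "string"}},
        "age": {"type": "integer"},
    },
    "required": ["name"],
})
print(Person(name="Ada", tags=["math"]).model_dump())
# {'name': 'Ada', 'tags': ['math'], 'age': None}

One caveat: the non-object fallback builds the model via __root__, a Pydantic v1 idiom; under Pydantic v2 that branch would need RootModel instead.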
@@ -91,8 +144,8 @@ class LlmApp(BaseApplication):
 
         full_prompt = f"{prompt}\n\nContext:\n{context_str}\n\n"
 
-        model = load_chat_model("azure/gpt-5-mini")
-        response = model.with_retry(stop_after_attempt=MAX_RETRIES).invoke(full_prompt)
+        model = load_chat_model("azure/gpt-5-mini", disable_streaming=True, tags=("quiet",))
+        response = model.with_retry(stop_after_attempt=MAX_RETRIES).invoke(full_prompt, stream=False)
         return str(response.content)
 
     def classify_data(
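The combination used here and in the methods below — disable_streaming=True, a "quiet" tag, and invoke(..., stream=False) — presumably keeps these internal helper calls from streaming tokens into the host agent's output, with the tag giving callers a handle for filtering events.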
@@ -151,22 +204,22 @@
             f"This is a classification task.\nPossible classes and descriptions:\n"
             f"{json.dumps(class_descriptions, indent=2)}\n\n"
             f"Context:\n{context_str}\n\n"
-            "Return ONLY a valid JSON object, no extra text."
         )
 
-        model = init_chat_model(model="claude-4-sonnet-20250514", temperature=0)
-
         class ClassificationResult(BaseModel):
             probabilities: dict[str, float] = Field(..., description="The probabilities for each class.")
             reason: str = Field(..., description="The reasoning behind the classification.")
             top_class: str = Field(..., description="The class with the highest probability.")
 
-
-
-
-
+        model = load_chat_model("azure/gpt-5-mini", temperature=0, disable_streaming=True, tags=("quiet",))
+        agent = create_agent(
+            model=model,
+            tools=[],
+            response_format=ClassificationResult,  # Auto-selects ProviderStrategy
         )
-
+
+        result = agent.invoke({"messages": [{"role": "user", "content": prompt}]}, stream=False)
+        return result["structured_response"].model_dump()
 
     def extract_data(
         self,
@@ -229,14 +282,15 @@
             "Return ONLY a valid JSON object that conforms to the provided schema, with no extra text."
         )
 
-        model =
+        model = load_chat_model("azure/gpt-5-mini", temperature=0, disable_streaming=True, tags=("quiet",))
 
+        PModel = _pydantic_model_from_json_schema(output_schema)
         response = (
-            model.with_structured_output(
+            model.with_structured_output(PModel)
             .with_retry(stop_after_attempt=MAX_RETRIES)
-            .invoke(prompt)
+            .invoke(prompt, stream=False)
         )
-        return cast(dict[str, Any], response)
+        return cast(dict[str, Any], response.model_dump())
 
     def call_llm(
         self,
@@ -282,14 +336,12 @@
 
         prompt = f"{task_instructions}\n\nContext:\n{context_str}\n\nReturn ONLY a valid JSON object, no extra text."
 
-        model =
+        model = load_chat_model("azure/gpt-5-mini", temperature=0, disable_streaming=True, tags=("quiet",))
 
-
-
-
-
-        )
-        return cast(dict[str, Any], response)
+        PModel = _pydantic_model_from_json_schema(output_schema)
+        model_with_structure = model.with_structured_output(PModel)
+        response = model_with_structure.with_retry(stop_after_attempt=MAX_RETRIES).invoke(prompt, stream=False)
+        return cast(dict[str, Any], response.model_dump())
 
     def list_tools(self):
         return [