universal-mcp-agents 0.1.12__py3-none-any.whl → 0.1.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of universal-mcp-agents might be problematic. Click here for more details.
- universal_mcp/agents/__init__.py +1 -1
- universal_mcp/agents/base.py +2 -0
- universal_mcp/agents/bigtool/__init__.py +1 -1
- universal_mcp/agents/bigtool/agent.py +2 -2
- universal_mcp/agents/bigtool/graph.py +65 -31
- universal_mcp/agents/bigtool/prompts.py +2 -2
- universal_mcp/agents/bigtool/tools.py +18 -4
- universal_mcp/agents/builder/__main__.py +105 -30
- universal_mcp/agents/builder/builder.py +149 -160
- universal_mcp/agents/builder/helper.py +73 -0
- universal_mcp/agents/builder/prompts.py +33 -152
- universal_mcp/agents/builder/state.py +1 -1
- universal_mcp/agents/cli.py +2 -2
- universal_mcp/agents/codeact/agent.py +1 -1
- universal_mcp/agents/codeact/sandbox.py +1 -5
- universal_mcp/agents/codeact0/agent.py +5 -4
- universal_mcp/agents/codeact0/langgraph_agent.py +17 -0
- universal_mcp/agents/codeact0/llm_tool.py +1 -1
- universal_mcp/agents/codeact0/prompts.py +34 -23
- universal_mcp/agents/codeact0/usecases/11-github.yaml +6 -5
- universal_mcp/agents/codeact0/utils.py +42 -63
- universal_mcp/agents/shared/__main__.py +43 -0
- universal_mcp/agents/shared/prompts.py +50 -99
- universal_mcp/agents/shared/tool_node.py +149 -203
- universal_mcp/agents/utils.py +65 -0
- universal_mcp/applications/ui/app.py +2 -2
- {universal_mcp_agents-0.1.12.dist-info → universal_mcp_agents-0.1.14.dist-info}/METADATA +1 -1
- {universal_mcp_agents-0.1.12.dist-info → universal_mcp_agents-0.1.14.dist-info}/RECORD +29 -28
- universal_mcp/agents/codeact0/langgraph_graph.py +0 -17
- universal_mcp/agents/codeact0/legacy_codeact.py +0 -104
- {universal_mcp_agents-0.1.12.dist-info → universal_mcp_agents-0.1.14.dist-info}/WHEEL +0 -0
|
@@ -1,104 +0,0 @@
|
|
|
1
|
-
import contextlib
|
|
2
|
-
import inspect
|
|
3
|
-
import io
|
|
4
|
-
import queue
|
|
5
|
-
import re
|
|
6
|
-
import socket
|
|
7
|
-
import threading
|
|
8
|
-
import types
|
|
9
|
-
from typing import Any
|
|
10
|
-
|
|
11
|
-
import yaml
|
|
12
|
-
from langchain.chat_models import init_chat_model
|
|
13
|
-
from langchain_anthropic import ChatAnthropic
|
|
14
|
-
from langchain_core.runnables import RunnableConfig
|
|
15
|
-
from langchain_google_vertexai.model_garden import ChatAnthropicVertex
|
|
16
|
-
from universal_mcp.agentr import Agentr
|
|
17
|
-
from universal_mcp.types import ToolFormat
|
|
18
|
-
|
|
19
|
-
from universal_mcp.agents.codeact0 import create_codeact
|
|
20
|
-
from universal_mcp.agents.codeact0.config import ContextSchema
|
|
21
|
-
from universal_mcp.agents.codeact0.utils import derive_context
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
def eval(code: str, _locals: dict[str, Any], add_context: dict[str, Any]) -> tuple[str, dict[str, Any], dict[str, Any]]:
    """Execute *code* in the ``_locals`` namespace and capture its stdout.

    The name intentionally shadows the ``eval`` builtin: this is the sandbox
    entry point handed to ``create_codeact`` and its signature must not change.

    WARNING: the code is run via ``exec`` with no sandboxing — callers must
    treat the input as trusted.

    Args:
        code: Python source to execute.
        _locals: Namespace the code runs in; mutated in place by ``exec``.
        add_context: Prior derived context, folded into ``derive_context``.

    Returns:
        A tuple of:
        - captured stdout (or a placeholder / ``repr`` of the raised error),
        - a snapshot of ``_locals`` with unpicklable objects filtered out,
        - the updated context from ``derive_context(code, add_context)``.
    """
    # Types that are obviously unpicklable and must not leak into the
    # returned variable snapshot (modules, locks, sockets, file handles, ...).
    EXCLUDE_TYPES = (
        types.ModuleType,          # modules
        re.Match,                  # regex match objects (was type(re.match("", "")))
        type(threading.Lock()),    # Lock() is a factory; its type is private
        type(threading.RLock()),   # reentrant lock, same situation
        threading.Event,           # events
        threading.Condition,       # condition vars
        threading.Semaphore,       # semaphores
        queue.Queue,               # thread-safe queues
        socket.socket,             # network sockets
        io.IOBase,                 # file handles (and StringIO/BytesIO)
    )

    buffer = io.StringIO()
    try:
        # Redirect stdout so prints inside the executed code become the result.
        with contextlib.redirect_stdout(buffer):
            exec(code, _locals, _locals)
        # Empty output is replaced with an explicit marker for the agent.
        result = buffer.getvalue() or "<code ran, no output printed to stdout>"
    except Exception as e:
        result = f"Error during execution: {repr(e)}"

    # Snapshot locals, skipping __builtins__, async objects, unpicklable
    # types, and named callables (tools) that cannot cross a pickle boundary.
    all_vars = {}
    for key, value in _locals.items():
        if key == "__builtins__":
            continue
        # Skip coroutines, async generators, and their factory functions.
        if inspect.iscoroutine(value) or inspect.iscoroutinefunction(value):
            continue
        if inspect.isasyncgen(value) or inspect.isasyncgenfunction(value):
            continue
        # Skip "obviously unpicklable" types.
        if isinstance(value, EXCLUDE_TYPES):
            continue
        # Keep plain data, and callables without a __name__ (e.g. partials).
        if not callable(value) or not hasattr(value, "__name__"):
            all_vars[key] = value

    new_add_context = derive_context(code, add_context)
    return result, all_vars, new_add_context
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
async def agent(config: RunnableConfig):
    """Build and compile a CodeAct graph from the runnable *config*.

    Reads an optional YAML use-case preset for the base prompt and tool
    list, loads the Agentr tools, selects a chat model for the configured
    provider, and returns the compiled CodeAct graph.

    Args:
        config: LangGraph runnable config; its ``configurable`` mapping is
            validated into a ``ContextSchema``.

    Returns:
        The compiled graph produced by ``create_codeact(...).compile()``.
    """
    cfg = ContextSchema(**config.get("configurable", {}))

    if cfg.json_prompt_name and cfg.json_prompt_name.strip():
        # Presets live in usecases/<name>.yaml, resolved relative to the
        # current working directory — NOTE(review): confirm that is intended.
        with open(f"usecases/{cfg.json_prompt_name}.yaml", encoding="utf-8") as f:
            content = f.read()
        data = yaml.safe_load(content)
        # An explicitly configured base_prompt wins over the preset
        # (original used an inverted `if ...: pass / else:` branch).
        if not (cfg.base_prompt and cfg.base_prompt.strip()):
            cfg.base_prompt = data["base_prompt"]
        cfg.tool_names = data["tools"]

    agentr = Agentr()
    agentr.load_tools(cfg.tool_names)
    tools = []  # can add custom tools here like get_weather, get_simple_weather, etc.

    tools_agentr = agentr.list_tools(format=ToolFormat.NATIVE)
    tools.extend(tools_agentr)

    if cfg.model_provider == "google_anthropic_vertex":
        # For Google Anthropic Vertex, we need the specific model
        # initialization due to location.
        model = ChatAnthropicVertex(model=cfg.model, temperature=0.2, location="asia-east1")
    elif cfg.model == "claude-4-sonnet-20250514":
        model = ChatAnthropic(
            model=cfg.model, temperature=1, thinking={"type": "enabled", "budget_tokens": 2048}, max_tokens=4096
        )  # pyright: ignore[reportCallIssue]
    else:
        model = init_chat_model(model=cfg.model, model_provider=cfg.model_provider, temperature=0.2)

    code_act = create_codeact(model, cfg.base_prompt, tools, eval)
    return code_act.compile()
|
|
File without changes
|